from .header import *
from .biencoder import BERTBiEncoder
'''Cross-Attention BertRetrieval'''
class BERTRetrieval(nn.Module):
def __init__(self, model='bert-base-chinese'):
super(BERTRetrieval, self).__init__()
self.model = BertForSequenceClassification.from_pretrained(model, num_labels=2)
def forward(self, inpt, token_type_ids, attn_mask):
output = self.model(
input_ids=inpt,
attention_mask=attn_mask,
token_type_ids=token_type_ids,
)
logits = output[0] # [batch, 2]
return logits
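# A minimal usage sketch (illustrative only; 21128 is the assumed vocab size of
# bert-base-chinese and the tensors below are dummy data):
#   model = BERTRetrieval('bert-base-chinese')
#   inpt = torch.randint(0, 21128, (4, 32))          # [batch, seq_len]
#   token_type_ids = torch.zeros_like(inpt)          # single-segment toy input
#   attn_mask = torch.ones_like(inpt)                # no padding in this toy batch
#   logits = model(inpt, token_type_ids, attn_mask)  # [4, 2] relevance logits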
class TopicPrediction(nn.Module):
'''BERT model as the backbone for semantic embedding;
follows the work: 2020-COLING Towards Topic-Guided Conversational Recommender System.
P_{topic}(t)=softmax(e_t^T \cdot \mathrm{MLP}([r^{(1)}; r^{(2)}])), where r^{(1)} represents the concatenation of the dialog history and the target topic word (separated by [SEP]); r^{(2)} represents the concatenation of the historical topic sequence'''
def __init__(self, vocab_size, dropout=0.3, model='bert-base-chinese'):
super(TopicPrediction, self).__init__()
self.bert = BertModel.from_pretrained(model)
self.predictor = nn.Sequential(
nn.Dropout(p=dropout),
nn.Linear(768, vocab_size),
)
def forward(self, inpt, attn_mask):
inpt_embd = self.bert(
input_ids=inpt,
attention_mask=attn_mask,
)[0][:, 0, :]
rest = self.predictor(inpt_embd) # [B, V]
return rest
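# Usage sketch (vocab_size here is a hypothetical topic-vocabulary size):
#   topic_model = TopicPrediction(vocab_size=3000)
#   topic_logits = topic_model(inpt, attn_mask)   # [B, 3000] scores over topic words
#   next_topic = topic_logits.argmax(dim=-1)      # predicted topic index per sample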
class BERTRetrievalAgent(RetrievalBaseAgent):
def __init__(self, multi_gpu, run_mode='train', lang='zh', kb=True, local_rank=0):
super(BERTRetrievalAgent, self).__init__(kb=kb)
try:
self.gpu_ids = list(range(len(multi_gpu.split(','))))
except:
raise Exception(f'[!] multi gpu ids are needed, but got: {multi_gpu}')
self.args = {
'lr': 1e-5,
'grad_clip': 1.0,
'samples': 10,
'multi_gpu': self.gpu_ids,
'talk_samples': 256,
'max_len': 256,
'vocab_file': 'bert-base-chinese',
'pad': 0,
'model': 'bert-base-chinese',
'amp_level': 'O2',
'local_rank': local_rank,
}
self.vocab = BertTokenizer.from_pretrained(self.args['vocab_file'])
self.model = BERTRetrieval(self.args['model'])
if torch.cuda.is_available():
self.model.cuda()
self.optimizer = transformers.AdamW(
self.model.parameters(),
lr=self.args['lr'],
)
self.criterion = nn.CrossEntropyLoss()
if run_mode == 'train':
self.model, self.optimizer = amp.initialize(
self.model,
self.optimizer,
opt_level=self.args['amp_level']
)
self.model = nn.parallel.DistributedDataParallel(
self.model, device_ids=[local_rank], output_device=local_rank,
)
self.show_parameters(self.args)
def train_model(self, train_iter, mode='train', recoder=None, idx_=0):
self.model.train()
total_loss, batch_num = 0, 0
pbar = tqdm(train_iter)
correct, s = 0, 0
for idx, batch in enumerate(pbar):
cid, token_type_ids, attn_mask, label = batch
self.optimizer.zero_grad()
output = self.model(cid, token_type_ids, attn_mask) # [B, 2]
loss = self.criterion(
output,
label.view(-1),
)
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
clip_grad_norm_(amp.master_params(self.optimizer), self.args['grad_clip'])
self.optimizer.step()
total_loss += loss.item()
batch_num += 1
now_correct = torch.max(F.softmax(output, dim=-1), dim=-1)[1] # [batch]
now_correct = torch.sum(now_correct == label).item()
correct += now_correct
s += len(label)
recoder.add_scalar(f'train-epoch-{idx_}/Loss', total_loss/batch_num, idx)
recoder.add_scalar(f'train-epoch-{idx_}/RunLoss', loss.item(), idx)
recoder.add_scalar(f'train-epoch-{idx_}/Acc', correct/s, idx)
recoder.add_scalar(f'train-epoch-{idx_}/RunAcc', now_correct/len(label), idx)
pbar.set_description(f'[!] train loss: {round(loss.item(), 4)}|{round(total_loss/batch_num, 4)}; acc: {round(now_correct/len(label), 4)}|{round(correct/s, 4)}')
recoder.add_scalar(f'train-whole/Loss', total_loss/batch_num, idx_)
recoder.add_scalar(f'train-whole/Acc', correct/s, idx_)
return round(total_loss / batch_num, 4)
@torch.no_grad()
def test_model(self, test_iter, path):
self.model.eval()
total_loss, batch_num = 0, 0
pbar = tqdm(test_iter)
rest = []
for idx, batch in enumerate(pbar):
cid, token_type_ids, attn_mask, label = batch
output = self.model(cid, token_type_ids, attn_mask) # [batch, 2]
loss = self.criterion(output, label.view(-1))
total_loss += loss.item()
batch_num += 1
output = F.softmax(output, dim=-1)[:, 1] # [batch]
preds = [i.tolist() for i in torch.split(output, self.args['samples'])]
labels = [i.tolist() for i in torch.split(label, self.args['samples'])]
for label, pred in zip(labels, preds):
pred = np.argsort(pred, axis=0)[::-1]
rest.append(([0], pred.tolist()))
print(f'[!] test loss: {round(total_loss/batch_num, 4)}')
p_1, r2_1, r10_1, r10_2, r10_5, MAP, MRR = cal_ir_metric(rest)
print(f'[TEST] P@1: {p_1}; R2@1: {r2_1}; R10@1: {r10_1}; R10@2: {r10_2}; R10@5: {r10_5}; MAP: {MAP}; MRR: {MRR}')
return round(total_loss/batch_num, 4)
@torch.no_grad()
def talk(self, msgs, topic=None):
self.model.eval()
utterances_, inpt_ids, token_type_ids, attn_mask = self.process_utterances(
topic, msgs, max_len=self.args['max_len']
)
# prepare the data input
output = self.model(inpt_ids, token_type_ids, attn_mask) # [B, 2]
output = F.softmax(output, dim=-1)[:, 1] # [B]
item = torch.argmax(output).item()
msg = utterances_[item]
return msg
def reverse_search(self, ctx, ctx_, res):
with torch.no_grad():
utterances_ = self.searcher.search(None, ctx, samples=self.args['talk_samples'])
utterances_ = [i['context'] for i in utterances_]
if ctx_ in utterances_:
mask_index = utterances_.index(ctx_)
else:
mask_index = None
utterances = [f'{i} [SEP] {res}' for i in utterances_]
ids = [torch.LongTensor(self.vocab.encode(i)[-512:]) for i in utterances]
ids = pad_sequence(ids, batch_first=True, padding_value=self.args['pad'])
if torch.cuda.is_available():
ids = ids.cuda()
# the cross-encoder forward needs token type ids and an attention mask;
# all-zero token_type_ids is a simplification here
token_type_ids = torch.zeros_like(ids)
attn_mask = (ids != self.args['pad']).long()
output = self.model(ids, token_type_ids, attn_mask) # [batch, 2]
if mask_index is not None:
output[mask_index][1] = -float('inf')
output = F.softmax(output, dim=-1)[:, 1]
item = torch.argmax(output)
rest = utterances_[item]
return rest
def rerank(self, topic, msgs, topk=2):
self.model.eval()
with torch.no_grad():
utterances_, ids, token_type_ids, attn_mask = self.process_utterances(topic, msgs)
output = self.model(ids, token_type_ids, attn_mask)
output = F.softmax(output, dim=-1)[:, 1]
# argsort
indexs = torch.argsort(output, descending=True)[:topk]
msgs = [utterances_[index] for index in indexs]
return msgs
class BERTRetrievalLoopAgent:
'''Knowledge Graph Driven Retrieval Dialog System Loop Agent'''
def __init__(self, multi_gpu, run_mode='train', lang='zh', kb=True):
super(BERTRetrievalLoopAgent, self).__init__()
self.agent = BERTRetrievalKGAgent(multi_gpu, run_mode='test', lang='zh', kb=True, local_rank=0)
self.env = BERTRetrievalEnvAgent(multi_gpu, run_mode='test', lang='zh', kb=True, local_rank=0)
def train_model(self, train_iter, mode='train', recoder=None, idx_=0):
pass
@torch.no_grad()
def test_model(self):
pass
class BERTRetrievalEnvAgent(BERTRetrievalAgent):
'''Env conversation agent which doesn't know the knowledge graph but can return the reward and the next observation;
the model parameters are different from the BERTRetrievalKGAgent'''
def __init__(self, multi_gpu, run_mode='train', lang='zh', kb=True, local_rank=0, wordnet=None, talk_samples=128):
super(BERTRetrievalEnvAgent, self).__init__(multi_gpu, run_mode=run_mode, lang=lang, kb=kb, local_rank=local_rank)
self.args['done_reward'], self.args['smooth_penalty'], self.args['step_penalty'] = 100, 20, 5
self.wordnet = wordnet
self.args['talk_sample'] = talk_samples
self.lac = LAC(mode='lac')
def reset(self):
self.history = []
def wrap_utterances(self, context, max_len=0):
'''context is a list of string, which contains the dialog history'''
context, response = ' [SEP] '.join(context[:-1]), context[-1]
# construct inpt_ids, token_type_ids, attn_mask
inpt_ids = self.vocab.batch_encode_plus([context, response])['input_ids']
context_inpt_ids, responses_inpt_ids = inpt_ids[0], inpt_ids[1]
context_token_type_ids = [0] * len(context_inpt_ids)
responses_token_type_ids = [1] * len(responses_inpt_ids)
# length limitation
inpt_ids, token_type_ids = context_inpt_ids + responses_inpt_ids[1:], context_token_type_ids + responses_token_type_ids[1:]
if len(inpt_ids) > max_len:
cut_size = len(inpt_ids) - max_len + 1
inpt_ids = torch.LongTensor([inpt_ids[0]] + inpt_ids[cut_size:])
token_type_ids = torch.LongTensor([token_type_ids[0]] + token_type_ids[cut_size:])
else:
inpt_ids = torch.LongTensor(inpt_ids)
token_type_ids = torch.LongTensor(token_type_ids)
attn_mask = torch.ones_like(inpt_ids)
if torch.cuda.is_available():
inpt_ids, token_type_ids, attn_mask = inpt_ids.cuda(), token_type_ids.cuda(), attn_mask.cuda()
return inpt_ids, token_type_ids, attn_mask
@torch.no_grad()
def talk(self, msgs, topic=None):
self.model.eval()
utterances_, inpt_ids, token_type_ids, attn_mask = self.process_utterances(
topic, msgs, max_len=self.args['max_len'],
)
# prepare the data input
output = self.model(inpt_ids, token_type_ids, attn_mask) # [B, 2]
output = F.softmax(output, dim=-1)[:, 1] # [B]
item = torch.argmax(output).item()
msg = utterances_[item]
return msg
@torch.no_grad()
def get_reward(self, context, done=False, steps=0):
'''construct the reward'''
self.model.eval()
if done:
return self.args['done_reward'] - steps * self.args['step_penalty']
else:
output = self.model(*self.wrap_utterances(context, max_len=self.args['max_len'])) # [2]
output = F.softmax(output, dim=-1)[0]
reward = -self.args['smooth_penalty'] * output.item()
return reward
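# Worked example of the reward, using the constants set in __init__ above:
# if the target is reached (done=True) after 3 steps,
#   reward = done_reward - steps * step_penalty = 100 - 3 * 5 = 85
# otherwise reward = -smooth_penalty * P(class 0), which lies in [-20, 0]
# (assuming index 0 is the "incoherent response" class of the cross-encoder).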
def extract_topic_words(self, utterance):
def filter(word, tag):
def isChinese():
for ch in word:
if not '\u4e00' <= ch <= '\u9fff':
return False
return True
def HaveDigital():
if bool(re.search(r'\d', word)):
return False
else:
return True
def Length():
if 1 < len(word) < 5:
return True
else:
return False
def HaveAlpha():
for ch in word:
if ch.encode().isalpha():
return False
return True
def Special():
for ch in word:
if ch in set('一二三四五六七八九十月日周年区东西南北。,|;“”‘’!~·、:=-+#¥%……&*()【】@?.,?[]{}()!$^`";:'):
return False
return True
def CheckTag():
if tag in set(['n', 'nz', 'nw', 'v', 'vn', 'a', 'ad', 'an', 'ORG', 'PER', 'LOC']):
return True
else:
return False
def InWordNet():
if word in self.wordnet.nodes:
return True
else:
return False
return isChinese() and HaveDigital() and Length() and HaveAlpha() and Special() and CheckTag() and InWordNet()
words, tags = self.lac.run(utterance)
topic = []
for word, tag in zip(words, tags):
if filter(word, tag):
topic.append(word)
return list(set(topic))
@torch.no_grad()
def get_res(self, data):
'''return reward and next utterances for the BERTRetrievalEnvAgent'''
msgs = [i['msg'] for i in data['msgs']]
# NOTE: in order to make sure the user speak based on the given conversation, use the topic for coarse ranking
# topic = self.extract_topic_words(msgs[-1])
topic = None
msgs = ' [SEP] '.join(msgs)
res = self.talk(msgs, topic=topic)
self.history.append(res)
return res
class BERTRetrievalKGGreedyAgent(BERTRetrievalAgent):
'''fix the talk function for BERTRetrievalAgent.
The agent knows the whole knowledge graph path, but the other one doesn't;
greedy: ACL 2019 Target-Guided Open-Domain Conversation'''
def __init__(self, multi_gpu, run_mode='train', lang='zh', kb=True, local_rank=0, wordnet=None, talk_samples=128):
super(BERTRetrievalKGGreedyAgent, self).__init__(multi_gpu, run_mode=run_mode, lang=lang, kb=kb, local_rank=local_rank)
self.topic_history = []
self.wordnet = wordnet
self.args['talk_samples'] = talk_samples
self.w2v = gensim.models.KeyedVectors.load_word2vec_format(
'data/chinese_w2v_base.txt', binary=False
)
def reset(self, target, source):
self.args['target'], self.args['current_node'] = target, source
self.topic_history, self.history = [source], []
print(f'[! Reset the KG target] source: {source}; target: {target}')
def search_candidates(self, msgs, nodes):
'''Note that the input node may contain multiple topic words.
f(n) = g(n) + h(n)
1. must have a path to the target √
2. closer to the target than the current node √
3. the retrieved utterance must contain both the current and the candidate node
4. as for g(n): 1) word similarity; 2) average bag-of-utterances coherence
5. as for f(n): 2) number of retrieved utterances based on the current node and the candidate node and their corresponding average coherence; 3)
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2021 <NAME>, <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import dizge.competence.phonology as p
import dizge.competence.lexicon as lex
# NORMALIZATION
normalizationChars = {
"!": "",
'"': "",
"#": "",
"$": "",
"%": "",
"&": "",
"\\": "",
"'": "",
"(": "",
")": "",
"*": "",
"+": "",
",": "",
"-": "",
".": "",
"/": "",
":": "",
";": "",
"<": "",
"=": "",
">": "",
"?": "",
"@": "",
"[": "",
"\\": "",
"]": "",
"^": "",
"_": "",
"`": "",
"{": "",
"|": "",
"}": "",
"~": ""
}
loanSignals = {
"â": "a",
"ê": "e",
"î": "i",
"ô": "ö",
"û": "ü"
}
#
def softG(word):
"""
:param word:
:return:
"""
softGCount = word.count("ğ")
i = 0
word2 = word
while i < softGCount:
indexSoftG = word2.index("ğ", 1)
final = 1 if word2[-1] == "ğ" else 0
previousP = indexSoftG - 1
nextP = indexSoftG + 1
if final == 1:
word2 = word2.replace("ğ", "\u02D0", 1)
elif "yığıl" in word2:
word2 = word2.replace("yığıl", "yıoul", 1)
elif p.isVowel(word2[previousP]) and p.isVowel(word2[nextP]):
if (p.isUnrounded(word2[previousP]) and p.isFront(word2[previousP])) and (
p.isUnrounded(word2[nextP]) and p.isFront(word2[nextP])):
word2 = word2.replace("ğ", "y", 1)
elif word2[previousP] == word2[nextP]:
word2 = word2.replace("ğ", "\u02D0", 1)
word2 = word2[:nextP] + word2[nextP + 1:]
elif word2[previousP] != word2[nextP]:
word2 = word2.replace("ğ", "\u02D0", 1)
elif p.isVowel(word2[previousP]) and p.isConsonant(word2[nextP]):
if p.isUnrounded(word2[previousP]) and p.isFront(word2[previousP]):
word2 = word2.replace("ğ", "y", 1)
else:
word2 = word2.replace("ğ", "\u02D0", 1)
i += 1
return word2
def normalize(word, mode="human"):
"""
:param word:
:param mode:
:return:
"""
loanAlert = 0
word1 = word.strip().lower().replace(" ", "")
word2 = ""
for letter in word1:
if letter in normalizationChars.keys():
word2 += normalizationChars[letter]
else:
word2 += letter
word3 = ""
for letter in word2:
if letter in loanSignals.keys():
word3 += loanSignals[letter]
loanAlert = 1
else:
word3 += letter
output = word3
if "ğ" in word3 and mode == "machine":
output = softG(word3)
return output, loanAlert, word
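# Worked example (a sketch; the input word is illustrative):
#   normalize("Kâr!") -> ("kar", 1, "Kâr!")
# "!" is stripped via normalizationChars, "â" -> "a" sets loanAlert = 1,
# and softG is not applied because no "ğ" remains (and mode defaults to "human").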
# SYLLABLE ANALYZER
def syllable_o(word, mode="human"):
"""
:param word:
:param mode:
:return:
"""
vowels = ["a", "\u0251", "e", "\u025B", "\u0069", "I", "\u0268", "o", "\u0254", "\u00F8", "\u0153", "u", "U", "Y",
"y"]
def word2pattern(syl):
"""
:param syl:
:return:
"""
seq = ""
for phoneme in syl:
if phoneme in vowels:
seq += "V"
else:
seq += "C"
if "VV" in seq:
seq = seq.replace("VV", "V")
return seq
seq = list(normalize(word, mode)[0])
for i in range(len(seq) - 1, -1, -1):
if seq[i] in [v.grapheme for v in p.vowels]:
if (i - 1) <= 0:
continue
elif seq[i - 1] in [v.grapheme for v in p.vowels]:
continue
else:
seq[i - 1] = "." + seq[i - 1]
analyzedWord = "".join(seq)
if "." in analyzedWord:
analyzedWord = analyzedWord.split(".")
else:
analyzedWord = [analyzedWord]
if mode == "machine":
analyzedWord_seq = [word2pattern(syl) for syl in analyzedWord]
if mode == "machine":
return list(zip(analyzedWord, analyzedWord_seq))
else:
return analyzedWord
def syllable_p(word):
"""
:param word:
:return:
"""
vowels = ["a", "\u0251", "e", "\u025B", "\u0069", "I", "\u0268", "o", "\u0254", "\u00F8", "\u0153", "u", "U", "Y",
"y"]
def word2pattern(syl):
"""
:param syl:
:return:
"""
if "d\u0292" in syl:
syl = syl.replace("d\u0292", "1")
elif "t\u0283" in syl:
syl = syl.replace("t\u0283", "2")
elif "z\u0325" in syl:
syl = syl.replace("z\u0325", "3")
seq = ""
for phoneme in syl:
if phoneme in vowels:
seq += "V"
elif phoneme == "\u02D0" or phoneme == "\u02B0":
continue
else:
seq += "C"
if "VVV" in seq:
seq = seq.replace("VVV", "V")
elif "VV" in seq:
seq = seq.replace("VV", "V")
return seq
# call g2p once and reuse the result
g2p_result = g2p(word)
if isinstance(g2p_result, str):
output = g2p_result
else:
output = g2p_result[0]
if "d\u0292" in output:
output = output.replace("d\u0292", "1")
elif "t\u0283" in output:
output = output.replace("t\u0283", "2")
elif "p\u02B0" in output:
output = output.replace("p\u02B0", "3")
elif "t\u02B0" in output:
output = output.replace("t\u02B0", "4")
elif "k\u02B0" in output:
output = output.replace("k\u02B0", "5")
elif "c\u02B0" in output:
output = output.replace("c\u02B0", "6")
seq = list(output)
for i in range(len(seq) - 1, -1, -1):
if seq[i] in vowels:
if (i - 1) <= 0:
continue
elif seq[i - 1] in vowels or seq[i - 1] == "\u02D0" or seq[i - 1] == "\u02B0":
continue
else:
seq[i - 1] = "." + seq[i - 1]
analyzedWord = "".join(seq)
if "1" in analyzedWord:
analyzedWord = analyzedWord.replace("1", "d\u0292")
elif "2" in analyzedWord:
analyzedWord = analyzedWord.replace("2", "t\u0283")
elif "3" in analyzedWord:
analyzedWord = analyzedWord.replace("3", "p\u02B0")
elif "4" in analyzedWord:
analyzedWord = analyzedWord.replace("4", "t\u02B0")
elif "5" in analyzedWord:
analyzedWord = analyzedWord.replace("5", "k\u02B0")
elif "6" in analyzedWord:
analyzedWord = analyzedWord.replace("6", "c\u02B0")
if "." in analyzedWord:
analyzedWord = analyzedWord.split(".")
else:
analyzedWord = [analyzedWord]
analyzedWord_seq = [word2pattern(syl) for syl in analyzedWord]
return list(zip(analyzedWord, analyzedWord_seq))
def findMyIndex(syllableCounter, phonemeCounter, seq):
"""
:param syllableCounter:
:param phonemeCounter:
:param seq:
:return:
"""
total = 0
n = 0
while n < (syllableCounter - 1):
total += len(seq[n])
n += 1
total += phonemeCounter
return (total - 1)
def g2p(word):
"""
Grapheme-to-phoneme conversion: map an orthographic word to an IPA-like transcription.
:param word: the orthographic word
:return: the phonetic transcription
"""
word2 = word
if "zs" in word:
word = word.replace("zs", "ss")
syllables = syllable_o(softG(word), "machine")
seq = [syllable[0] for syllable in syllables]
syllableCounter = 0
phonemeCounter = 0
output = ""
alternative = ""
for syl in seq:
syllableCounter += 1
for phoneme in syl:
phonemeCounter += 1
if phoneme == "\u02D0":
output += "\u02D0"
elif phoneme == "\u02D0":
output += "\u02D0"
elif phoneme == "p":
if p.isInitialWord(syllableCounter, phonemeCounter):
output += "p\u02B0"
else:
output += "p"
elif phoneme == "b":
output += "b"
elif phoneme == "t":
if p.isInitialWord(syllableCounter, phonemeCounter):
output += "t\u02B0"
else:
output += "t"
elif phoneme == "d":
output += "d"
elif phoneme == "k":
if p.getTongueSyllable(syl) == 1:
output += "c"
else:
output += "k"
if p.isInitialWord(syllableCounter, phonemeCounter):
output += "\u02B0"
elif phoneme == "g":
if p.getTongueSyllable(syl) == 1:
output += "\u025F"
else:
output += "g"
elif phoneme == "f":
output += "f"
elif phoneme == "v":
if p.isInitialWord(syllableCounter, phonemeCounter) == False and p.isFinalWord(syllableCounter,
phonemeCounter,
seq) == False:
indexV = findMyIndex(syllableCounter, phonemeCounter, seq)
previousP = indexV - 1
nextP = indexV + 1
if p.isVowel(word[previousP]) and p.isVowel(word[nextP]):
output += "\u028B"
else:
output += "v"
else:
output += "v"
elif phoneme == "s":
output += "s"
elif phoneme == "z":
if p.isFinalWord(syllableCounter, phonemeCounter, seq):
output += "z\u0325"
else:
output += "z"
elif phoneme == "ş":
output += "\u0283"
elif phoneme == "j":
output += "\u0292"
elif phoneme == "ç":
output += "t\u0283"
elif phoneme == "c":
output += "d\u0292"
elif phoneme == "h":
if p.getTongueSyllable(syl) == 1:
output += "ç"
else:
output += "x"
elif phoneme == "y":
output += "j"
elif phoneme == "m":
output += "m"
elif phoneme == "n":
if p.isFinalWord(syllableCounter, phonemeCounter, seq):
output += "n"
else:
indexN = findMyIndex(syllableCounter, phonemeCounter, seq)
nextN = indexN + 1
if word[nextN] in ["v", "f", "k"] and word in ["cankurtaran", "enfes", "erkenvarmak", "envanter"]:
output += "\u0271"
elif word[nextN] in ["k", "g"] and word not in ["cankurtaran", "enfes", "erkenvarmak", "envanter"]:
output += "\u014B"
else:
output += "n"
elif phoneme == "r":
if p.isInitialWord(syllableCounter, phonemeCounter):
output += "r"
elif p.isFinalWord(syllableCounter, phonemeCounter, seq):
output += "\u0263"
else:
output
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import itertools
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from pathlib import Path
import pickle
import seaborn as sns
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import ShuffleSplit
from sklearn import metrics
from sklearn.ensemble import RandomForestRegressor
from sklearn.inspection import permutation_importance
from sklearn.model_selection import GridSearchCV
# Load dataset using pickle
def load_data(name):
'''
Input
-----
name: string specifying the path to the .pkl file to load
Output
------
data: The loaded pickled dataset. The default dataset coming with this repo
(data/net_data.pkl) is a list of dictionaries summarizing brain
network data.
Each dictionary contains the following keys and values:
'C' : a 2D NxN numpy array representing the connections
between the N areas. C[i,j] denotes the connection from
area i to area j. 0 = connection not existing. Values different from 0
can be continuous, in which case they reflect the strength of the
connection, or binary, that is, 1 denoting the presence of a connection.
'Dist' : a 2D NxN numpy array representing the physical
distances between the N areas (in mm). Dist[i,j] denotes the
physical distance from area i to area j.
'Delta' : a 2D NxN numpy array representing the
similarity of areas in terms of their cytological composition.
This can be categorical or continuous. Delta[i,j] denotes the
difference of area i from area j in terms of their cytological status.
'Names' : a list of strings denoting the names of the N brain
areas.
'dataset' : a string denoting the name of the dataset.
See the following publications for a thorough description of the data:
<NAME>, <NAME>, <NAME>, <NAME> (2019)
PLoS Biol 17(3): e2005346.
https://doi.org/10.1371/journal.pbio.2005346
'''
with open(name, 'rb') as fp:
data = pickle.load(fp)
return data
# Create dependent (Y) and independent variables (X)
def create_x_y(dataset_name=None, dataset=None):
'''
Input
-----
dataset_name, string {'macaque_monkey' 'Horvat_mouse' 'marmoset_monkey'}
specifying which part of the dataset will be used
dataset, a list of dictionaries returned from the load_data function
Output
------
X, a 2D NxM numpy array where N is Dist.dim-len(diag(Dist)) and M=2.
Thus X contains 2 predictors and Dist.dim-len(diag(Dist)) observations.
Y, a 1D numpy array containing the connection status (absent=0, present=1
or a continuous variable) for Dist.dim-len(diag(Dist)) observations.
'''
for ds in dataset:
if ds['dataset'] == dataset_name: #dataset name is the desired one?
C = ds['C'] #network matrix
Dist = ds['Dist'] #distance matrix
Delta = ds['Delta'] #delta matrix (cytology difference)
idx = np.where(~np.eye(C.shape[0], dtype=bool)) #get the non-diagonal elements
Y = C[idx] #dependent variable (connections in the network)
X = np.vstack((Dist[idx], Delta[idx])) #predictors (distance and cytology)
X = X.T
return X, Y
# Use cross validation to perform logistic regression based predictions
def cv_logistic_regr(X=None, Y=None,
cv=None, scaler=None):
'''
Input
-----
X, a 2D NxM numpy array where M are the features/predictors and N the
observations. Function create_x_y can be used for returning X
Y, a 1D numpy array with N observations
cv, a cross validation iterator (see scikit-learn for full documentation)
scaler, a scaler that transform features by scaling each feature to a
given range (see scikit-learn for full documentation).
Output
------
all_auc, list with length = nr of folds (specified in the cv) keeping the
area under the ROC curve computed at each fold
all_ap, same as above but keeping the average precision scores
all_tpr, a list of length = nr of folds (specified in the cv) of numpy arrays
keeping the true positive rates for each fold. The shape of each numpy
array is determined by the thresholds used for computing the tpr
(see roc_curve from scikit-learn)
all_fpr, same as above but for the false positive rate
all_prec, same as above but for precision
all_recall, same as above but for recall
'''
# Logistic regression
log_regr = LogisticRegression(max_iter=1000)
all_tpr = []
all_fpr = []
all_auc = []
all_prec = []
all_recall = []
all_ap = []
# Run the cross validation following the strategy specified by cv
for i, (train, test) in enumerate(cv.split(X, Y)):
X_train, Y_train = X[train, :], Y[train]
X_test, Y_test = X[test, :], Y[test]
# If only one feature, then reshape
if X.shape[1] == 1:
X_train = X_train.reshape(-1, 1)
X_test = X_test.reshape(-1, 1)
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
log_regr.fit(X_train, Y_train)
scores = log_regr.predict_proba(X_test)
# ROC
fpr, tpr, thresholds = metrics.roc_curve(Y_test,
scores[:, 1],
pos_label=1)
auc = metrics.auc(fpr, tpr)
# keep auc fpr and tpr across folds
all_tpr.append(tpr)
all_fpr.append(fpr)
all_auc.append(auc)
# Precision-recall
precision, recall, thresholds = metrics.precision_recall_curve(Y_test,
scores[:, 1],
pos_label=1)
ap = metrics.average_precision_score(Y_test,
scores[:, 1],
pos_label=1)
# Keep precision, recall and average precision
all_prec.append(precision)
all_recall.append(recall)
all_ap.append(ap)
return all_auc, all_ap, all_tpr, all_fpr, all_prec, all_recall
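# Usage sketch (illustrative names; X, Y come from create_x_y above, and the
# thresholding of Y into binary labels is an assumption for weighted networks):
#   data = load_data('data/net_data.pkl')
#   X, Y = create_x_y(dataset_name='macaque_monkey', dataset=data)
#   cv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=0)
#   scaler = MinMaxScaler()
#   all_auc, all_ap, all_tpr, all_fpr, all_prec, all_recall = \
#       cv_logistic_regr(X=X, Y=(Y > 0).astype(int), cv=cv, scaler=scaler)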
# Visualize curves with seaborn (ROC or precision recall)
def visualize_curve(x, y,
metrics=None,
metric_name=None,
x_label=None,
y_label=None,
title=None,
file_name=None,
path_save=None):
'''
Input
-----
x, a list of numpy arrays denoting the value along the x axis for plotting
the curve (e.g., fpr)
y, same as above but for the y axis
Note: a separate curve will be plotted for each item/numpy array in the
list. Thus, total nr of curves=len(x)=len(y)
metric_name, string denoting the name of the metric that the curves
represent (e.g., ROC, Precision-recall)
x_label, string denoting the label for the x axis
y_label, same as above but for the y axis
title, same as above but for the title of the figure
file_name, string denoting the name of the file of the saved figure
(default=None, no file is saved)
path_save, string denoting the name of the folder path of the saved figure
(default=None, no file is saved)
'''
sns.set(font_scale=1)
sns.set_style('white')
fig, ax = plt.subplots()
for current_x, current_y in zip(x,y):
ax.plot(current_x, current_y,
lw=2, alpha=.8)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
mean_metrics = np.mean(metrics)
std_metrics = np.std(metrics)
label_metrics='(' + metric_name + '= %0.2f $\pm$ %0.2f)' % (mean_metrics, std_metrics)
ax.set_title(title + ' ' + label_metrics)
# If a path is specified, then save the figure as .svg
if path_save is not None:
file_name = file_name + '.svg'
file_to_save= path_save / file_name
plt.savefig(file_to_save, format='svg')
# Visualize boxplot with seaborn
def visualize_data_frame(df=None, filters=None,
xlabel=None, ylabel=None,
file_name=None, path_save=None,
palette=None
):
'''
Input
-----
df, dataframe of the data to be plotted as a boxplot (seaborn documentation)
filters, dict with keys corresponding to the categorical columns of the
dataframe and values corresponding to the values that should be
retained in the dataframe.
Example: If prior to the use of this visualization function
this filter is passed as an argument
filters = {
'grouping': 'cyto'
}
then only information linked to the grouping variable 'cyto' will
be visualized
x_label, string denoting the label for the x axis
y_label, same as above but for the y axis
file_name, string denoting the name of the file of the saved figure
(default=None, no file is saved)
path_save, string denoting the name of the folder path of the saved figure
(default=None, no file is saved)
palette, palette name, list, or dict, optional (see seaborn documentation)
'''
# reduce the dataframe by keeping only the rows with the column
# values specified in filters
if filters is not None:
for key, value in filters.items():
df = df[(df[key] == value)]
fig = plt.figure()
fig.set_size_inches(10, 10)
sns.set(font_scale=2)
sns.set_style('white')
ax = sns.boxplot(
x='grouping',
y='values',
data=df,
palette=palette
)
ax.set(xlabel=xlabel, ylabel=ylabel)
# If a path is specified, then save the figure as .svg
if path_save is not None:
file_name = file_name + '.svg'
file_to_save= path_save / file_name
plt.savefig(file_to_save, format='svg')
# Perform random forest regression
def rand_forest_regr(X=None, Y=None,
param_grid=None,
# scripts/helpers.py
# helper functions
import numpy as np
def gradient_descent(y, tx, initial_w, max_iters, gamma):
"""
Gradient descent algorithm.
inputs:
y = labels
tx = feature matrix
initial_w = vector of initial weights
max_iters = maximum number of iterations of the loop
gamma : Step size of the iterative method
output:
ws = list of weight vectors, one per gradient descent iteration
losses = list of MSE losses, one per gradient descent iteration
"""
ws = []
losses = []
weight = initial_w
ws.append(weight)
for n_iter in range(max_iters):
# compute loss, gradient
gradient = compute_gradient(y,tx,weight)
loss = compute_loss(y,tx,weight)
if np.isinf(loss):
raise ValueError("Infinite loss with gamma %.0e, exiting."%gamma)
# gradient w by descent update
weight = weight - gamma * gradient
# store w and loss
ws.append(weight)
losses.append(loss)
return losses, ws
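# Usage sketch (toy data; compute_gradient and compute_loss are defined below):
#   y = np.array([1.0, 2.0, 3.0])
#   tx = np.array([[1.0, 0.0], [1.0, 1.0], [1.0, 2.0]])   # bias column + one feature
#   losses, ws = gradient_descent(y, tx, initial_w=np.zeros(2), max_iters=100, gamma=0.1)
#   # ws[-1] should approach [1.0, 1.0], since y = 1 + x holds exactly here.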
def build_poly(x, degree):
"""
Polynomial basis functions for input data x, for j=0 up to j=degree.
builds feature matrix
inputs:
x : features matrix X
degree : degree used to create the polynomial
output:
poly : the features matrix X after polynomial expansion
"""
poly = np.ones((len(x), 1))
for deg in range(1, degree+1):
poly = np.c_[poly, np.power(x, deg)]
return poly
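# Worked example: build_poly(np.array([1, 2, 3]), 2) returns
#   [[1., 1., 1.],
#    [1., 2., 4.],
#    [1., 3., 9.]]
# i.e. a column of ones followed by x and x**2.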
def compute_stoch_gradient(y, tx, w):
"""
Compute a stochastic gradient from just a few examples n and their corresponding labels y_n.
inputs:
y = labels
tx = feature matrix
w: weight
output:
gradient : Gradient for loss function of Mean Squared Error evaluated in w
"""
N = len(y)
e = y - tx.dot(w)
gradient = -tx.T.dot(e) / N
return gradient
def stochastic_gradient_descent(y, tx, initial_w, batch_size, max_iters, gamma):
"""
Stochastic gradient descent algorithm; uses the batch_iter helper.
inputs:
y = labels
tx = feature matrix
initial_w = vector of initial weights
max_iters = maximum number of iterations of the loop
gamma : Step size of the iterative method
outputs:
ws = list of weight vectors, one per stochastic gradient descent update
"""
ws = [initial_w]
w = initial_w
for n_iter in range(max_iters):
#For SGD, the batch has size 1
for y_batch, tx_batch in batch_iter(y, tx, batch_size, num_batches=1, shuffle=True):
# compute a stochastic gradient and loss
gradient = compute_stoch_gradient(y_batch,tx_batch,w)
# update w through the stochastic gradient update
w = w - gamma * gradient
# store w
ws.append(w)
return ws
def batch_iter(y, tx, batch_size, num_batches=1, shuffle=True):
"""
Generate a minibatch iterator for a dataset.
Takes as input two iterables (here the output desired values 'y' and the input data 'tx')
Outputs an iterator which gives mini-batches of `batch_size` matching elements from `y` and `tx`.
Data can be randomly shuffled to avoid ordering in the original data messing with the randomness of the minibatches.
Example of use :
for minibatch_y, minibatch_tx in batch_iter(y, tx, 32):
<DO-SOMETHING>
inputs:
y = labels
tx = feature matrix
batch_size = data points used included in the batch
num_batches= Number of batches to be generated
shuffle=True
output:
Iterator which gives mini-batches of `batch_size` matching elements from `y` and `tx`
"""
data_size = len(y)
if shuffle:
shuffle_indices = np.random.permutation(np.arange(data_size))
shuffled_y = y[shuffle_indices]
shuffled_tx = tx[shuffle_indices]
else:
shuffled_y = y
shuffled_tx = tx
for batch_num in range(num_batches):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, data_size)
if start_index != end_index:
yield shuffled_y[start_index:end_index], shuffled_tx[start_index:end_index]
def sigmoid(t):
"""
Apply sigmoid function on t
input:
t = Vector in which sigmoid is evaluated
output:
Sigmoid function evaluated in t
"""
return 1.0 / (1 + np.exp(-t))
def new_labels(w, tx):
"""
Generates class predictions given weights, and a test data matrix
input:
w = weight
tx = feature matrix
output
y_pred :class predictions given weights, and a test data matrix
"""
y_prediction = tx.dot(w)
y_prediction[np.where(y_prediction <= 0.5)] = 0
y_prediction[np.where(y_prediction > 0.5)] = 1
return y_prediction
def compute_gradient(y, tx, w, kind='mse'):
"""
Compute the gradient
inputs:
y = labels
tx = feature matrix
w : weight
kind : mse or mae
output:
Gradient for loss function evaluated in w
raise : NotImplementedError
"""
error = y - tx.dot(w)
if kind == 'mse':
return -tx.T.dot(error)/len(y)
elif kind == 'mae':
return -np.sign(error).dot(tx)/len(y) #Sum rows
else:
raise NotImplementedError
def calculate_gradient(y, tx, w):
"""
Compute the gradient of the negative log likelihood loss function
inputs:
y = labels
tx = feature matrix
w = weight
output:
out = gradient of the negative log likelihood loss function
"""
probLabel = sigmoid(tx.dot(w))
grad = tx.T.dot(probLabel-y)
return grad
def compute_loss(y, tx, w, kind='mse'):
"""
Computes the loss, based on the cost function specified
inputs:
y = labels
tx = feature matrix
w : weight
kind: mae or mse
output:
the loss
raise NotImplementedError
"""
error = y - tx.dot(w)
if kind == 'mse':
return 1/2*np.mean(error**2)
elif kind == 'mae':
return sum(np.abs(error))/len(y)
else:
raise NotImplementedError
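# Worked example (MSE): with y = [1, 2], tx = [[1], [1]] and w = [0],
#   error = y - tx.dot(w) = [1, 2]
#   compute_loss(y, tx, w) = 0.5 * mean([1, 4]) = 1.25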
def calculate_loss(y, tx, w):
"""
Compute the cost by negative log likelihood
inputs:
y = labels
tx = feature matrix
w : weight
output:
out = loss value by negative log likelihood evaluated in w
"""
probLabel = sigmoid(tx.dot(w))
loss = y.T.dot(np.log(probLabel)) + (1-y).T.dot(np.log(1-probLabel))
return np.squeeze(-loss)
def calculate_loss_reg(y, tx, w, lambda_):
"""
Compute the cost by negative log likelihood for Regularized Logistic Regression
inputs:
y = labels
tx = feature matrix
lambda_: Regularization parameter
output:
Loss value by negative log likelihood evaluated in w for Regularized Logistic Regression
"""
n = tx.shape[0]
out= calculate_loss(y, tx, w) + (lambda_ / (2 * n)) * np.power(np.linalg.norm(w), 2)
return out
def penalized_logistic_regression(y, tx, w, lambda_):
"""
Return the loss and gradient by the algorithm of the penalized logistic regression
inputs:
y = labels
tx = feature matrix
w : weight
lambda_: Regularization parameter
output:
loss
gradient;
"""
num_samples = y.shape[0]
loss = calculate_loss(y, tx, w) + lambda_ * np.squeeze(w.T.dot(w))
grad = calculate_gradient(y, tx, w) + 2 * lambda_ * w
return loss, grad
def learning_by_penalized_gradient(y, tx, w, gamma, lambda_):
"""
One step of gradient descent, using the penalized logistic regression.
inputs:
y = labels
tx = feature matrix
w = weight
gamma = Step size of the iterative method
lambda_ = Regularization parameter
output:
w = updated w after 1 step of gradient descent for penalized logistic regression
loss = after 1 step of gradient descent for penalized logistic regression
norm of the gradient
"""
loss, gradient = penalized_logistic_regression(y, tx, w, lambda_)
w -= gamma * gradient
return w, loss, np.linalg.norm(gradient)
def predict_labels(w, data):
"""
Generates class predictions given weights, and a test data matrix for Least Squares
inputs :
w: weights
data: the test data
output:
y_prediction : predictions for w and the data matrix for Least Squares
"""
y_prediction = np.dot(data, w)
y_prediction[np.where(y_prediction <= 0)] = -1
y_prediction[np.where(y_prediction > 0)] = 1
return y_prediction
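# Worked example: if data.dot(w) = [-0.3, 0.0, 0.7],
#   predict_labels(w, data) returns [-1, -1, 1]
# (scores <= 0 map to -1, scores > 0 map to 1).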
def predict_labels_log(weights, data):
"""
Generates class predictions given weights, and a test data matrix for Logistic Regression
inputs :
weights: the weights
data: the test data
output:
y_prediction : predictions for the weights and the data matrix for Logistic Regression
"""
y_prediction = np.dot(data, weights)
y_prediction[np.where(y_prediction <= 0.5)] = -1
y_prediction[np.where(y_prediction > 0.5)] = 1
return y_prediction
def calculate_logistic_loss(y, tx, w):
"""Compute the cost by negative log-likelihood.
inputs :
y = labels
tx = feature matrix
w: weights
output:
cost by negative log likelihood
"""
pred = sigmoid(tx.dot(w))
correction_factor = 1e-10
loss = y.T.dot(np.log(pred + correction_factor)) + (1.0 - y).T.dot(np.log((1.0 - pred)+ correction_factor))
return np.squeeze(-loss) #removes single dimensional entries
def calculate_logistic_gradient(y, tx, w):
"""Compute the gradient of loss for sigmoidal prediction.
inputs :
y = labels
tx = feature matrix
w: weights
output:
grad : logistic grad"""
pred = sigmoid(tx.dot(w))
err = pred - y
grad = np.transpose(tx) @ err
return grad
def learning_by_gradient_descent(y, tx, w, gamma):
"""
Do one step of gradient descent using logistic regression.
inputs :
y = labels
tx = feature matrix
w: weights
gamma = Step size of the iterative method
output:
loss
w
"""
# compute the cost/loss
loss = calculate_loss(y,tx,w)
# compute the gradient
grad = calculate_gradient(y,tx,w)
# update
###############################################################################
##
## Copyright 2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
__all__ = ['Cases']
## The set of cases we construct and export from this module.
## Everything else is private.
Cases = []
import json
from zope.interface import implementer
from twisted.internet import reactor
from twisted.internet.defer import Deferred, DeferredList
from autobahn.twisted.websocket import connectWS
from autobahn.wamp import WampClientFactory, WampCraClientProtocol
from autobahntestsuite.testrun import TestResult
from autobahntestsuite.util import AttributeBag, perf_counter
from autobahntestsuite.interfaces import ITestCase
#### BEGIN OF CONFIG
###############################################################################
##
## WampCase 2.1.*
##
###############################################################################
## the topic our test publisher will publish to
##
TOPIC_PUBLISHED_TO = "http://example.com/simple"
## some topic the test publisher will NOT publish to
##
TOPIC_NOT_PUBLISHED_TO = "http://example.com/foobar"
## topic that we will publish to, but that is not
## registered on the testee, and hence no events
## shall be dispatched
##
TOPIC_NOT_REGISTERED = "http://example.com/barbaz"
## for each peer, list of topics the peer subscribes to
## the publisher is always the first peer in this list
##
PEERSET0_1 = [
[TOPIC_PUBLISHED_TO],
[TOPIC_PUBLISHED_TO],
[TOPIC_PUBLISHED_TO, TOPIC_NOT_PUBLISHED_TO],
[TOPIC_NOT_PUBLISHED_TO],
[]
]
PEERSET0_2 = [
[],
[TOPIC_PUBLISHED_TO],
[TOPIC_PUBLISHED_TO, TOPIC_NOT_PUBLISHED_TO],
[TOPIC_NOT_PUBLISHED_TO],
[]
]
PEERSET0_3 = [
[TOPIC_PUBLISHED_TO],
[TOPIC_PUBLISHED_TO],
[TOPIC_PUBLISHED_TO, TOPIC_NOT_PUBLISHED_TO],
[TOPIC_NOT_PUBLISHED_TO],
[]
]
PEERSET0_4 = [
[TOPIC_NOT_REGISTERED],
[TOPIC_NOT_REGISTERED],
[TOPIC_NOT_REGISTERED, TOPIC_NOT_PUBLISHED_TO],
[TOPIC_NOT_PUBLISHED_TO],
[]
]
PEERSET0_5 = [
[TOPIC_PUBLISHED_TO],
[TOPIC_PUBLISHED_TO],
[TOPIC_PUBLISHED_TO],
[TOPIC_PUBLISHED_TO],
[TOPIC_PUBLISHED_TO],
[TOPIC_PUBLISHED_TO],
[TOPIC_PUBLISHED_TO],
[TOPIC_PUBLISHED_TO],
[TOPIC_PUBLISHED_TO],
[TOPIC_PUBLISHED_TO]
]
SETTINGS0 = [
## (peers, publicationTopic, excludeMe, exclude, eligible, expectedReceivers)
(PEERSET0_1, TOPIC_PUBLISHED_TO, None, None, None, [1, 2]),
(PEERSET0_2, TOPIC_PUBLISHED_TO, None, None, None, [1, 2]),
(PEERSET0_3, TOPIC_NOT_REGISTERED, None, None, None, []),
(PEERSET0_4, TOPIC_NOT_REGISTERED, None, None, None, []),
(PEERSET0_5, TOPIC_PUBLISHED_TO, None, None, None, [1, 2, 3, 4, 5, 6, 7, 8, 9]),
]
PAYLOADS0 = [
[None],
[100],
[-0.248], # value has exact representation in _binary_ float (JSON is IEEE binary)
[-1000000],
["hello"],
[True],
[False],
[666, 23, 999],
[{}, [], None],
[100, "hello", {u'foo': u'bar'}, [1, 2, 3], ["hello", 20, {'baz': 'poo'}]]
]
###############################################################################
##
## WampCase 2.2.*
##
###############################################################################
TOPIC_PUBLISHED_TO = "http://example.com/simple"
PEERSET1 = [
[TOPIC_PUBLISHED_TO],
[TOPIC_PUBLISHED_TO]
]
SETTINGS1 = [
##
## (peers, publicationTopic, excludeMe, exclude, eligible, expectedReceivers)
##
(PEERSET1, TOPIC_PUBLISHED_TO, None, None, None, [1]),
(PEERSET1, TOPIC_PUBLISHED_TO, True, None, None, [1]),
(PEERSET1, TOPIC_PUBLISHED_TO, False, None, None, [0, 1]),
(PEERSET1, TOPIC_PUBLISHED_TO, None, [], None, [0, 1]),
(PEERSET1, TOPIC_PUBLISHED_TO, True, [], None, [0, 1]), # exclude has precedence over excludeMe !
(PEERSET1, TOPIC_PUBLISHED_TO, False, [], None, [0, 1]),
(PEERSET1, TOPIC_PUBLISHED_TO, None, [0], None, [1]),
(PEERSET1, TOPIC_PUBLISHED_TO, True, [0], None, [1]),
(PEERSET1, TOPIC_PUBLISHED_TO, False, [0], None, [1]), # exclude has precedence over excludeMe !
(PEERSET1, TOPIC_PUBLISHED_TO, None, [1], None, [0]), # exclude has precedence over excludeMe !
(PEERSET1, TOPIC_PUBLISHED_TO, True, [1], None, [0]), # exclude has precedence over excludeMe !
(PEERSET1, TOPIC_PUBLISHED_TO, False, [1], None, [0]), # exclude has precedence over excludeMe !
(PEERSET1, TOPIC_PUBLISHED_TO, None, [0, 1], None, []), # exclude has precedence over excludeMe !
(PEERSET1, TOPIC_PUBLISHED_TO, True, [0, 1], None, []), # exclude has precedence over excludeMe !
(PEERSET1, TOPIC_PUBLISHED_TO, False, [0, 1], None, []), # exclude has precedence over excludeMe !
##
## (peers, publicationTopic, excludeMe, exclude, eligible, expectedReceivers)
##
(PEERSET1, TOPIC_PUBLISHED_TO, None, None, [], []),
(PEERSET1, TOPIC_PUBLISHED_TO, None, None, [0], []),
(PEERSET1, TOPIC_PUBLISHED_TO, None, None, [1], [1]),
(PEERSET1, TOPIC_PUBLISHED_TO, None, None, [0, 1], [1]),
(PEERSET1, TOPIC_PUBLISHED_TO, True, None, [], []),
(PEERSET1, TOPIC_PUBLISHED_TO, True, None, [0], []),
(PEERSET1, TOPIC_PUBLISHED_TO, True, None, [1], [1]),
(PEERSET1, TOPIC_PUBLISHED_TO, True, None, [0, 1], [1]),
(PEERSET1, TOPIC_PUBLISHED_TO, False, None, [], []),
(PEERSET1, TOPIC_PUBLISHED_TO, False, None, [0], [0]),
(PEERSET1, TOPIC_PUBLISHED_TO, False, None, [1], [1]),
(PEERSET1, TOPIC_PUBLISHED_TO, False, None, [0, 1], [0, 1]),
##
## (peers, publicationTopic, excludeMe, exclude, eligible, expectedReceivers)
##
(PEERSET1, TOPIC_PUBLISHED_TO, None, [], [], []),
(PEERSET1, TOPIC_PUBLISHED_TO, None, [], [0], [0]), # !!
(PEERSET1, TOPIC_PUBLISHED_TO, None, [], [1], [1]),
(PEERSET1, TOPIC_PUBLISHED_TO, None, [], [0, 1], [0, 1]), # !!
(PEERSET1, TOPIC_PUBLISHED_TO, True, [], [], []),
(PEERSET1, TOPIC_PUBLISHED_TO, True, [], [0], [0]), # !!
(PEERSET1, TOPIC_PUBLISHED_TO, True, [], [1], [1]),
(PEERSET1, TOPIC_PUBLISHED_TO, True, [], [0, 1], [0, 1]), # !!
(PEERSET1, TOPIC_PUBLISHED_TO, False, [], [], []),
(PEERSET1, TOPIC_PUBLISHED_TO, False, [], [0], [0]),
(PEERSET1, TOPIC_PUBLISHED_TO, False, [], [1], [1]),
(PEERSET1, TOPIC_PUBLISHED_TO, False, [], [0, 1], [0, 1]),
##
## (peers, publicationTopic, excludeMe, exclude, eligible, expectedReceivers)
##
(PEERSET1, TOPIC_PUBLISHED_TO, None, [0], [], []),
(PEERSET1, TOPIC_PUBLISHED_TO, None, [0], [0], []),
(PEERSET1, TOPIC_PUBLISHED_TO, None, [0], [1], [1]),
(PEERSET1, TOPIC_PUBLISHED_TO, None, [0], [0, 1], [1]),
(PEERSET1, TOPIC_PUBLISHED_TO, True, [0], [], []),
(PEERSET1, TOPIC_PUBLISHED_TO, True, [0], [0], []),
(PEERSET1, TOPIC_PUBLISHED_TO, True, [0], [1], [1]),
(PEERSET1, TOPIC_PUBLISHED_TO, True, [0], [0, 1], [1]),
(PEERSET1, TOPIC_PUBLISHED_TO, False, [0], [], []),
(PEERSET1, TOPIC_PUBLISHED_TO, False, [0], [0], []),
(PEERSET1, TOPIC_PUBLISHED_TO, False, [0], [1], [1]),
(PEERSET1, TOPIC_PUBLISHED_TO, False, [0], [0, 1], [1]),
##
## (peers, publicationTopic, excludeMe, exclude, eligible, expectedReceivers)
##
(PEERSET1, TOPIC_PUBLISHED_TO, None, [1], [], []),
(PEERSET1, TOPIC_PUBLISHED_TO, None, [1], [0], [0]),
(PEERSET1, TOPIC_PUBLISHED_TO, None, [1], [1], []),
(PEERSET1, TOPIC_PUBLISHED_TO, None, [1], [0, 1], [0]),
(PEERSET1, TOPIC_PUBLISHED_TO, True, [1], [], []),
(PEERSET1, TOPIC_PUBLISHED_TO, True, [1], [0], [0]),
(PEERSET1, TOPIC_PUBLISHED_TO, True, [1], [1], []),
(PEERSET1, TOPIC_PUBLISHED_TO, True, [1], [0, 1], [0]),
(PEERSET1, TOPIC_PUBLISHED_TO, False, [1], [], []),
(PEERSET1, TOPIC_PUBLISHED_TO, False, [1], [0], [0]),
(PEERSET1, TOPIC_PUBLISHED_TO, False, [1], [1], []),
(PEERSET1, TOPIC_PUBLISHED_TO, False, [1], [0, 1], [0]),
##
## (peers, publicationTopic, excludeMe, exclude, eligible, expectedReceivers)
##
(PEERSET1, TOPIC_PUBLISHED_TO, None, [0, 1], [], []),
(PEERSET1, TOPIC_PUBLISHED_TO, None, [0, 1], [0], []),
(PEERSET1, TOPIC_PUBLISHED_TO, None, [0, 1], [1], []),
(PEERSET1, TOPIC_PUBLISHED_TO, None, [0, 1], [0, 1], []),
(PEERSET1, TOPIC_PUBLISHED_TO, True, [0, 1], [], []),
(PEERSET1, TOPIC_PUBLISHED_TO, True, [0, 1], [0], []),
(PEERSET1, TOPIC_PUBLISHED_TO, True, [0, 1], [1], []),
(PEERSET1, TOPIC_PUBLISHED_TO, True, [0, 1], [0, 1], []),
(PEERSET1, TOPIC_PUBLISHED_TO, False, [0, 1], [], []),
(PEERSET1, TOPIC_PUBLISHED_TO, False, [0, 1], [0], []),
(PEERSET1, TOPIC_PUBLISHED_TO, False, [0, 1], [1], []),
(PEERSET1, TOPIC_PUBLISHED_TO, False, [0, 1], [0, 1], []),
]
## The event payloads the publisher sends in one session.
##
## Note: be aware of JSON roundtripping "issues" like
## (ujson.loads(ujson.dumps(0.1234)) == 0.1234) => False
##
PAYLOADS1 = [["Hello, world!"]]
#### END OF CONFIG
class WampCase2_2_x_x_Protocol(WampCraClientProtocol):
def onSessionOpen(self):
self.test.result.log.append((perf_counter(), self.factory.peerIndex, self.session_id, "WAMP session opened to <strong>%s</strong> at <strong>%s</strong>." % (self.session_server, self.peer)))
if self.test.testee.auth:
d = self.authenticate(**self.test.testee.auth)
d.addCallbacks(self.onAuthSuccess, self.onAuthError)
else:
self.main()
def sendMessage(self, payload):
self.test.result.log.append((perf_counter(), self.factory.peerIndex, self.session_id, '<pre class="wamp">TX => %s</pre>' % payload))
WampCraClientProtocol.sendMessage(self, payload)
def onMessage(self, payload, binary):
self.test.result.log.append((perf_counter(), self.factory.peerIndex, self.session_id, '<pre class="wamp">RX <= %s</pre>' % payload))
WampCraClientProtocol.onMessage(self, payload, binary)
def onAuthSuccess(self, permissions):
self.test.result.log.append((perf_counter(), self.factory.peerIndex, self.session_id, "WAMP session %s authenticated with credentials: <pre>%s</pre>" % (self.session_id, self.test.testee.auth)))
self.main()
def onAuthError(self, e):
uri, desc, details = e.value.args
self.test.result.log.append((perf_counter(), self.factory.peerIndex, self.session_id, "WAMP authentication error: %s" % details))
print "Authentication Error!", uri, desc, details
def main(self):
subscribeTopics = self.test.params.peers[self.factory.peerIndex]
for topic in subscribeTopics:
topic += self.factory.test._uriSuffix
self.subscribe(topic, self.onEvent)
self.test.result.log.append((perf_counter(), self.factory.peerIndex, self.session_id, "Subscribed to <pre>%s</pre>" % topic))
self.factory.onReady.callback(self.session_id)
def onEvent(self, topic, event):
self.test.result.log.append((perf_counter(), self.factory.peerIndex, self.session_id, "Received event for topic <pre>%s</pre> and payload <pre>%s</pre>" % (topic, event)))
if not self.test.result.observed.has_key(self.session_id):
self.test.result.observed[self.session_id] = []
self.test.result.observed[self.session_id].append((topic, event))
class WampCase2_2_x_x_Factory(WampClientFactory):
protocol = WampCase2_2_x_x_Protocol
def __init__(self, test, peerIndex, onReady, onGone):
WampClientFactory.__init__(self, test.testee.url)
self.test = test
self.peerIndex = peerIndex
self.onReady = onReady
self.onGone = onGone
self.proto = None
def buildProtocol(self, addr):
proto = self.protocol()
proto.factory = self
proto.test = self.test
proto.session_id = None
self.proto = proto
return proto
def clientConnectionLost(self, connector, reason):
reason = str(reason.value)
if self.proto and hasattr(self.proto, 'session_id'):
sid = self.proto.session_id
else:
sid = None
self.test.result.log.append((perf_counter(), self.peerIndex, sid, "Client connection lost: %s" % reason))
self.onGone.callback(None)
def clientConnectionFailed(self, connector, reason):
reason = str(reason.value)
self.test.result.log.append((perf_counter(), self.peerIndex, None, "Client connection failed: %s" % reason))
self.onGone.callback(reason)
class WampCase2_2_x_x_Params(AttributeBag):
"""
Test parameter set for configuring instances of WampCase2_*_*.
peers: a list with one item per WAMP session run during the test, where each item contains a list of topics each peer _subscribes_ to. The publisher that publishes during the test is always the first item in the list.
publicationTopic, excludeMe, exclude, eligible: parameters controlling how events are published during the test.
eventPayloads: a list of payloads each tested as event payload to the test at hand.
expectedReceivers: a list of session indices, where each index references
import numpy as np
from scipy.ndimage import map_coordinates
from scipy.fftpack import fftn, fftshift, ifftshift
from dipy.reconst.odf import OdfModel, OdfFit, gfa
from dipy.reconst.cache import Cache
from dipy.reconst.multi_voxel import multi_voxel_fit
from dipy.reconst.recspeed import local_maxima, remove_similar_vertices
class DiffusionSpectrumModel(OdfModel, Cache):
def __init__(self,
gtab,
qgrid_size=17,
r_start=2.1,
r_end=6.,
r_step=0.2,
filter_width=32,
normalize_peaks=False):
r""" Diffusion Spectrum Imaging
The theoretical idea underlying this method is that the diffusion
propagator $P(\mathbf{r})$ (probability density function of the average
spin displacements) can be estimated by applying 3D FFT to the signal
values $S(\mathbf{q})$
.. math::
:nowrap:
\begin{eqnarray}
P(\mathbf{r}) & = & S_{0}^{-1}\int S(\mathbf{q})\exp(-i2\pi\mathbf{q}\cdot\mathbf{r})d\mathbf{q}
\end{eqnarray}
where $\mathbf{r}$ is the displacement vector and $\mathbf{q}$ is the
wave vector which corresponds to different gradient directions. Here we
implement the method proposed by Wedeen et al. [1]_ to calculate the
ODFs.
The main assumption for this model is fast gradient switching and that
the acquisition gradients will sit on a keyhole Cartesian grid in
q_space [3]_.
Parameters
----------
gtab : GradientTable,
Gradient directions and bvalues container class
qgrid_size : int,
has to be an odd number. Sets the size of the q_space grid.
For example if qgrid_size is 17 then the shape of the grid will be
``(17, 17, 17)``.
r_start : float,
ODF is sampled radially in the PDF. This parameter sets where the
sampling should start.
r_end : float,
Radial endpoint of ODF sampling
r_step : float,
Step size of the ODF sampling from r_start to r_end
filter_width : float,
Strength of the hanning filter
References
----------
.. [1] <NAME>. al, "Mapping Complex Tissue Architecture With
Diffusion Spectrum Magnetic Resonance Imaging", MRM 2005.
.. [2] <NAME>. al, "Deconvolution in Diffusion
Spectrum Imaging", Neuroimage, 2010.
.. [3] <NAME>, "Towards an accurate brain tractography", PhD
thesis, University of Cambridge, 2012.
Examples
--------
In this example where we provide the data, a gradient table
and a reconstruction sphere, we calculate generalized FA for the first
voxel in the data with the reconstruction performed using DSI.
>>> from dipy.data import dsi_voxels, get_sphere
>>> data, gtab = dsi_voxels()
>>> sphere = get_sphere('symmetric724')
>>> from dipy.reconst.dsi import DiffusionSpectrumModel
>>> ds = DiffusionSpectrumModel(gtab)
>>> dsfit = ds.fit(data)
>>> from dipy.reconst.odf import gfa
>>> np.round(gfa(dsfit.odf(sphere))[0, 0, 0], 2)
0.11
Notes
------
A. Keep in mind that DSI expects gradients on both hemispheres. If your
gradients span only one hemisphere you need to duplicate the data and
project them to the other hemisphere before calling this class. The
function dipy.reconst.dsi.half_to_full_qspace can be used for this
purpose.
B. If you increase the size of the grid (parameter qgrid_size) you will
most likely also need to update the r_* parameters. This is because
the added zero padding from the increase of qgrid_size also introduces
a scaling of the PDF.
C. We assume that only one b0 volume is provided in the data.
See Also
--------
dipy.reconst.gqi.GeneralizedQSampling
"""
self.bvals = gtab.bvals
self.bvecs = gtab.bvecs
self.normalize_peaks = normalize_peaks
# 3d volume for Sq
if qgrid_size % 2 == 0:
raise ValueError('qgrid_size needs to be an odd integer')
self.qgrid_size = qgrid_size
# necessary shifting for centering
self.origin = self.qgrid_size // 2
# hanning filter width
self.filter = hanning_filter(gtab, filter_width)
# odf sampling radius
self.qradius = np.arange(r_start, r_end, r_step)
self.qradiusn = len(self.qradius)
# create qspace grid
self.qgrid = create_qspace(gtab, self.origin)
b0 = np.min(self.bvals)
self.dn = (self.bvals > b0).sum()
self.gtab = gtab
@multi_voxel_fit
def fit(self, data):
return DiffusionSpectrumFit(self, data)
class DiffusionSpectrumFit(OdfFit):
def __init__(self, model, data):
""" Calculates PDF and ODF and other properties for a single voxel
Parameters
----------
model : object,
DiffusionSpectrumModel
data : 1d ndarray,
signal values
"""
self.model = model
self.data = data
self.qgrid_sz = self.model.qgrid_size
self.dn = self.model.dn
self._gfa = None
self.npeaks = 5
self._peak_values = None
self._peak_indices = None
def pdf(self, normalized=True):
""" Applies the 3D FFT in the q-space grid to generate
the diffusion propagator
"""
values = self.data * self.model.filter
# create the signal volume
Sq = np.zeros((self.qgrid_sz, self.qgrid_sz, self.qgrid_sz))
# fill q-space
for i in range(len(values)):
qx, qy, qz = self.model.qgrid[i]
Sq[qx, qy, qz] += values[i]
# apply fourier transform
Pr = fftshift(np.real(fftn(ifftshift(Sq),
3 * (self.qgrid_sz, ))))
# clipping negative values to 0 (ringing artefact)
Pr = np.clip(Pr, 0, Pr.max())
# normalize the propagator to obtain a pdf
if normalized:
Pr /= Pr.sum()
return Pr
def rtop_signal(self, filtering=True):
""" Calculates the return to origin probability (rtop) from the signal
rtop equals to the sum of all signal values
Parameters
----------
filtering : boolean
default true, perform the hanning filtering
Returns
-------
rtop : float
the return to origin probability
"""
if filtering:
values = self.data * self.model.filter
else:
values = self.data
rtop = values.sum()
return rtop
def rtop_pdf(self, normalized=True):
r""" Calculates the return to origin probability from the propagator, which is
the propagator evaluated at zero (see Descoteaux et al. [1]_, Tuch [2]_, Wu et al. [3]_)
rtop = P(0)
Parameters
----------
normalized : boolean
default true, normalize the propagator by its sum in order to obtain a pdf
Returns
-------
rtop : float
the return to origin probability
References
----------
.. [1] Descoteaux M. et. al, "Multiple q-shell diffusion propagator
imaging", Medical Image Analysis, vol 15, No. 4, p. 603-621, 2011.
.. [2] <NAME>., "Diffusion MRI of Complex Tissue Structure",
PhD Thesis, 2002.
.. [3] <NAME>. et. al, "Computation of Diffusion Function Measures
in q -Space Using Magnetic Resonance Hybrid Diffusion Imaging",
IEEE TRANSACTIONS ON MEDICAL IMAGING, vol. 27, No. 6, p. 858-865, 2008
"""
Pr = self.pdf(normalized=normalized)
center = self.qgrid_sz // 2
rtop = Pr[center, center, center]
return rtop
def msd_discrete(self, normalized=True):
r""" Calculates the mean squared displacement on the discrete propagator
.. math::
:nowrap:
\begin{equation}
MSD_{DSI}=\int_{-\infty}^{\infty}\int_{-\infty}^{\infty}\int_{-\infty}^{\infty} P(\hat{\mathbf{r}}) \cdot \hat{\mathbf{r}}^{2} \ dr_x \ dr_y \ dr_z
\end{equation}
where $\hat{\mathbf{r}}$ is a point in the 3D Propagator space (see Wu et. al [1]_).
Parameters
----------
normalized : boolean
default true, normalize the propagator by its sum in order to obtain a pdf
Returns
-------
msd : float
the mean square displacement
References
----------
.. [1] <NAME>. et. al, "Hybrid diffusion imaging", NeuroImage, vol 36,
p. 617-629, 2007.
"""
Pr = self.pdf(normalized=normalized)
# create the r squared 3D matrix
gridsize = self.qgrid_sz
center = gridsize // 2
a = np.arange(gridsize) - center
x = np.tile(a, (gridsize, gridsize, 1))
y = np.tile(a.reshape(gridsize, 1), (gridsize, 1, gridsize))
z = np.tile(a.reshape(gridsize, 1, 1), (1, gridsize, gridsize))
r2 = x ** 2 + y ** 2 + z ** 2
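# Note (illustrative alternative, not part of the original module): the three
# np.tile calls above just build coordinate grids; an equivalent construction
# would be
#     r2 = ((np.indices((gridsize,) * 3) - center) ** 2).sum(axis=0)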
msd = np.sum(Pr * r2) / float((gridsize ** 3))
return msd
def odf(self, sphere):
r""" Calculates the real discrete odf for a given discrete sphere
.. math::
:nowrap:
\begin{equation}
\psi_{DSI}(\hat{\mathbf{u}})=\int_{0}^{\infty}P(r\hat{\mathbf{u}})r^{2}dr
\end{equation}
where $\hat{\mathbf{u}}$ is the unit vector which corresponds to a
sphere point.
"""
interp_coords = self.model.cache_get('interp_coords',
key=sphere)
if interp_coords is None:
interp_coords = pdf_interp_coords(sphere,
self.model.qradius,
self.model.origin)
self.model.cache_set('interp_coords', sphere, interp_coords)
Pr = self.pdf()
# calculate the orientation distribution function
return pdf_odf(Pr, self.model.qradius, interp_coords)
def create_qspace(gtab, origin):
""" create the 3D grid which holds the signal values (q-space)
Parameters
----------
gtab : GradientTable
origin : (3,) ndarray
center of the qspace
Returns
-------
qgrid : ndarray
qspace coordinates
"""
# create the q-table from bvecs and bvals
qtable = create_qtable(gtab)
# center and index in qspace volume
qgrid = qtable + origin
return qgrid.astype('i8')
def create_qtable(gtab):
""" create a normalized version of gradients
"""
bv = gtab.bvals
bmin = np.sort(bv)[1]
bv = np.sqrt(bv / bmin)
qtable = np.vstack((bv, bv, bv)).T * gtab.bvecs
return np.floor(qtable + .5)
def hanning_filter(gtab, filter_width):
""" create a hanning window
The signal is premultiplied by a Hanning window before
Fourier transform in order to ensure a smooth attenuation
of the signal at high q values.
Parameters
----------
gtab : GradientTable
filter_width : int
Returns
-------
filter : (N,) ndarray
where N is the number of non-b0 gradient directions
"""
qtable = create_qtable(gtab)
# calculate r - hanning filter free parameter
r = np.sqrt(qtable[:, 0] ** 2 + qtable[:, 1] ** 2 + qtable[:, 2] ** 2)
# apply the hanning window to taper the signal at high q values
return .5 * np.cos(2 * np.pi * r / filter_width)
import copy
import os
import secrets
from collections import OrderedDict, defaultdict
from functools import cached_property, lru_cache
from typing import Any, Callable, List, Union
from bs4 import BeautifulSoup
# from pydispatch import dispatcher
from zineb.exceptions import FieldError, ModelExistsError
from zineb.http.responses import HTMLResponse
from zineb.models.fields import Empty, Field
from zineb.models.functions import ExpressionMixin, Math, When
from zineb.settings import settings
from zineb.utils.formatting import LazyFormat
from zineb.models.functions import (Add, Divide, ExtractDay, ExtractMonth,
ExtractYear, Multiply, Substract)
# from zineb.utils.formatting import remap_to_dict
class DataContainer:
"""
A container that regroups all the data that
has been parsed from the internet in one place.
Parameters
----------
- names: list of field names
"""
# values = defaultdict(list)
current_updated_fields = set()
def __init__(self):
self.values = defaultdict(list)
self._last_created_row = []
def __repr__(self):
return self.values
def __str__(self):
return str(dict(self.as_values()))
@classmethod
def as_container(cls, *names):
instance = cls()
for name in names:
instance.values[name]
setattr(instance, 'names', list(names))
return instance
@property
def _last_id(self) -> int:
"""
Returns the last registered ID within
the first container
Returns:
    int: the ID stored in the last row of the first container, or 0 if it is empty
"""
container = self.get_container(self.names[0])
if not container:
return 0
return container[-1][0]
def _last_value(self, name: str):
return self.get_container(name)[-1][-1]
@property
def _next_id(self):
return self._last_id + 1
def get_container(self, name: str):
return self.values[name]
def update_last_item(self, name: str, value: Any):
container = self.get_container(name)
if isinstance(value, tuple):
container[-1] = value
else:
# TODO: Check that the id is correct
container[-1] = (self._last_id, value)
def update(self, name: str, value: Any):
"""
Adds a new value to the containers by tracking the
fields that are being updated. If the name changes,
a new row of value is generated
"""
if value == Empty:
value = None
def row_generator():
for _, field_name in enumerate(self.names, start=1):
if name == field_name:
yield (self._next_id, value)
else:
yield (self._next_id, None)
if name in self.current_updated_fields:
self.current_updated_fields.clear()
self.current_updated_fields.add(name)
self._last_created_row = None
self._last_created_row = list(row_generator())
for i, field_name in enumerate(self.names, start=1):
self.get_container(field_name).append(self._last_created_row[i - 1])
else:
self.current_updated_fields.add(name)
if self._last_created_row:
for i, field_name in enumerate(self.names, start=1):
if field_name == name:
value_to_update = list(self._last_created_row[i - 1])
value_to_update[-1] = value
self.update_last_item(field_name, tuple(value_to_update))
else:
self._last_created_row = list(row_generator())
for i, field_name in enumerate(self.names, start=1):
self.get_container(field_name).append(self._last_created_row[i - 1])
def update_multiple(self, attrs: dict):
for key, value in attrs.items():
container = self.get_container(key)
container.append((self._next_id, value))
def as_values(self):
"""
Return collected values by removing the index part
in the tuple e.g [(1, ...), ...] becomes [..., ...]
"""
container = {}
for key, values in self.values.items():
values_only = map(lambda x: x[-1], values)
container.update({key: list(values_only)})
return container
# def as_list(self):
# """
# Return a collection of dictionnaries
# e.g. [{a: 1}, {a: 2}, ...]
# """
# return list(remap_to_dict(self.as_values()))
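# Hypothetical usage sketch of DataContainer (the field names below are made
# up for illustration and are not part of the original module). Tracing the
# update logic above, this is roughly what you would get:
#
#     container = DataContainer.as_container('name', 'height')
#     container.update('name', 'Kendall')
#     container.update('height', 184)
#     container.as_values()   # -> {'name': ['Kendall'], 'height': [184]}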
class ModelRegistry:
"""
This class is a convenience container that remembers
the models that were created and the order in which
they were created
"""
counter = 0
registry = OrderedDict()
def __getitem__(self, name: str):
return self.registry[name]
def __iter__(self):
return iter(self.models)
@property
def models(self):
return list(self.registry.values())
def add(self, name: str, model: type):
if self.has_model(name):
raise ModelExistsError(name)
self.counter = self.counter + 1
return self.registry.setdefault(name, model)
def get_model(self, name: str):
return self.registry[name]()
def has_model(self, name: str):
return name in self.registry
model_registry = ModelRegistry()
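# Hypothetical usage sketch of the registry ('MyModel' is a made-up name; in
# practice models are registered automatically by the Base metaclass below):
#
#     model_registry.add('MyModel', MyModel)
#     model_registry.has_model('MyModel')   # -> True
#     model_registry.get_model('MyModel')   # -> a MyModel instance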
class FieldDescriptor:
"""A class that contains and stores
all the given fields of a model"""
cached_fields = OrderedDict()
def __getitem__(self, name) -> Field:
return self.get_field(name)
@cached_property
def field_names(self):
return list(self.cached_fields.keys())
# @lru_cache(maxsize=5)
def get_field(self, name) -> Field:
try:
return self.cached_fields[name]
except KeyError:
raise FieldError(name, self.field_names)
def has_fields(self, *names, raise_exception=False):
result = all(map(lambda x: x in self.field_names, names))
if raise_exception and not result:
raise FieldError(
LazyFormat('Field does not exist: {fields}', fields=', '.join(names))
)
return result
class ModelOptions:
"""
A container that stores the options
of a given model Meta
"""
authorized_options = ['ordering', 'label']
def __init__(self, options: Union[List[tuple[str]], dict]):
# self.cached_options = OrderedDict(self._add_options(options, only_check=True))
self.cached_options = OrderedDict(options)
self.ordering_field_names = set()
self.ascending_fields = []
self.descending_fields = []
self.ordering_booleans = []
if self.has_option('ordering'):
ordering = self.get_option_by_name('ordering')
for field in ordering:
self.ordering_field_names.add(
field.removeprefix('-')
)
self.ascending_fields = [
field for field in ordering
if not field.startswith('-')
]
self.descending_fields = [
field for field in ordering
if field.startswith('-')
]
# Convert each ordering field on the
# model to Booleans. This is what a
# DataFrame accepts in order to sort
# a particular column
def convert_to_boolean(value):
if value.startswith('-'):
return False
return True
self.ordering_booleans = list(map(convert_to_boolean, ordering))
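# For illustration: with Meta ordering = ['name', '-age'] the attributes above
# end up roughly as ascending_fields == ['name'],
# descending_fields == ['-age'] and ordering_booleans == [True, False].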
def __call__(self, options):
# old_options = copy.deepcopy(self.cached_options)
self.__init__(options)
# self.cached_options = old_options | self.cached_options
return self
def __getitem__(self, name):
return self.cached_options[name]
# def _add_options(self, options: dict, only_check: bool=False):
# if isinstance(options, list):
# options = OrderedDict(options)
# non_authorized_options = []
# def _check_option_authorized(item):
# key, _ = item
# if key.startswith('__'):
# return False
# if key in self.authorized_options:
# return True
# non_authorized_options.append(key)
# return False
# options = list(filter(_check_option_authorized, options.items()))
# if non_authorized_options:
# raise ValueError(LazyFormat(
# "Meta received an illegal option. Valid options are {options}.",
# options=', '.join(self.authorized_options)
# ))
# if only_check:
# return options
# return self.__call__(options)
def get_option_by_name(self, name):
return self.cached_options.get(name)
def has_option(self, name):
return name in self.cached_options
class Base(type):
def __new__(cls, name, bases, attrs):
super_new = super().__new__
parents = [b for b in bases if isinstance(b, Base)]
if not parents:
return super_new(cls, name, bases, attrs)
declared_fields = set()
for key, field_obj in attrs.items():
if isinstance(field_obj, Field):
field_obj._bind(key)
declared_fields.add((key, field_obj))
descriptor = FieldDescriptor()
attrs['_fields'] = descriptor
if declared_fields:
descriptor.cached_fields = OrderedDict(declared_fields)
attrs['_fields'] = descriptor
default_options = [('label', f"models.base.{name}")]
meta = ModelOptions(default_options)
if 'Meta' in attrs:
meta_dict = attrs.pop('Meta').__dict__
# authorized_options = ['ordering', 'label']
non_authorized_options = []
def check_option(item):
key, _ = item
if key.startswith('__'):
return False
if key in meta.authorized_options:
return True
non_authorized_options.append(key)
return False
options = list(filter(check_option, meta_dict.items()))
if non_authorized_options:
raise ValueError("Meta received an illegal "
f"option. Valid options are: {', '.join(meta.authorized_options)}")
# meta = meta._add_options(meta_dict)
default_options.extend(options)
meta = meta(default_options)
attrs['_meta'] = meta
if declared_fields:
# This is where we explicitly register
# models that have declared fields and
# that are therefore actual user-created models
new_class = super_new(cls, name, bases, attrs)
model_registry.add(name, new_class)
return new_class
return super_new(cls, name, bases, attrs)
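# Hypothetical example of a user model picked up by this metaclass, built on
# the DataStructure class defined below (the CharField/DateField names are
# assumptions for illustration only):
#
#     from zineb.models import fields
#
#     class Player(DataStructure):
#         name = fields.CharField()
#         date_of_birth = fields.DateField()
#
#         class Meta:
#             ordering = ['name']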
class DataStructure(metaclass=Base):
def __init__(self, html_document: BeautifulSoup=None,
response: HTMLResponse=None):
self._cached_result = DataContainer.as_container(
*self._fields.field_names
)
self.html_document = html_document
self.response = response
self.parser = self._choose_parser()
def _get_field_by_name(self, field_name) -> Field:
"""
Gets the cached field object that was registered
on the model via the FieldDescriptor
Parameters
----------
- field_name (str): the field name to get
"""
return self._fields.get_field(field_name)
def _choose_parser(self):
if self.html_document is not None:
return self.html_document
if self.response is not None:
if not isinstance(self.response, HTMLResponse):
raise TypeError(('The request object should be a '
'zineb.response.HTMLResponse object.'))
return self.response.html_page
def _add_without_field_resolution(self, field_name: str, value:Any):
"""
When the value of a field has already been
resolved, just add it to the model. This is
an internal function used by
other internal functions.
"""
cached_values = self._cached_result.get(field_name, [])
cached_values.append(value)
self._cached_result.update({field_name: cached_values})
def add_calculated_value(self, name: str, value: Any, *funcs):
funcs = list(funcs)
for func in funcs:
if not isinstance(func, (Add, Substract, Divide, Multiply)):
raise TypeError('Function should be '
'an instance of Calculate')
setattr(func, 'model', self)
setattr(func, 'field_name', name)
if len(funcs) == 1:
func._cached_data = value
func.resolve()
self.add_value(func.field_name, func._calculated_result)
else:
for i in range(len(funcs)):
if i == 0:
funcs[0]._cached_data = value
else:
# When there are multiple functions, the
# _cached_data of the current function
# should be the calculated result of the
# previous one. This technique allows
# us to run multiple expressions on
# one single value
funcs[i]._cached_data = funcs[i - 1]._cached_data
funcs[i].resolve()
# Once everything has been calculated,
# use the data of the last function to
# add the given value to the model
self.add_value(funcs[-1].field_name, funcs[-1]._cached_data)
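# Hypothetical usage sketch (the Add(...)/Multiply(...) constructor arguments
# are assumptions for illustration; only instances of Add, Substract, Divide
# or Multiply are accepted, per the isinstance check above):
#
#     model.add_calculated_value('price', 25, Add(5), Multiply(2))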
def add_case(self, value: Any, case: Callable):
"""
Add a value to the model based on a specific
conditions determined by a When-function.
Parameters
----------
- value (Any): the value to test
- case (Callable): When-function
"""
if not isinstance(case, When):
raise TypeError('Case should be a When class.')
case._cached_data = value
case.model = self
field_name, value = case.resolve()
self.add_value(field_name, value)
def add_using_expression(self, name: str, tag: str, attrs: dict={}):
"""
Adds a value to your Model object using | |
<reponame>Vants/stampsreplacer
import enum
import numpy as np
import os
from scripts.MetaSubProcess import MetaSubProcess
from scripts.funs.PsTopofit import PsTopofit
from scripts.processes.PsEstGamma import PsEstGamma
from scripts.processes.PsFiles import PsFiles
from scripts.utils.ArrayUtils import ArrayUtils
from scripts.utils.internal.LoggerFactory import LoggerFactory
from scripts.utils.MatlabUtils import MatlabUtils
from scripts.utils.internal.ProcessCache import ProcessCache
from scripts.utils.internal.ProcessDataSaver import ProcessDataSaver
class PsSelect(MetaSubProcess):
"""Select stabile pixels that become persistent scatterer"""
__B = np.array([])
__FILE_NAME = "ps_select"
def __init__(self, ps_files: PsFiles, ps_est_gamma: PsEstGamma):
self.__PH_PATCH_CACHE = True
self.__ps_files = ps_files
self.__ps_est_gamma = ps_est_gamma
self.__logger = LoggerFactory.create("PsSelect")
self.__set_internal_params()
def __set_internal_params(self):
"""In StaMPS these where saved with setparam and getparam.
All values are that small_baseline_flag = 'N'.
In StaMPS max_desinty_rand ja max_percent_rand where two seperate varaibles, there we get
them using function __get_max_rand.
"""
self.__slc_osf = 1
self.__clap_alpha = 1
self.__clap_beta = 0.3
self.__clap_win = 32
self.__select_method = self._SelectMethod.DESINTY  # DESINTY (i.e. density) or PERCENT
# todo Why is this here
self.__gamma_stdev_reject = 0
# TODO This is [] in Stamps
self.__drop_ifg_index = np.array([])
self.__low_coh_tresh = 31 # 31/100
self.__gaussian_window = np.multiply(np.asmatrix(MatlabUtils.gausswin(7)),
np.asmatrix(MatlabUtils.gausswin(7)).conj().transpose())
class __DataDTO(object):
"""This is inner data transfer object. It is because some functions take very many
parameters, so we use this class. It is filled in load_ps_params function"""
def __init__(self, ph: np.ndarray, nr_ifgs: int, xy: np.ndarray,
da: np.ndarray, ifg_ind: np.ndarray, da_max: np.ndarray,
rand_dist: np.ndarray):
self.ph = ph
self.nr_ifgs = nr_ifgs
self.xy = xy
self.da = da
self.ifg_ind = ifg_ind
self.da_max = da_max
self.rand_dist = rand_dist
@enum.unique
class _SelectMethod(enum.Enum):
"""Internal varaible 'select_method' possible values"""
DESINTY = 1
PERCENT = 2
def start_process(self):
"""Please note that min_coh, coh_thresh and coh_thresh_ind params must be precise as
possible. Because 0.0001 offset may ruin coh_threh result"""
self.__logger.info("Start")
data = self.__load_ps_params()
max_rand = self.__get_max_rand(data.da_max, data.xy)
self.__logger.debug("max_rand: {0}".format(max_rand))
min_coh, da_mean, is_min_coh_nan_array = self.__get_min_coh_and_da_mean(
self.__ps_est_gamma.coh_ps, max_rand, data)
self.__logger.debug("min_coh.len: {0} ; da_mean.len: {1}"
.format(len(min_coh), len(da_mean)))
coh_thresh = self.__get_coh_thresh(min_coh, da_mean, is_min_coh_nan_array, data.da)
self.__logger.debug("coh_thresh.len: {0}".format(len(coh_thresh)))
coh_thresh_ind = self.__get_coh_thresh_ind(coh_thresh, data)
self.__logger.debug("coh_thresh_ind.len: {0}".format(len(coh_thresh_ind)))
ph_patch = self.__get_ph_patch(coh_thresh_ind, data)
self.__logger.debug("ph_patch.shape: {0}".format(ph_patch.shape))
coh_ps, topofit = self.__topofit(ph_patch, coh_thresh_ind, data)
self.__logger.debug("coh_ps.len: {0}".format(len(coh_ps)))
# And now we find coh_thresh again using the new coh_ps. For that we also need to find min_coh
# and da_mean.
min_coh, da_mean, is_min_coh_nan_array = self.__get_min_coh_and_da_mean(
coh_ps, max_rand, data)
self.__logger.debug("Second run min_coh.len: {0} ; da_mean.len: {1}"
.format(len(min_coh), len(da_mean)))
# Please note that da array is filtered by coh_thresh_ind
coh_thresh = self.__get_coh_thresh(min_coh, da_mean, is_min_coh_nan_array,
data.da[coh_thresh_ind])
self.__logger.debug("Second run coh_thresh.len: {0}".format(len(coh_thresh)))
# todo Maybe filter when you find those results
keep_ind = self.__get_keep_ind(topofit.coh_ps, coh_thresh, coh_thresh_ind,
topofit.k_ps)
self.__logger.debug("keep_ind.len: {0}"
.format(len(keep_ind)))
# Results to class variables
self.coh_thresh = coh_thresh
self.ph_patch = ph_patch
self.coh_thresh_ind = coh_thresh_ind
self.keep_ind = keep_ind
self.coh_ps = coh_ps  # In StaMPS this result is overridden from the last process
self.coh_ps2 = topofit.coh_ps # Find better name
self.ph_res = topofit.ph_res
self.k_ps = topofit.k_ps
self.c_ps = topofit.c_ps
self.ifg_ind = data.ifg_ind
self.__logger.debug("End")
def save_results(self, save_path: str):
ProcessDataSaver(save_path, self.__FILE_NAME).save_data(
coh_thresh=self.coh_thresh,
ph_patch=self.ph_patch,
coh_thresh_ind=self.coh_thresh_ind,
keep_ind=self.keep_ind,
coh_ps=self.coh_ps,
coh_ps2=self.coh_ps2,
ph_res=self.ph_res,
k_ps=self.k_ps,
c_ps=self.c_ps,
ifg_ind=self.ifg_ind
)
def load_results(self, load_path: str):
file_with_path = os.path.join(load_path, self.__FILE_NAME + ".npz")
data = np.load(file_with_path)
self.coh_thresh = data['coh_thresh']
self.ph_patch = data['ph_patch']
self.coh_thresh_ind = data['coh_thresh_ind']
self.keep_ind = data['keep_ind']
self.coh_ps = data['coh_ps']
self.coh_ps2 = data['coh_ps2']
self.ph_res = data['ph_res']
self.k_ps = data['k_ps']
self.c_ps = data['c_ps']
self.ifg_ind = data['ifg_ind']
def __load_ps_params(self) -> __DataDTO:
"""Finds values that are needed from ps_files and changes them a bit. It is similar to
load_ps_params method in PsEstGamma function."""
def get_da_max(da):
# todo Why 10000?
if da.size >= 10000:
da_sorted = np.sort(da, axis=0)
if da.size >= 50000:
bin_size = 10000
else:
bin_size = 2000
# bin_size - 1 is used so that we take the elements with the same indexes as in Matlab
da_max = np.concatenate(
(np.zeros(1), da_sorted[bin_size - 1: -bin_size - 1: bin_size],
np.array([da_sorted[-1]])))
else:
da_max = np.array([0, 1])
da = np.ones(len(self.__ps_est_gamma.coh_ps))
return da_max, da
def filter_params_based_on_ifgs_and_master(ph: np.ndarray, bperp: np.ndarray, nr_ifgs: int):
"""Filter out master row form ph and bperp arrays"""
comp_fun = lambda x, y: x < y
no_master_ix = np.setdiff1d(np.arange(0, nr_ifgs),
self.__ps_files.master_nr - 1)
ifg_ind = np.setdiff1d(np.arange(0, nr_ifgs), self.__drop_ifg_index)
ifg_ind = np.setdiff1d(ifg_ind, self.__ps_files.master_nr)
master_ix = self.__ps_files.get_nr_ifgs_copared_to_master(comp_fun) - 1
ifg_ind[ifg_ind > master_ix] -= 1
ph = ph[:, no_master_ix]
bperp = bperp[no_master_ix]
nr_ifgs = len(no_master_ix)
return ifg_ind, ph, bperp, nr_ifgs
ph, bperp, nr_ifgs, _, xy, da = self.__ps_files.get_ps_variables()
# In StaMPS this is done when the small_baseline_flag is not 'y'. Because this process
# is implemented as if small_baseline_flag were 'n', we always do this here
ifg_ind, ph, bperp, nr_ifgs = filter_params_based_on_ifgs_and_master(ph, bperp, nr_ifgs)
da_max, da = get_da_max(da)
# nr_dist in StaMPS
rand_dist = self.__ps_est_gamma.rand_dist
data_dto = self.__DataDTO(ph, nr_ifgs, xy, da, ifg_ind, da_max, rand_dist)
return data_dto
def __get_max_rand(self, da_max: np.ndarray, xy: np.ndarray):
"""This function finds variable that in StaMPS is called 'max_percent_rand'.
In StaMPS this variable is read in parameters. But in this process we also change it a bit
we calculate this here"""
DEF_VAL = 20
if self.__select_method is self._SelectMethod.DESINTY:
# In Stamps min and max values are in separate arrays with a single element.
patch_area = np.prod(MatlabUtils.max(xy) - MatlabUtils.min(xy)) / 1e6 # In km
max_rand = DEF_VAL * patch_area / (len(da_max) -1)
else:
max_rand = DEF_VAL
return max_rand
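# Worked example (illustrative numbers only): for a 5 km x 4 km patch
# (patch_area = 20) and len(da_max) == 21, the DESINTY branch above gives
# max_rand = 20 * 20 / 20 = 20.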
def __get_min_coh_and_da_mean(self, coh_ps: np.ndarray, max_rand: float, data: __DataDTO) -> (
np.ndarray, np.ndarray, bool):
# Internal parameters because full names are bad to write and read all the time
coherence_bins = self.__ps_est_gamma.coherence_bins
rand_dist = self.__ps_est_gamma.rand_dist
array_size = data.da_max.size - 1
min_coh = np.zeros(array_size)
# In StaMPS this is size(da_max, 1), which is the same as length(da_max)
da_mean = np.zeros(array_size)
for i in range(array_size):
# You could use np.logical_and here too; the bitwise & isn't mandatory
coh_chunk = coh_ps[(data.da > data.da_max[i]) & (data.da <= data.da_max[i + 1])]
da_mean[i] = np.mean(
data.da[(data.da > data.da_max[i]) & (data.da <= data.da_max[i + 1])])
# Remove pixels that we could not find coherence
coh_chunk = coh_chunk[coh_chunk != 0]
# In StaMPS this is called 'Na'
hist, _ = MatlabUtils.hist(coh_chunk, coherence_bins)
hist_low_coh_sum = MatlabUtils.sum(hist[:self.__low_coh_tresh])
rand_dist_low_coh_sum = MatlabUtils.sum(rand_dist[:self.__low_coh_tresh])
nr = rand_dist * hist_low_coh_sum / rand_dist_low_coh_sum # todo What does this 'nr' mean?
# In StaMPS here is also possibility to make graph
hist[hist == 0] = 1
# Percent_rand calculate
# np.flip works on one-dimensional arrays, that's why we don't use np.fliplr
nr_cumsum = np.cumsum(np.flip(nr, axis=0), axis=0)
if self.__select_method is self._SelectMethod.PERCENT:
hist_cumsum = np.cumsum(np.flip(hist, axis=0), axis=0) * 100
percent_rand = np.flip(np.divide(nr_cumsum, hist_cumsum), axis=0)
else:
percent_rand = np.flip(nr_cumsum, axis=0)
ok_ind = np.where(percent_rand < max_rand)[0]
if len(ok_ind) == 0:
# When coherence is over limit
min_coh[i] = 1
else:
# Here we don't need to add one to the indexes because this is already done in the
# 'ok_ind' array. This means that all those 'magic constants' are the same as in StaMPS
min_fit_ind = MatlabUtils.min(ok_ind) - 3 # todo Why 3?
if min_fit_ind <= 0:
min_coh[i] = np.nan
else:
max_fit_ind = MatlabUtils.min(ok_ind) + 2 # todo Why 2?
# In StaMPS this is just the constant 100, not the length of the array.
if max_fit_ind > len(percent_rand) - 1:
max_fit_ind = len(percent_rand) - 1
x_cordinates = percent_rand[min_fit_ind:max_fit_ind + 1]
y_cordinates = ArrayUtils.arange_include_last((min_fit_ind + 1) * 0.01,
(max_fit_ind + 1) * 0.01, 0.01)
min_coh[i] = MatlabUtils.polyfit_polyval(x_cordinates, y_cordinates, 3,
max_rand)
# Check if min_coh is unusable (full of nan's).
# This is a bit different from StaMPS: min_coh and da_mean are found in the same method
# and at the same time
not_nan_ind = np.where(~np.isnan(min_coh))[0]
is_min_coh_nan_array = len(not_nan_ind) == 0
# When there isn't differences then we don't need to take subsets of arrays
if not is_min_coh_nan_array or (not_nan_ind == array_size):
min_coh = min_coh[not_nan_ind]
da_mean = da_mean[not_nan_ind]
return min_coh, da_mean, is_min_coh_nan_array
def __get_coh_thresh(self, min_coh: np.ndarray, da_mean: np.ndarray,
is_min_coh_nan_array: bool, da: np.ndarray):
"""Here we don't return coh_tresh_coffs'i because it is used only for graphs"""
DEF_COH_THRESH = 0.3
if is_min_coh_nan_array:
self.__logger.warn(
'Not enough random phase pixels to set gamma threshold - using default threshold of '
+ str(DEF_COH_THRESH))
# Default value is put into array for others to use. Other functions expect array
# that's why we can't use just float
coh_thresh = np.array([DEF_COH_THRESH])
else:
# Because we have already changed the min_coh and da_mean arrays
if len(min_coh) > 1:
coh_thresh_coffs = np.polyfit(da_mean, min_coh, 1)
if coh_thresh_coffs[0] > 0:
| |
)
# set up shape vars
tgt_len, bsz, embed_dim = query.shape
src_len, _, _ = key.shape
assert (
embed_dim == embed_dim_to_check
), f"was expecting embedding dimension of {embed_dim_to_check}, but got {embed_dim}"
if isinstance(embed_dim, torch.Tensor):
# embed_dim can be a tensor when JIT tracing
head_dim = embed_dim.div(num_heads, rounding_mode="trunc")
else:
head_dim = embed_dim // num_heads
assert (
head_dim * num_heads == embed_dim
), f"embed_dim {embed_dim} not divisible by num_heads {num_heads}"
if use_separate_proj_weight:
# allow MHA to have different embedding dimensions when separate projection weights are used
assert (
key.shape[:2] == value.shape[:2]
), f"key's sequence and batch dims {key.shape[:2]} do not match value's {value.shape[:2]}"
else:
assert (
key.shape == value.shape
), f"key shape {key.shape} does not match value shape {value.shape}"
#
# compute in-projection
#
if not use_separate_proj_weight:
# q, k, v = _in_projection_packed(query, key, value, in_proj_weight, in_proj_bias)
q, k, v = query, key, value
else:
assert (
q_proj_weight is not None
), "use_separate_proj_weight is True but q_proj_weight is None"
assert (
k_proj_weight is not None
), "use_separate_proj_weight is True but k_proj_weight is None"
assert (
v_proj_weight is not None
), "use_separate_proj_weight is True but v_proj_weight is None"
if in_proj_bias is None:
b_q = b_k = b_v = None
else:
b_q, b_k, b_v = in_proj_bias.chunk(3)
q, k, v = _in_projection(
query,
key,
value,
q_proj_weight,
k_proj_weight,
v_proj_weight,
b_q,
b_k,
b_v,
)
# prep attention mask
if attn_mask is not None:
if attn_mask.dtype == torch.uint8:
warnings.warn(
"Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead."
)
attn_mask = attn_mask.to(torch.bool)
else:
assert (
attn_mask.is_floating_point() or attn_mask.dtype == torch.bool
), f"Only float, byte, and bool types are supported for attn_mask, not {attn_mask.dtype}"
# ensure attn_mask's dim is 3
if attn_mask.dim() == 2:
correct_2d_size = (tgt_len, src_len)
if attn_mask.shape != correct_2d_size:
raise RuntimeError(
f"The shape of the 2D attn_mask is {attn_mask.shape}, but should be {correct_2d_size}."
)
attn_mask = attn_mask.unsqueeze(0)
elif attn_mask.dim() == 3:
correct_3d_size = (bsz * num_heads, tgt_len, src_len)
if attn_mask.shape != correct_3d_size:
raise RuntimeError(
f"The shape of the 3D attn_mask is {attn_mask.shape}, but should be {correct_3d_size}."
)
else:
raise RuntimeError(
f"attn_mask's dimension {attn_mask.dim()} is not supported"
)
# prep key padding mask
if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:
warnings.warn(
"Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead."
)
key_padding_mask = key_padding_mask.to(torch.bool)
# add bias along batch dimension (currently second)
if bias_k is not None and bias_v is not None:
assert static_k is None, "bias cannot be added to static key."
assert static_v is None, "bias cannot be added to static value."
k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = F.pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = F.pad(key_padding_mask, (0, 1))
else:
assert bias_k is None
assert bias_v is None
#
# reshape q, k, v for multihead attention and make em batch first
#
q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
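# Shape illustration (example numbers only): with tgt_len=7, bsz=2,
# num_heads=4 and head_dim=16 (so embed_dim=64), q goes from (7, 2, 64) to
# (bsz * num_heads, tgt_len, head_dim) == (8, 7, 16) after the view/transpose
# above; k and v are reshaped the same way below.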
if static_k is None:
k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
else:
# TODO finish disentangling control flow so we don't do in-projections when statics are passed
assert (
static_k.size(0) == bsz * num_heads
), f"expecting static_k.size(0) of {bsz * num_heads}, but got {static_k.size(0)}"
assert (
static_k.size(2) == head_dim
), f"expecting static_k.size(2) of {head_dim}, but got {static_k.size(2)}"
k = static_k
if static_v is None:
v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
else:
# TODO finish disentangling control flow so we don't do in-projections when statics are passed
assert (
static_v.size(0) == bsz * num_heads
), f"expecting static_v.size(0) of {bsz * num_heads}, but got {static_v.size(0)}"
assert (
static_v.size(2) == head_dim
), f"expecting static_v.size(2) of {head_dim}, but got {static_v.size(2)}"
v = static_v
# add zero attention along batch dimension (now first)
if add_zero_attn:
zero_attn_shape = (bsz * num_heads, 1, head_dim)
k = torch.cat(
[k, torch.zeros(zero_attn_shape, dtype=k.dtype, device=k.device)], dim=1
)
v = torch.cat(
[v, torch.zeros(zero_attn_shape, dtype=v.dtype, device=v.device)], dim=1
)
if attn_mask is not None:
attn_mask = F.pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = F.pad(key_padding_mask, (0, 1))
# update source sequence length after adjustments
src_len = k.size(1)
# merge key padding and attention masks
if key_padding_mask is not None:
assert key_padding_mask.shape == (
bsz,
src_len,
), f"expecting key_padding_mask shape of {(bsz, src_len)}, but got {key_padding_mask.shape}"
key_padding_mask = (
key_padding_mask.view(bsz, 1, 1, src_len)
.expand(-1, num_heads, -1, -1)
.reshape(bsz * num_heads, 1, src_len)
)
if attn_mask is None:
attn_mask = key_padding_mask
elif attn_mask.dtype == torch.bool:
attn_mask = attn_mask.logical_or(key_padding_mask)
else:
attn_mask = attn_mask.masked_fill(key_padding_mask, float("-inf"))
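# Shape illustration (example numbers only): a (bsz=2, src_len=5)
# key_padding_mask becomes (bsz * num_heads, 1, src_len) == (8, 1, 5) above,
# so it can broadcast against the per-head (tgt_len, src_len) attention scores.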
# convert mask to float
if attn_mask is not None and attn_mask.dtype == torch.bool:
new_attn_mask = torch.zeros_like(attn_mask, dtype=torch.float)
new_attn_mask.masked_fill_(attn_mask, float("-inf"))
attn_mask = new_attn_mask
# adjust dropout probability
if not training:
dropout_p = 0.0
#
# (deep breath) calculate attention and out projection
#
attn_output, attn_output_weights = _scaled_dot_product_attention(
q, k, v, attn_mask, dropout_p
)
attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn_output = F.linear(attn_output, out_proj_weight, out_proj_bias)
if need_weights:
# average attention weights over heads
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
return attn_output, attn_output_weights.sum(dim=1) / num_heads
else:
return attn_output, None
class MultiheadAttention(Module):
r"""Allows the model to jointly attend to information
from different representation subspaces.
See `Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_
.. math::
\text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
where :math:`head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)`.
Args:
embed_dim: total dimension of the model.
num_heads: parallel attention heads.
dropout: a Dropout layer on attn_output_weights. Default: 0.0.
bias: add bias as module parameter. Default: True.
add_bias_kv: add bias to the key and value sequences at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
kdim: total number of features in key. Default: None.
vdim: total number of features in value. Default: None.
batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
Note that if :attr:`kdim` and :attr:`vdim` are None, they will be set
to :attr:`embed_dim` such that query, key, and value have the same
number of features.
Examples::
>>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
>>> attn_output, attn_output_weights = multihead_attn(query, key, value)
"""
__constants__ = ["batch_first"]
bias_k: Optional[torch.Tensor]
bias_v: Optional[torch.Tensor]
def __init__(
self,
embed_dim,
num_heads,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
kdim=None,
vdim=None,
batch_first=False,
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super(MultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.batch_first = batch_first
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), "embed_dim must be divisible by num_heads"
if self._qkv_same_embed_dim is False:
self.q_proj_weight = Parameter(
torch.empty((embed_dim, embed_dim), **factory_kwargs)
)
self.k_proj_weight = Parameter(
torch.empty((embed_dim, self.kdim), **factory_kwargs)
)
self.v_proj_weight = Parameter(
torch.empty((embed_dim, self.vdim), **factory_kwargs)
)
self.register_parameter("in_proj_weight", None)
else:
self.in_proj_weight = Parameter(
torch.empty((3 * embed_dim, embed_dim), **factory_kwargs)
)
self.register_parameter("q_proj_weight", None)
self.register_parameter("k_proj_weight", None)
self.register_parameter("v_proj_weight", None)
if bias:
self.in_proj_bias = Parameter(torch.empty(3 * embed_dim, **factory_kwargs))
else:
self.register_parameter("in_proj_bias", None)
self.out_proj = NonDynamicallyQuantizableLinear(
embed_dim, embed_dim, bias=bias, **factory_kwargs
)
if add_bias_kv:
self.bias_k = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs))
self.bias_v = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self._reset_parameters()
def _reset_parameters(self):
if self._qkv_same_embed_dim:
xavier_uniform_(self.in_proj_weight)
else:
xavier_uniform_(self.q_proj_weight)
xavier_uniform_(self.k_proj_weight)
xavier_uniform_(self.v_proj_weight)
if self.in_proj_bias is not None:
constant_(self.in_proj_bias, 0.0)
constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
xavier_normal_(self.bias_k)
if self.bias_v is not None:
xavier_normal_(self.bias_v)
def __setstate__(self, state):
# Support loading old MultiheadAttention checkpoints generated by v1.1.0
if "_qkv_same_embed_dim" not in state:
state["_qkv_same_embed_dim"] = True
super(MultiheadAttention, self).__setstate__(state)
def forward(
self,
query: Tensor,
key: Tensor,
value: Tensor,
key_padding_mask: Optional[Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[Tensor] = None,
) -> Tuple[Tensor, Optional[Tensor]]:
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
key_padding_mask: if provided, specified padding elements in the key will
be ignored by the attention. When given | |
#!/usr/bin/env python
#
# Copyright (c) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Does google-lint on c++ files.
The goal of this script is to identify places in the code that *may*
be in non-compliance with google style. It does not attempt to fix
up these problems -- the point is to educate. It does also not
attempt to find all problems, or to ensure that everything it does
find is legitimately a problem.
In particular, we can get very confused by /* and // inside strings!
We do a small hack, which is to ignore //'s with "'s after them on the
same line, but it is far from perfect (in either direction).
"""
import codecs
import copy
import getopt
import glob
import itertools
import math # for log
import os
import regex
import sre_compile
import string
import sys
# if empty, use defaults
_header_regex = regex.compile("a^")
# if empty, use defaults
_source_regex = regex.compile("a^")
# Files which match the regex are considered to be header
# files (and will undergo different style checks).
# This set can be extended by using the --headers
# option
def IsHeaderFile(filename):
return _header_regex.search(filename)
def IsSourceFile(filename):
return _source_regex.search(filename)
_USAGE = r"""
Syntax: cpplint.py [--repository=path]
[--headers=header_regex]
[--srcs=src_regex]
<file> [file] ...
The style guidelines this tries to follow are those in
https://google.github.io/styleguide/cppguide.html
Every problem is given a confidence score from 1-5, with 5 meaning we are
certain of the problem, and 1 meaning it could be a legitimate construct.
This will miss some errors, and is not a substitute for a code review.
To suppress false-positive errors of a certain category, add a
'NOLINT(category)' comment to the line. NOLINT or NOLINT(*)
suppresses errors of all categories on that line.
The files passed in will be linted; at least one file must be provided.
Flags:
repository=path
The top level directory of the repository, used to derive the header
guard CPP variable. By default, this is determined by searching for a
path that contains .git, .hg, or .svn. When this flag is specified, the
given path is used instead. This option allows the header guard CPP
variable to remain consistent even if members of a team have different
repository root directories (such as when checking out a subdirectory
with SVN). In addition, users of non-mainstream version control systems
can use this flag to ensure readable header guard CPP variables.
Examples:
Assuming that Alice checks out ProjectName and Bob checks out
ProjectName/trunk and trunk contains src/chrome/ui/browser.h, then
with no --repository flag, the header guard CPP variable will be:
Alice => TRUNK_SRC_CHROME_BROWSER_UI_BROWSER_H_
Bob => SRC_CHROME_BROWSER_UI_BROWSER_H_
If Alice uses the --repository=trunk flag and Bob omits the flag or
uses --repository=. then the header guard CPP variable will be:
Alice => SRC_CHROME_BROWSER_UI_BROWSER_H_
Bob => SRC_CHROME_BROWSER_UI_BROWSER_H_
srcs=src_regex
The regex for source files that cpplint will check
Examples:
--srcs=\.c$|\.cpp$
headers=header_regex
The regex for header files that cpplint will use
Examples:
--headers=\.h$|\.hpp$|\.inc$
"""
# We categorize each error message we print. Here are the categories.
# We want an explicit list so we can list them all in cpplint --filter=.
# If you add a new error message with a new category, add it to the list
# here! cpplint_unittest.py should tell you if you forget to do this.
_ERROR_CATEGORIES = [
'build/class',
'build/deprecated',
'build/endif_comment',
'build/explicit_make_pair',
'build/header_guard',
'build/include_order',
'build/include_what_you_use',
'build/printf_format',
'build/storage_class',
'readability/alt_tokens',
'readability/braces',
'readability/casting',
'readability/check',
'readability/constructors',
'readability/fn_size',
'readability/inheritance',
'readability/multiline_comment',
'readability/multiline_string',
'readability/namespace',
'readability/nolint',
'readability/nul',
'readability/strings',
'readability/utf8',
'runtime/arrays',
'runtime/casting',
'runtime/explicit',
'runtime/int',
'runtime/init',
'runtime/invalid_increment',
'runtime/member_string_references',
'runtime/memset',
'runtime/operator',
'runtime/printf',
'runtime/printf_format',
'whitespace/blank_line',
'whitespace/comments',
'whitespace/empty_conditional_body',
'whitespace/empty_if_body',
'whitespace/empty_loop_body',
'whitespace/forcolon',
'whitespace/newline',
'whitespace/operators',
'whitespace/parens',
'whitespace/semicolon',
'whitespace/todo',
]
# The default list of categories suppressed for C (not C++) files.
_DEFAULT_C_SUPPRESSED_CATEGORIES = [
'readability/casting',
]
# We used to check for high-bit characters, but after much discussion we
# decided those were OK, as long as they were in UTF-8 and didn't represent
# hard-coded international strings, which belong in a separate i18n file.
# Type names
_TYPES = regex.compile(
r'^(?:'
# [dcl.type.simple]
r'(char(16_t|32_t)?)|wchar_t|'
r'bool|short|int|long|signed|unsigned|float|double|'
# [support.types]
r'(ptrdiff_t|size_t|max_align_t|nullptr_t)|'
# [cstdint.syn]
r'(u?int(_fast|_least)?(8|16|32|64)_t)|'
r'(u?int(max|ptr)_t)|'
r')$')
# These headers are excluded from [build/include] checks:
# - Anything not following google file name conventions (containing an
# uppercase character, such as Python.h or nsStringAPI.h, for example).
# - Lua headers.
_THIRD_PARTY_HEADERS_PATTERN = regex.compile(
r'^(?:[^/]*[A-Z][^/]*\.h|lua\.h|lauxlib\.h|lualib\.h)$')
# Pattern that matches only complete whitespace, possibly across multiple lines.
_EMPTY_CONDITIONAL_BODY_PATTERN = regex.compile(r'^\s*$', regex.DOTALL)
# Alternative tokens and their replacements. For full list, see section 2.5
# Alternative tokens [lex.digraph] in the C++ standard.
#
# Digraphs (such as '%:') are not included here since it's a mess to
# match those on a word boundary.
_ALT_TOKEN_REPLACEMENT = {
'and': '&&',
'bitor': '|',
'or': '||',
'xor': '^',
'compl': '~',
'bitand': '&',
'and_eq': '&=',
'or_eq': '|=',
'xor_eq': '^=',
'not': '!',
'not_eq': '!='
}
# Compile regular expression that matches all the above keywords. The "[ =()]"
# bit is meant to avoid matching these keywords outside of boolean expressions.
#
# False positives include C-style multi-line comments and multi-line strings
# but those have always been troublesome for cpplint.
_ALT_TOKEN_REPLACEMENT_PATTERN = regex.compile(
r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
# These constants define the current inline assembly state
_NO_ASM = 0 # Outside of inline assembly block
_INSIDE_ASM = 1 # Inside inline assembly block
_END_ASM = 2 # Last line of inline assembly block
_BLOCK_ASM = 3 # The whole block is an inline assembly block
# Match start of assembly blocks
_MATCH_ASM = regex.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
r'(?:\s+(volatile|__volatile__))?'
r'\s*[{(]')
# Match strings that indicate we're working on a C (not C++) file.
_SEARCH_C_FILE = regex.compile(r'\b(?:LINT_C_FILE|'
r'vim?:\s*.*(\s*|:)filetype=c(\s*|:|$))')
_regexp_compile_cache = {}
# {str, set(int)}: a map from error categories to sets of linenumbers
# on which those errors are expected and should be suppressed.
_error_suppressions = {}
if sys.version_info < (3,):
# -- pylint: disable=no-member
# BINARY_TYPE = str
itervalues = dict.itervalues
iteritems = dict.iteritems
else:
# BINARY_TYPE = bytes
itervalues = dict.values
iteritems = dict.items
def unicode_escape_decode(x):
if sys.version_info < (3,):
return codecs.unicode_escape_decode(x)[0]
else:
return x
# {str, bool}: a map from error categories to booleans which indicate if the
# category should be suppressed for every line.
_global_error_suppressions = {}
def ParseNolintSuppressions(filename, raw_line, linenum, error):
"""Updates the global list of line error-suppressions.
Parses any NOLINT comments on the current line, updating the global
error_suppressions store. Reports an error if the NOLINT comment
was malformed.
Args:
filename: str, the name of the input file.
raw_line: str, the line of input text, with comments.
linenum: int, the number of the current line.
error: function, an error handler.
"""
matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line)
if matched:
if matched.group(1):
suppressed_line = linenum + 1
else:
suppressed_line = linenum
category = matched.group(2)
if category in (None, '(*)'): # => "suppress all"
_error_suppressions.setdefault(None, set()).add(suppressed_line)
else:
if category.startswith('(') and category.endswith(')'):
category = category[1:-1]
if category in _ERROR_CATEGORIES:
_error_suppressions.setdefault(category, set()).add(suppressed_line)
else:
error(filename, linenum, 'readability/nolint', 5,
'Unknown NOLINT error category: %s' % category)
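# For illustration, the suppression comments matched above look like this in a
# C++ source line:
#
#     int i = (int)get_value();  // NOLINT(readability/casting)
#     // NOLINTNEXTLINE(whitespace/semicolon)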
def ProcessGlobalSuppresions(lines):
"""Updates the list of global error suppressions.
Parses any lint directives in the file that have global effect.
Args:
lines: An array of strings, | |
<filename>my_widgets.py
import os
from collections import OrderedDict as od
import ipywidgets as ipw
from copy import deepcopy
import pandas as pd
import traitlets
try:
from tkinter import Tk, filedialog
tkinter_available = True
except ImportError:
tkinter_available = False
import helper_funcs as helpers
from traceback import format_exc
import numpy as np
import matplotlib.pyplot as plt
### WORKING
class TableEditor(object):
_base_layout = ipw.Layout(flex='0 1 auto', width='200px', height='150px')
_btn_width = "100px"
def __init__(self, df, save_dir=None, preconfig_file=None,
default_group=None, new_run_names=[], add_to_index_vars=[],
unstack_indices=[], run_level_idx=0, var_level_idx=2,
**plot_settings):
# Stuff for I/O
if save_dir is None:
save_dir = os.getcwd()
self.saveas_funs = dict(csv = self.save_csv,
xlsx = self.save_xlsx,
png = self.save_png)
self.plot_funs = dict(heatmap = self.plot_heatmap)
self.save_dir = save_dir
# the dataframe
self.df = df
self.df_edit = self.check_shape_init(df)
self._df_edit_last = self.df_edit
self.extractions = od()
self.var_level_idx = var_level_idx
self.run_level_idx = run_level_idx
# Predefined settings for things applied to dataframe
self.new_run_names_init = new_run_names
self.add_to_index_vars = add_to_index_vars
self.unstack_indices = unstack_indices
# Display of Dataframe
self.current_plot = None
self.heatmap_settings = od(cmap="bwr",
cmap_shifted=True)
# Settings for Variable selector
self.groups = od()
self.groups["flagged"] = self.flagged_vars
if preconfig_file:
self.groups.update(helpers.load_varconfig_ini(preconfig_file))
if default_group is None:
default_group = "flagged"
if not default_group in self.groups:
raise ValueError("No default group with ID {} in file {}".format(default_group, preconfig_file))
self.default_group = default_group
self.default_selection = self.groups[default_group]
self._buttons_edit_df = []
# init widgets and actions
self.init_widgets_renamer()
self.init_layout_renamer()
self.init_widgets_varselect()
self.init_layout_varselect()
self.init_layout_reshaper()
self.init_glob_widgets()
self.init_layout()
# initiate layout
self.apply_changes_rename()
self.crop_var_selection()
self.add_to_index(self.add_to_index_vars)
self.unstack(self.unstack_indices)
self.update_ui()
self.disp_current()
self.heatmap_settings.update(plot_settings)
if not tkinter_available:
self.btn_saveas.disabled = True
self.btn_saveas.tooltip = ("Please install tkinter to use this "
"feature. Until then, you can use save "
"button")
@property
def default_plot_fun(self):
return self.plot_funs["heatmap"]
@property
def column_names(self):
return list(self.df_edit.columns)
@property
def data_column_names(self):
df = self.df_edit
if isinstance(df.columns, pd.MultiIndex):
return list(df.columns.levels[0])
return list(df.columns)
@property
def index_level_names(self):
return self.df_edit.index.names
@property
def index_level_col_names(self):
return self.df_edit.columns.names[1:]
@property
def run_names(self):
#return sorted(self.df.index.get_level_values(self.level).unique().values)
return self.df_edit.index.get_level_values(self.run_level_idx).unique().values
@property
def flagged_vars(self):
lvl = self.var_level_idx
return list(self.df[self.df.Flag.astype(bool)].index.get_level_values(lvl).unique().values)
@property
def all_variables(self):
lvl = self.var_level_idx
return self.df.index.get_level_values(lvl).unique().values
def init_glob_widgets(self):
self.disp_table = ipw.Output()
self.output = ipw.Output()
btn_clear_output = ipw.Button(description="Clear output",
layout=ipw.Layout(width=self._btn_width))
btn_clear_output.on_click(self.on_clear_output)
btn_reset = ipw.Button(description="Reset",
tooltip="Reset all changes that were applied",
layout=ipw.Layout(width=self._btn_width))
btn_reset.on_click(self.on_reset)
tip = ("Save file in {} using filename specified in line above. "
"Allowed filetypes are: {}".format(self.save_dir,
list(self.saveas_funs.keys())))
btn_save = ipw.Button(description="Save",
tooltip=tip,
layout=ipw.Layout(width=self._btn_width))
btn_save.on_click(self.on_save)
btn_saveas = ipw.Button(description="Save as",
tooltip="Save current Dataframe as file",
layout=ipw.Layout(width=self._btn_width))
btn_plot = ipw.Button(description="Plot",
layout=ipw.Layout(width=self._btn_width))
btn_plot.on_click(self.on_plot)
btn_saveas.style.button_color = 'lightgreen'
btn_saveas.on_click(self.on_saveas)
self.btn_saveas = btn_saveas
self.glob_toolbar = ipw.HBox([btn_clear_output,
btn_reset,
btn_save,
btn_saveas,
btn_plot])
self.save_name = ipw.Text(placeholder='Insert save filename (e.g. test.csv)')
def init_layout(self):
self.edit_ui = ipw.Tab()
self.edit_ui.children = [self.layout_rename,
self.layout_varselect,
self.layout_reshaper]
self.edit_ui.set_title(0, "Rename run")
self.edit_ui.set_title(1, "Select variables")
self.edit_ui.set_title(2, "Reshape dataframe")
self.layout = ipw.VBox([self.edit_ui,
self.save_name,
self.glob_toolbar,
self.disp_table,
self.output],
layout = ipw.Layout(min_height="600px"))
# =============================================================================
# self.layout.children = [self.layout_varselect,
# self.layout_rename,
# self.layout_reshape,
# self.layout_display]
#
# =============================================================================
def init_widgets_renamer(self):
self.btn_apply_rename = ipw.Button(description='Apply')
self.btn_apply_rename.style.button_color = "lightgreen"
self.btn_apply_rename.on_click(self.on_click_apply_rename)
self.input_rows_rename = []
self.input_fields_rename = []
for i, name in enumerate(self.run_names):
try:
val = self.new_run_names_init[i]
except:
val = name
ipt = ipw.Text(value=val, placeholder='Insert new name',
disabled=False, layout=ipw.Layout(width='100px'))
row = ipw.HBox([ipw.Label(name, layout=ipw.Layout(width='100px')), ipt])
self.input_fields_rename.append(ipt)
self.input_rows_rename.append(row)
self._buttons_edit_df.extend([self.btn_apply_rename])
def init_layout_renamer(self):
self.layout_rename = ipw.HBox([ipw.VBox(self.input_rows_rename),
self.btn_apply_rename])
def init_widgets_varselect(self):
# Init all widgets for variable selector
self.btn_unselect_all = ipw.Button(description='Unselect all')
self.btn_select_all = ipw.Button(description='Select all')
self.btn_flagged = ipw.Button(description="Flagged")
self.btn_apply_varselect = ipw.Button(description='Apply')
self.btn_apply_varselect.style.button_color = 'lightgreen'
self.var_selector = ipw.SelectMultiple(description='',
options=self.all_variables,
value=self.default_selection,
layout=self._base_layout)
self.var_selector_disp = ipw.Textarea(value='',
description='',
disabled=True,
layout=self._base_layout)
self.group_selector = ipw.Dropdown(options=self.groups,
value=self.default_selection,
description='',
disabled=False)
# init all actions for widgets of variable selector
self.var_selector.observe(self.current_varselection)
self.group_selector.observe(self.on_change_dropdown)
#what happens when buttons are clicked
self.btn_select_all.on_click(self.on_select_all_vars_clicked)
self.btn_unselect_all.on_click(self.on_unselect_all_vars_clicked)
self.btn_apply_varselect.on_click(self.on_click_apply_varselect)
self._buttons_edit_df.extend([self.btn_select_all,
self.btn_unselect_all,
self.btn_apply_varselect])
def init_layout_varselect(self):
self.btns_varselect = ipw.VBox([self.btn_select_all,
self.btn_unselect_all,
ipw.Label(),
self.btn_apply_varselect])
l = ipw.HBox([ipw.VBox([ipw.Label("Predefined"), self.group_selector]),
ipw.VBox([ipw.Label("Index level {}".format(self.var_level_idx)),
self.var_selector]),
ipw.VBox([ipw.Label("Current selection"),
self.var_selector_disp]),
self.btns_varselect])
self.layout_varselect = l
self.current_varselection(1)
#self.layout = ipw.VBox([self.edit_area, self.output])
def init_layout_reshaper(self):
# COLUMN TO INDEX
col2idx_header = ipw.Label("Column to index")
self.col2idx_select = ipw.SelectMultiple(description='',
options=self.column_names,
value=(),
layout=self._base_layout)
col2idx_btn_apply = ipw.Button(description = "Add", layout=ipw.Layout(width=self._btn_width))
col2idx_btn_apply.on_click(self.on_add_col)
col2idx_btn_apply.tooltip = "Add selected columns to Multiindex"
col2idx_btn_apply.style.button_color = 'lightgreen'
col2idx_layout = ipw.VBox([col2idx_header,
self.col2idx_select,
ipw.HBox([col2idx_btn_apply])])
# UNSTACKING
unstack_header = ipw.Label("Unstack index")
self.unstack_select = ipw.SelectMultiple(description='',
options=self.index_level_names,
value=(),
layout=self._base_layout)
unstack_btn_apply = ipw.Button(description = "Apply", layout=ipw.Layout(width=self._btn_width))
unstack_btn_apply.on_click(self.on_unstack)
unstack_btn_apply.style.button_color = 'lightgreen'
unstack_btn_apply.tooltip = "Put selected indices into columns"
unstack_layout = ipw.VBox([unstack_header,
self.unstack_select,
ipw.HBox([unstack_btn_apply])])
# STACKING
stack_header = ipw.Label("Stack index")
self.stack_select = ipw.SelectMultiple(description='',
options=self.index_level_col_names,
value=(),
layout=self._base_layout)
stack_btn_apply = ipw.Button(description = "Apply", layout=ipw.Layout(width=self._btn_width))
stack_btn_apply.on_click(self.on_stack)
stack_btn_apply.style.button_color = 'lightgreen'
stack_btn_apply.tooltip = "Put selected indices into rows"
stack_layout = ipw.VBox([stack_header,
self.stack_select,
ipw.HBox([stack_btn_apply])])
# SELECT COLUMN
extract_header = ipw.Label("Extract column")
self.extract_select = ipw.Select(description='',
options=self.data_column_names,
layout=self._base_layout)
extract_btn_apply = ipw.Button(description="Apply",
layout=ipw.Layout(width=self._btn_width))
extract_btn_apply.on_click(self.on_extract)
extract_btn_apply.style.button_color = 'lightgreen'
extract_btn_apply.tooltip = "Extract currently selected column"
extract_btn_undo = ipw.Button(description="Undo",
layout=ipw.Layout(width=self._btn_width))
extract_btn_undo.on_click(self.on_extract_undo)
extract_btn_undo.tooltip = "Undo last column extraction"
extract_layout = ipw.VBox([extract_header,
self.extract_select,
ipw.HBox([extract_btn_undo,
extract_btn_apply])])
self.layout_reshaper = ipw.HBox([col2idx_layout,
unstack_layout,
stack_layout,
extract_layout])
self._buttons_edit_df.extend([col2idx_btn_apply,
unstack_btn_apply,
stack_btn_apply,
extract_btn_apply])
# Methods for renamer
def on_click_apply_rename(self, b):
self.apply_changes_rename()
self.disp_current()
def apply_changes_rename(self):
df = self.df_edit
mapping = od()
for i, name in enumerate(self.run_names):
repl = str(self.input_fields_rename[i].value)
mapping[name] = repl
self.df_edit = df.rename(index=mapping, level=self.run_level_idx)
self.output.append_display_data("Applying renaming: {}".format(mapping))
# Methods for variable selector
def on_unselect_all_vars_clicked(self, b):
self.unselect_all()
def on_select_all_vars_clicked(self, b):
self.select_all()
def on_change_dropdown(self, b):
self.select_current_group()
def unselect_all(self):
self.var_selector.value = ()
def select_all(self):
self.var_selector.value = self.var_selector.options
def select_current_group(self):
self.var_selector.value = self.group_selector.value
def current_varselection(self, b):
s=""
for item in self.var_selector.value:
s += "{}\n".format(item)
self.var_selector_disp.value = s
def crop_var_selection(self):
try:
self.df_edit = helpers.crop_selection_dataframe(self.df_edit,
self.var_selector.value,
levels=self.var_level_idx)
self.output.append_display_data("Applying variable selection: {}".format(self.var_selector.value))
except Exception as e:
self.output.append_display_data("WARNING: failed to extract selection.\nTraceback {}".format(format_exc()))
def on_click_apply_varselect(self, b):
self.crop_var_selection()
self.disp_current()
# Methods for reshaper
def update_ui(self):
"""Recreate user interface"""
if not isinstance(self.df_edit, pd.Series):
if isinstance(self.df_edit.columns, pd.MultiIndex):
self.col2idx_select.options = ("N/A", "Current dataframe is unstacked")
self.col2idx_select.disabled = True
for item in self.input_fields_rename:
item.disabled = True
self.btn_apply_rename.disabled=True
tip = ("Dataframe contains unstacked indices. Renaming can only be "
"applied for dataframe that has not been unstacked. You "
"may re-stack the dataframe using the tab 'Reshape dataframe'")
self.btn_apply_rename.tooltip = tip
self.btn_apply_varselect.disabled = True
self.btn_apply_varselect.tooltip = tip
else:
self.col2idx_select.options = self.column_names
self.col2idx_select.value=()
self.col2idx_select.disabled = False
for item in self.input_fields_rename:
item.disabled = False
self.btn_apply_rename.disabled=False
self.btn_apply_varselect.disabled=False
tip = ("Apply current settings")
self.btn_apply_rename.tooltip = tip
self.btn_apply_varselect.tooltip = tip
self.unstack_select.options = self.index_level_names
self.unstack_select.value = ()
self.stack_select.options = self.index_level_col_names
self.stack_select.value = ()
self.extract_select.options = self.data_column_names
self.disp_table.clear_output()
self.disp_current()
def on_add_col(self, b):
var_names = list(self.col2idx_select.value)
self.add_to_index(var_names)
self.update_ui()
def on_unstack(self, b):
level_names = list(self.unstack_select.value)
self.unstack(level_names)
self.update_ui()
def on_stack(self, b):
level_names = list(self.stack_select.value)
self.stack(level_names)
self.update_ui()
def on_extract(self, b):
val = str(self.extract_select.value)
self._df_edit_last = self.df_edit
self.df_edit = self.df_edit[val]
self.update_ui()
self.freeze_ui()
self.disp_current()
def freeze_ui(self, disable=True):
for btn in self._buttons_edit_df:
btn.disabled = disable
def on_extract_undo(self, b):
self.df_edit = self._df_edit_last
self.update_ui()
self.freeze_ui(False)
self.disp_current()
# global events
def on_clear_output(self, b):
self.output.clear_output()
def on_save(self, b):
self.save()
def on_saveas(self, b):
self.saveas()
def on_reset(self, b):
self.reset()
self.update_ui()
def on_plot(self, b):
self.plot()
def check_shape_init(self, df):
if isinstance(df.columns, pd.MultiIndex):
#print("Initial Dataframe is unstacked, stacking back")
return helpers.stack_dataframe_original_idx(df)
return deepcopy(df)
def add_to_index(self, var_names):
if isinstance(var_names, str):
var_names = [var_names]
for item in var_names:
self.df_edit = self.df_edit.set_index([self.df_edit.index, item])
def unstack(self, level_names):
self.df_edit = self.df_edit.unstack(level_names)
def stack(self, level_names):
self.df_edit = helpers.stack_dataframe(self.df_edit, level_names)
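# Illustrative sketch (not part of the original code): the reshaping helpers
# above are typically chained as follows, assuming `ed` is an instance of this
# editor and "scenario" is a column of the wrapped DataFrame:
#   ed.add_to_index(["scenario"])   # move the column into the row MultiIndex
#   ed.unstack(["scenario"])        # pivot that index level into columns
#   ed.stack(["scenario"])          # stack it back into the row index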
def reset(self):
self.df_edit = self.check_shape_init(self.df)
def disp_current(self):
#self.output.append_display_data(ipw.Label("PREVIEW current selection", fontsize=22))
self.disp_table.clear_output()
if isinstance(self.df_edit, pd.Series):
disp = self.df_edit
else:
disp = self.df_edit.head().style.set_caption("PREVIEW")
self.disp_table.append_display_data(disp)
#self.disp_table.append_display_data(preview)
#self.output
def plot_heatmap(self, ax):
try:
self.current_plot = helpers.df_to_heatmap(self.df_edit, ax=ax,
**self.heatmap_settings)
except Exception as e:
self.output.append_display_data("Failed to plot heatmap: Error "
"message: {}".format(repr(e)))
def plot(self):
self.disp_table.clear_output()
with self.disp_table:
fig, ax = plt.subplots(1,1, figsize=(14, 8))
| |
#!/router/bin/python
from collections import OrderedDict, namedtuple
from scapy.utils import ltoa
from scapy.error import Scapy_Exception
import random
import base64
import string
import traceback
import copy
import imp
from ..common.trex_exceptions import *
from ..common.trex_types import verify_exclusive_arg, validate_type
from ..utils.text_opts import format_num
from .trex_stl_packet_builder_interface import CTrexPktBuilderInterface
from .trex_stl_packet_builder_scapy import *
# base class for TX mode
class STLTXMode(object):
""" mode rate speed """
def __init__ (self, pps = None, bps_L1 = None, bps_L2 = None, percentage = None):
"""
Speed can be given in packets per second (pps), L1/L2 bits per second, or as a percentage of the port rate.
Use only one unit, for example pps = 10000 or bps_L1 = 10.
:parameters:
pps : float
Packets per second
bps_L1 : float
Bits per second L1 (with IPG)
bps_L2 : float
Bits per second L2 (Ethernet-FCS)
percentage : float
Link interface percent (0-100). Example: 10 means 10% of the port's link rate
.. code-block:: python
# STLTXMode Example
mode = STLTXCont(pps = 10)
mode = STLTXCont(bps_L1 = 10000000) #10mbps L1
mode = STLTXCont(bps_L2 = 10000000) #10mbps L2
mode = STLTXCont(percentage = 10) #10%
"""
args = [pps, bps_L1, bps_L2, percentage]
# default
if all([x is None for x in args]):
pps = 1.0
else:
verify_exclusive_arg(args)
self.fields = {'rate': {}}
if pps is not None:
validate_type('pps', pps, [float, int])
self.fields['rate']['type'] = 'pps'
self.fields['rate']['value'] = pps
elif bps_L1 is not None:
validate_type('bps_L1', bps_L1, [float, int])
self.fields['rate']['type'] = 'bps_L1'
self.fields['rate']['value'] = bps_L1
elif bps_L2 is not None:
validate_type('bps_L2', bps_L2, [float, int])
self.fields['rate']['type'] = 'bps_L2'
self.fields['rate']['value'] = bps_L2
elif percentage is not None:
validate_type('percentage', percentage, [float, int])
if not (percentage > 0 and percentage <= 100):
raise TRexArgumentError('percentage', percentage)
self.fields['rate']['type'] = 'percentage'
self.fields['rate']['value'] = percentage
def to_json (self):
return dict(self.fields)
@staticmethod
def from_json (json_data):
try:
mode = json_data['mode']
rate = mode['rate']
# check the rate type
if rate['type'] not in ['pps', 'bps_L1', 'bps_L2', 'percentage']:
raise TRexError("from_json: invalid rate type '{0}'".format(rate['type']))
# construct the pair
kwargs = {rate['type'] : rate['value']}
if mode['type'] == 'single_burst':
return STLTXSingleBurst(total_pkts = mode['total_pkts'], **kwargs)
elif mode['type'] == 'multi_burst':
return STLTXMultiBurst(pkts_per_burst = mode['pkts_per_burst'],
ibg = mode['ibg'],
count = mode['count'],
**kwargs)
elif mode['type'] == 'continuous':
return STLTXCont(**kwargs)
else:
raise TRexError("from_json: unknown mode type '{0}'".format(mode['type']))
except KeyError as e:
raise TRexError("from_json: missing field {0} from JSON".format(e))
# continuous mode
class STLTXCont(STLTXMode):
""" Continuous mode """
def __init__ (self, **kwargs):
"""
Continuous mode
see :class:`trex.stl.trex_stl_streams.STLTXMode` for rate
.. code-block:: python
# STLTXCont Example
mode = STLTXCont(pps = 10)
"""
super(STLTXCont, self).__init__(**kwargs)
self.fields['type'] = 'continuous'
@staticmethod
def __str__ ():
return "Continuous"
# single burst mode
class STLTXSingleBurst(STLTXMode):
""" Single burst mode """
def __init__ (self, total_pkts = 1, **kwargs):
"""
Single burst mode
:parameters:
total_pkts : int
Number of packets for this burst
see :class:`trex.stl.trex_stl_streams.STLTXMode` for rate
.. code-block:: python
# STLTXSingleBurst Example
mode = STLTXSingleBurst( pps = 10, total_pkts = 1)
"""
if not isinstance(total_pkts, int):
raise TRexArgumentError('total_pkts', total_pkts)
super(STLTXSingleBurst, self).__init__(**kwargs)
self.fields['type'] = 'single_burst'
self.fields['total_pkts'] = total_pkts
@staticmethod
def __str__ ():
return "Single Burst"
# multi burst mode
class STLTXMultiBurst(STLTXMode):
""" Multi-burst mode """
def __init__ (self,
pkts_per_burst = 1,
ibg = 0.0, # usec not SEC
count = 1,
**kwargs):
"""
Multi-burst mode
:parameters:
pkts_per_burst: int
Number of packets per burst
ibg : float
Inter-burst gap in usec (1,000,000.0 usec = 1 sec)
count : int
Number of bursts
see :class:`trex.stl.trex_stl_streams.STLTXMode` for rate
.. code-block:: python
# STLTXMultiBurst Example
mode = STLTXMultiBurst(pps = 10, pkts_per_burst = 1, count = 10, ibg = 10.0)
"""
if not isinstance(pkts_per_burst, int):
raise TRexArgumentError('pkts_per_burst', pkts_per_burst)
if not isinstance(ibg, (int, float)):
raise TRexArgumentError('ibg', ibg)
if not isinstance(count, int):
raise TRexArgumentError('count', count)
super(STLTXMultiBurst, self).__init__(**kwargs)
self.fields['type'] = 'multi_burst'
self.fields['pkts_per_burst'] = pkts_per_burst
self.fields['ibg'] = ibg
self.fields['count'] = count
@staticmethod
def __str__ ():
return "Multi Burst"
STLStreamDstMAC_CFG_FILE=0
STLStreamDstMAC_PKT =1
STLStreamDstMAC_ARP =2
class STLFlowStatsInterface(object):
def __init__ (self, pg_id, vxlan):
self.fields = {
'enabled': True,
'stream_id': pg_id,
'vxlan': vxlan,
}
def to_json (self):
""" Dump as json"""
return dict(self.fields)
@staticmethod
def from_json (json_data):
'''
create the object from JSON output
'''
try:
# no flow stats
if not json_data['enabled']:
return None
# flow stats
if json_data['rule_type'] == 'stats':
return STLFlowStats(pg_id = json_data['stream_id'])
# latency
elif json_data['rule_type'] == 'latency':
return STLFlowLatencyStats(pg_id = json_data['stream_id'])
else:
raise TRexError("from_json: invalid flow stats type {0}".format(json_data['rule_type']))
except KeyError as e:
raise TRexError("from_json: missing field {0} from JSON".format(e))
@staticmethod
def defaults ():
return {'enabled' : False}
class STLFlowStats(STLFlowStatsInterface):
""" Define per stream basic stats
.. code-block:: python
# STLFlowStats Example
flow_stats = STLFlowStats(pg_id = 7)
"""
def __init__(self, pg_id, vxlan = False):
super(STLFlowStats, self).__init__(pg_id, vxlan)
self.fields['rule_type'] = 'stats'
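# Illustrative sketch: the JSON produced/consumed by these classes, e.g. for
# STLFlowStats(pg_id = 7):
#   {'enabled': True, 'stream_id': 7, 'vxlan': False, 'rule_type': 'stats'}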
class STLFlowLatencyStats(STLFlowStatsInterface):
""" Define per stream basic stats + latency, jitter, packet reorder/loss
.. code-block:: python
# STLFlowLatencyStats Example
flow_stats = STLFlowLatencyStats(pg_id = 7)
"""
def __init__(self, pg_id, vxlan = False):
super(STLFlowLatencyStats, self).__init__(pg_id, vxlan)
self.fields['rule_type'] = 'latency'
class STLStream(object):
""" One stream object. Includes mode, Field Engine mode packet template and Rx stats
.. code-block:: python
# STLStream Example
base_pkt = Ether()/IP(src="172.16.31.10",dst="172.16.31.10")/UDP(dport=12,sport=1025)
pad = max(0, size - len(base_pkt)) * 'x'
STLStream( isg = 10.0, # start delay (inter-stream gap, usec)
name ='S0',
packet = STLPktBuilder(pkt = base_pkt/pad),
mode = STLTXSingleBurst( pps = 10, total_pkts = 1),
next = 'S1') # point to the next stream
"""
def __init__ (self,
name = None,
packet = None,
mode = STLTXCont(pps = 1),
enabled = True,
self_start = True,
isg = 0.0,
flow_stats = None,
next = None,
stream_id = None,
action_count = 0,
random_seed =0,
mac_src_override_by_pkt = None,
mac_dst_override_mode = None, #see STLStreamDstMAC_xx
dummy_stream = False,
start_paused = False,
core_id = -1
):
"""
Stream object
:parameters:
name : string
Name of the stream. Required if this stream is dependent on another stream, and another stream needs to refer to this stream by name.
packet : STLPktBuilder see :class:`trex.stl.trex_stl_packet_builder_scapy.STLPktBuilder`
Template packet and field engine program. Example: packet = STLPktBuilder(pkt = base_pkt/pad)
mode : :class:`trex.stl.trex_stl_streams.STLTXCont` or :class:`trex.stl.trex_stl_streams.STLTXSingleBurst` or :class:`trex.stl.trex_stl_streams.STLTXMultiBurst`
enabled : bool
Indicates whether the stream is enabled.
self_start : bool
If False, another stream activates it.
isg : float
Inter-stream gap in usec. Time to wait until the stream sends the first packet.
flow_stats : :class:`trex.stl.trex_stl_streams.STLFlowStats`
Per stream statistic object. See: STLFlowStats
next : string
Name of the stream to activate.
stream_id :
For use by HLTAPI.
action_count : uint16_t
If there is a next stream, number of loops before stopping. Default: 0 (unlimited).
random_seed: uint32_t
If given, the seed for this stream will be this value. Useful if you need a deterministic random value.
mac_src_override_by_pkt : bool
Template packet sets src MAC.
mac_dst_override_mode=None : STLStreamDstMAC_xx
Template packet sets dst MAC.
dummy_stream : bool
For delay purposes, will not be sent.
start_paused : bool
Experimental flag, might be removed in future!
Stream will not be transmitted until un-paused.
core_id: int
Pins the stream to core_id in case core_id is specified and 0 <= core_id < number of cores.
Default value = -1.
Negative value (default) keeps the current behaviour.
"""
# type checking
validate_type('name', name, (type(None), int, basestring))
validate_type('next', next, (type(None), int, basestring))
validate_type('mode', mode, STLTXMode)
validate_type('packet', packet, (type(None), CTrexPktBuilderInterface))
validate_type('flow_stats', flow_stats, (type(None), STLFlowStatsInterface))
validate_type('enabled', enabled, bool)
validate_type('self_start', self_start, bool)
validate_type('isg', isg, (int, float))
validate_type('stream_id', stream_id, (type(None), int))
validate_type('random_seed',random_seed,int)
validate_type('dummy_stream', dummy_stream, bool)
validate_type('start_paused', start_paused, bool)
validate_type('core_id', core_id, int)
if (type(mode) == STLTXCont) and (next is not None):
raise TRexError("Continuous stream cannot have a next stream ID")
if (type(flow_stats) == STLFlowLatencyStats and core_id >= 0):
raise TRexError("Core ID is not supported for latency streams.")
# tag for the stream and next - can be anything
self.name = name
self.next = next
self.id = stream_id
# set externally
self.fields = {}
if not packet:
packet = STLPktBuilder(pkt = Ether()/IP())
self.scapy_pkt_builder = packet
# packet builder
packet.compile()
int_mac_src_override_by_pkt = 0
int_mac_dst_override_mode = 0
if mac_src_override_by_pkt | |
not None else kw_retries if kw_retries is not None else 0
backoff = backoff if backoff is not None else kw_backoff if kw_backoff is not None else 0.3
intervals = intervals or kw_intervals
if intervals and len(intervals) and all(isinstance(i, (int, float)) for i in intervals):
request_delta = [0] + intervals
else:
request_delta = [0] + [(backoff * (2 ** (retry + 1))) for retry in range(retries)]
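# e.g. with backoff=0.3 and retries=3 the exponential schedule above
# evaluates to [0, 0.6, 1.2, 2.4] (first attempt immediately, then
# exponentially growing delays between retries)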
no_retries = len(request_delta) == 1
# SSL verification settings
# ON by default, disable accordingly with any variant if matched
kw_ssl_verify = get_ssl_verify_option(method, url, settings, request_options=request_options)
ssl_verify = False if not kw_ssl_verify or not ssl_verify else True # pylint: disable=R1719
request_kwargs.setdefault("timeout", 5)
request_kwargs.setdefault("verify", ssl_verify)
# process request
resp = None
failures = []
no_cache = get_no_cache_option(request_kwargs.get("headers", {}), request_options)
region = "request"
request_args = (method, url, request_kwargs)
caching_args = (_request_cached, region, *request_args)
for retry, delay in enumerate(request_delta):
if retry:
code = resp.status_code if resp else None
if retry_after and resp and code in [HTTPTooManyRequests.code]:
after = resp.headers.get("Retry-After", "")
delay = int(after) if str(after).isdigit() else 0
LOGGER.debug("Received header [Retry-After=%ss] (code=%s) for [%s %s]", after, code, method, url)
LOGGER.debug("Retrying failed request after delay=%ss (code=%s) for [%s %s]", delay, code, method, url)
time.sleep(delay)
try:
if no_cache:
resp = _request_call(*request_args)
else:
resp = _request_cached(*request_args)
if allowed_codes and len(allowed_codes):
if resp.status_code in allowed_codes:
return resp
elif resp.status_code < (500 if only_server_errors else 400):
invalidate_region(caching_args)
return resp
invalidate_region(caching_args)
failures.append("{} ({})".format(getattr(resp, "reason", type(resp).__name__),
getattr(resp, "status_code", getattr(resp, "code", 500))))
# function called without retries raises original error as if calling requests module directly
except (requests.ConnectionError, requests.Timeout) as exc:
if no_retries:
raise
invalidate_region(caching_args)
failures.append(type(exc).__name__)
# also pass-through here if no retries
if no_retries and resp:
return resp
detail = "Request ran out of retries. Attempts generated following errors: {}".format(failures)
err = HTTPGatewayTimeout(detail=detail)
# make 'raise_for_status' method available for convenience
setattr(err, "url", url)
setattr(err, "reason", err.explanation)
setattr(err, "raise_for_status", lambda: Response.raise_for_status(err)) # noqa
return err
def download_file_http(file_reference, file_outdir, settings=None, **request_kwargs):
# type: (str, str, Optional[AnySettingsContainer], Any) -> str
"""
Downloads the file referenced by an HTTP URL location.
Respects :rfc:`2183`, :rfc:`5987` and :rfc:`6266` regarding ``Content-Disposition`` header handling to resolve
any preferred file name. This value is employed if it fulfills the validation criteria. Otherwise, the name is extracted
from the last part of the URL path.
:param file_reference: HTTP URL where the file is hosted.
:param file_outdir: Output local directory path under which to place the downloaded file.
:param settings: Additional request-related settings from the application configuration (notably request-options).
:param request_kwargs: Additional keywords to forward to request call (if needed).
:return: Path of the local copy of the fetched file.
:raises HTTPException: applicable HTTP-based exception if any unrecoverable problem occurred during fetch request.
:raises ValueError: when resulting file name value is considered invalid.
"""
LOGGER.debug("Fetch file resolved as remote URL reference.")
request_kwargs.pop("stream", None)
resp = request_extra("get", file_reference, stream=True, retries=3, settings=settings, **request_kwargs)
if resp.status_code >= 400:
# use method since response object does not derive from Exception, therefore cannot be raised directly
if hasattr(resp, "raise_for_status"):
resp.raise_for_status()
raise resp
# resolve preferred file name or default to last fragment of request path
file_name = None
content_disposition = get_header("Content-Disposition", resp.headers)
if content_disposition:
LOGGER.debug("Detected Content-Disposition, looking for preferred file name...")
options = CaseInsensitiveDict(parse_extra_options(content_disposition, sep=";"))
file_name_param = options.get("filename")
file_name_star = options.get("filename*")
if file_name_star and "''" in file_name_star:
file_name_encoding, file_name_star = file_name_star.split("''")
try:
file_name_star = unquote(file_name_star, file_name_encoding, errors="strict")
except (LookupError, UnicodeDecodeError):
file_name_star = None
# security validation, remove any nested path and abort if any invalid characters
try:
file_name_maybe = (file_name_star or file_name_param or "").split("/")[-1].strip().replace(" ", "_")
file_name_maybe = FILE_NAME_QUOTE_PATTERN.match(file_name_maybe)[1]
if file_name_maybe and (3 < len(file_name_maybe) < 256):
file_name = file_name_maybe
LOGGER.debug("Using validated Content-Disposition preferred file name: [%s]", file_name)
except (IndexError, TypeError):
LOGGER.debug("Discarding Content-Disposition preferred file name due to failed validation.")
if not file_name:
file_name = urlparse(file_reference).path.split("/")[-1]
LOGGER.debug("Using default file name from URL path fragment: [%s]", file_name)
if not FILE_NAME_LOOSE_PATTERN.match(file_name):
raise ValueError(f"Invalid file name [{file_name!s}] resolved from URL [{file_reference}]. Aborting download.")
file_path = os.path.join(file_outdir, file_name)
with open(file_path, "wb") as file:
# NOTE:
# Setting 'chunk_size=None' lets the request find a suitable size according to
# available memory. Without this, it defaults to 1 which is extremely slow.
for chunk in resp.iter_content(chunk_size=None):
file.write(chunk)
return file_path
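# Usage sketch (hypothetical URL and directory, not taken from the sources):
#   path = download_file_http("https://example.com/results/output.json", "/tmp/downloads")
#   # -> "/tmp/downloads/output.json", unless a Content-Disposition header
#   #    provides a validated preferred file name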
def fetch_file(file_reference, file_outdir, settings=None, link=None, **request_kwargs):
# type: (str, str, Optional[AnySettingsContainer], Optional[bool], Any) -> str
"""
Fetches a file from a local path, AWS S3 bucket or remote URL, and dumps its content to the output directory.
The output directory is expected to exist prior to this function call.
The file reference scheme (protocol) determines from where to fetch the content.
Output file name and extension will be the same as the original (after link resolution if applicable).
Requests will consider ``weaver.request_options`` when using ``http(s)://`` scheme.
:param file_reference:
Local filesystem path (optionally prefixed with ``file://``), ``s3://`` bucket location or ``http(s)://``
remote URL file reference. Reference ``https://s3.[...]`` are also considered as ``s3://``.
:param file_outdir: Output local directory path under which to place the fetched file.
:param settings: Additional request-related settings from the application configuration (notably request-options).
:param link:
If ``True``, force generation of a symbolic link instead of hard copy, regardless if source is a file or link.
If ``False``, force hard copy of the file to destination, regardless if source is a file or link.
If ``None`` (default), resolve automatically as follows.
When the source is a symbolic link itself, the destination will also be a link.
When the source is a direct file reference, the destination will be a hard copy of the file.
Only applicable when the file reference is local.
:param request_kwargs: Additional keywords to forward to request call (if needed).
:return: Path of the local copy of the fetched file.
:raises HTTPException: applicable HTTP-based exception if any occurred during the operation.
:raises ValueError: when the reference scheme cannot be identified.
"""
file_href = file_reference
file_name = os.path.basename(os.path.realpath(file_reference)) # resolve any different name to use the original
file_path = os.path.join(file_outdir, file_name)
if file_reference.startswith("file://"):
file_reference = file_reference[7:]
LOGGER.debug("Fetching file reference: [%s]", file_href)
if os.path.isfile(file_reference):
LOGGER.debug("Fetch file resolved as local reference.")
# NOTE:
# If file is available locally and referenced as a system link, disabling 'follow_symlinks'
# creates a copy of the symlink instead of an extra hard-copy of the linked file.
if os.path.islink(file_reference) and not os.path.isfile(file_path):
if link is True:
os.symlink(os.readlink(file_reference), file_path)
else:
shutil.copyfile(file_reference, file_path, follow_symlinks=link is False)
# otherwise copy the file if not already available
# compare the resolved paths of 'file_path' and 'file_reference' to ensure multiple symlinks don't end up in the same place
elif not os.path.isfile(file_path) or os.path.realpath(file_path) != os.path.realpath(file_reference):
if link is True:
os.symlink(file_reference, file_path)
else:
shutil.copyfile(file_reference, file_path)
else:
LOGGER.debug("Fetch file as local reference has no action to take, file already exists: [%s]", file_path)
elif file_reference.startswith("s3://"):
LOGGER.debug("Fetch file resolved as S3 bucket reference.")
s3 = boto3.resource("s3")
bucket_name, file_key = file_reference[5:].split("/", 1)
bucket = s3.Bucket(bucket_name)
bucket.download_file(file_key, file_path)
elif file_reference.startswith("http"):
# pseudo-http URL referring to S3 bucket, try to redirect to above S3 handling method if applicable
if file_reference.startswith("https://s3."):
s3 = boto3.resource("s3")
# endpoint in the form: "https://s3.[region-name.]amazonaws.com/<bucket>/<file-key>"
if not file_reference.startswith(s3.meta.endpoint_url):
LOGGER.warning("Detected HTTP file reference to AWS S3 bucket that mismatches server configuration. "
"Will consider it as plain HTTP with read access.")
else:
file_ref_updated = "s3://{}".format(file_reference.replace(s3.meta.endpoint_url, ""))
LOGGER.debug("Adjusting file reference to S3 shorthand for further parsing:\n"
" Initial: [%s]\n"
" Updated: [%s]", file_reference, file_ref_updated)
return fetch_file(file_ref_updated, file_outdir, settings=settings, **request_kwargs)
file_path = download_file_http(file_reference, file_outdir, settings=settings, **request_kwargs)
else:
scheme = file_reference.split("://")
scheme = "<none>" if len(scheme) < 2 else scheme[0]
raise ValueError("Unresolved location and/or fetch file scheme: '{!s}', supported: {}, reference: [{!s}]"
.format(scheme, list(SUPPORTED_FILE_SCHEMES), file_reference))
LOGGER.debug("Fetch file resolved:\n"
" Reference: [%s]\n"
" File Path: [%s]", file_href, file_path)
return file_path
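# Usage sketch (hypothetical references, not taken from the sources):
#   fetch_file("file:///data/outputs/result.nc", "/tmp/out")              # local copy or symlink
#   fetch_file("s3://my-bucket/outputs/result.nc", "/tmp/out")            # S3 bucket download
#   fetch_file("https://example.com/outputs/result.nc", "/tmp/out")       # HTTP download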
def load_file(file_path):
"""
Load JSON or YAML file contents from local path or remote URL.
If URL, get the content and validate it by loading, otherwise load file directly.
:raises ValueError: if YAML or JSON cannot be parsed or loaded from location.
"""
| |
1, 0x4225),
Register('vr2_16_4', 2, 0x4226),
Register('vr2_8_9', 1, 0x4226),
Register('vr2_8_8', 1, 0x4227),
Register('vr2_64_0', 8, 0x4228),
Register('vr2_32_1', 4, 0x4228),
Register('vr2_16_3', 2, 0x4228),
Register('vr2_8_7', 1, 0x4228),
Register('vr2_8_6', 1, 0x4229),
Register('vr2_16_2', 2, 0x422a),
Register('vr2_8_5', 1, 0x422a),
Register('vr2_8_4', 1, 0x422b),
Register('vr2_32_0', 4, 0x422c),
Register('vr2_16_1', 2, 0x422c),
Register('vr2_8_3', 1, 0x422c),
Register('vr2_8_2', 1, 0x422d),
Register('vr2_16_0', 2, 0x422e),
Register('vr2_8_1', 1, 0x422e),
Register('vr2_8_0', 1, 0x422f),
Register('vs35', 16, 0x4230),
Register('vr3_64_1', 8, 0x4230),
Register('vr3_32_3', 4, 0x4230),
Register('vr3_16_7', 2, 0x4230),
Register('vr3_8_15', 1, 0x4230),
Register('vr3_8_14', 1, 0x4231),
Register('vr3_16_6', 2, 0x4232),
Register('vr3_8_13', 1, 0x4232),
Register('vr3_8_12', 1, 0x4233),
Register('vr3_32_2', 4, 0x4234),
Register('vr3_16_5', 2, 0x4234),
Register('vr3_8_11', 1, 0x4234),
Register('vr3_8_10', 1, 0x4235),
Register('vr3_16_4', 2, 0x4236),
Register('vr3_8_9', 1, 0x4236),
Register('vr3_8_8', 1, 0x4237),
Register('vr3_64_0', 8, 0x4238),
Register('vr3_32_1', 4, 0x4238),
Register('vr3_16_3', 2, 0x4238),
Register('vr3_8_7', 1, 0x4238),
Register('vr3_8_6', 1, 0x4239),
Register('vr3_16_2', 2, 0x423a),
Register('vr3_8_5', 1, 0x423a),
Register('vr3_8_4', 1, 0x423b),
Register('vr3_32_0', 4, 0x423c),
Register('vr3_16_1', 2, 0x423c),
Register('vr3_8_3', 1, 0x423c),
Register('vr3_8_2', 1, 0x423d),
Register('vr3_16_0', 2, 0x423e),
Register('vr3_8_1', 1, 0x423e),
Register('vr3_8_0', 1, 0x423f),
Register('vs36', 16, 0x4240),
Register('vr4_64_1', 8, 0x4240),
Register('vr4_32_3', 4, 0x4240),
Register('vr4_16_7', 2, 0x4240),
Register('vr4_8_15', 1, 0x4240),
Register('vr4_8_14', 1, 0x4241),
Register('vr4_16_6', 2, 0x4242),
Register('vr4_8_13', 1, 0x4242),
Register('vr4_8_12', 1, 0x4243),
Register('vr4_32_2', 4, 0x4244),
Register('vr4_16_5', 2, 0x4244),
Register('vr4_8_11', 1, 0x4244),
Register('vr4_8_10', 1, 0x4245),
Register('vr4_16_4', 2, 0x4246),
Register('vr4_8_9', 1, 0x4246),
Register('vr4_8_8', 1, 0x4247),
Register('vr4_64_0', 8, 0x4248),
Register('vr4_32_1', 4, 0x4248),
Register('vr4_16_3', 2, 0x4248),
Register('vr4_8_7', 1, 0x4248),
Register('vr4_8_6', 1, 0x4249),
Register('vr4_16_2', 2, 0x424a),
Register('vr4_8_5', 1, 0x424a),
Register('vr4_8_4', 1, 0x424b),
Register('vr4_32_0', 4, 0x424c),
Register('vr4_16_1', 2, 0x424c),
Register('vr4_8_3', 1, 0x424c),
Register('vr4_8_2', 1, 0x424d),
Register('vr4_16_0', 2, 0x424e),
Register('vr4_8_1', 1, 0x424e),
Register('vr4_8_0', 1, 0x424f),
Register('vs37', 16, 0x4250),
Register('vr5_64_1', 8, 0x4250),
Register('vr5_32_3', 4, 0x4250),
Register('vr5_16_7', 2, 0x4250),
Register('vr5_8_15', 1, 0x4250),
Register('vr5_8_14', 1, 0x4251),
Register('vr5_16_6', 2, 0x4252),
Register('vr5_8_13', 1, 0x4252),
Register('vr5_8_12', 1, 0x4253),
Register('vr5_32_2', 4, 0x4254),
Register('vr5_16_5', 2, 0x4254),
Register('vr5_8_11', 1, 0x4254),
Register('vr5_8_10', 1, 0x4255),
Register('vr5_16_4', 2, 0x4256),
Register('vr5_8_9', 1, 0x4256),
Register('vr5_8_8', 1, 0x4257),
Register('vr5_64_0', 8, 0x4258),
Register('vr5_32_1', 4, 0x4258),
Register('vr5_16_3', 2, 0x4258),
Register('vr5_8_7', 1, 0x4258),
Register('vr5_8_6', 1, 0x4259),
Register('vr5_16_2', 2, 0x425a),
Register('vr5_8_5', 1, 0x425a),
Register('vr5_8_4', 1, 0x425b),
Register('vr5_32_0', 4, 0x425c),
Register('vr5_16_1', 2, 0x425c),
Register('vr5_8_3', 1, 0x425c),
Register('vr5_8_2', 1, 0x425d),
Register('vr5_16_0', 2, 0x425e),
Register('vr5_8_1', 1, 0x425e),
Register('vr5_8_0', 1, 0x425f),
Register('vs38', 16, 0x4260),
Register('vr6_64_1', 8, 0x4260),
Register('vr6_32_3', 4, 0x4260),
Register('vr6_16_7', 2, 0x4260),
Register('vr6_8_15', 1, 0x4260),
Register('vr6_8_14', 1, 0x4261),
Register('vr6_16_6', 2, 0x4262),
Register('vr6_8_13', 1, 0x4262),
Register('vr6_8_12', 1, 0x4263),
Register('vr6_32_2', 4, 0x4264),
Register('vr6_16_5', 2, 0x4264),
Register('vr6_8_11', 1, 0x4264),
Register('vr6_8_10', 1, 0x4265),
Register('vr6_16_4', 2, 0x4266),
Register('vr6_8_9', 1, 0x4266),
Register('vr6_8_8', 1, 0x4267),
Register('vr6_64_0', 8, 0x4268),
Register('vr6_32_1', 4, 0x4268),
Register('vr6_16_3', 2, 0x4268),
Register('vr6_8_7', 1, 0x4268),
Register('vr6_8_6', 1, 0x4269),
Register('vr6_16_2', 2, 0x426a),
Register('vr6_8_5', 1, 0x426a),
Register('vr6_8_4', 1, 0x426b),
Register('vr6_32_0', 4, 0x426c),
Register('vr6_16_1', 2, 0x426c),
Register('vr6_8_3', 1, 0x426c),
Register('vr6_8_2', 1, 0x426d),
Register('vr6_16_0', 2, 0x426e),
Register('vr6_8_1', 1, 0x426e),
Register('vr6_8_0', 1, 0x426f),
Register('vs39', 16, 0x4270),
Register('vr7_64_1', 8, 0x4270),
Register('vr7_32_3', 4, 0x4270),
Register('vr7_16_7', 2, 0x4270),
Register('vr7_8_15', 1, 0x4270),
Register('vr7_8_14', 1, 0x4271),
Register('vr7_16_6', 2, 0x4272),
Register('vr7_8_13', 1, 0x4272),
Register('vr7_8_12', 1, 0x4273),
Register('vr7_32_2', 4, 0x4274),
Register('vr7_16_5', 2, 0x4274),
Register('vr7_8_11', 1, 0x4274),
Register('vr7_8_10', 1, 0x4275),
Register('vr7_16_4', 2, 0x4276),
Register('vr7_8_9', 1, 0x4276),
Register('vr7_8_8', 1, 0x4277),
Register('vr7_64_0', 8, 0x4278),
Register('vr7_32_1', 4, 0x4278),
Register('vr7_16_3', 2, 0x4278),
Register('vr7_8_7', 1, 0x4278),
Register('vr7_8_6', 1, 0x4279),
Register('vr7_16_2', 2, 0x427a),
Register('vr7_8_5', 1, 0x427a),
Register('vr7_8_4', 1, 0x427b),
Register('vr7_32_0', 4, 0x427c),
Register('vr7_16_1', 2, 0x427c),
Register('vr7_8_3', 1, 0x427c),
Register('vr7_8_2', 1, 0x427d),
Register('vr7_16_0', 2, 0x427e),
Register('vr7_8_1', 1, 0x427e),
Register('vr7_8_0', 1, 0x427f),
Register('vs40', 16, 0x4280),
Register('vr8_64_1', 8, 0x4280),
Register('vr8_32_3', 4, 0x4280),
Register('vr8_16_7', 2, 0x4280),
Register('vr8_8_15', 1, 0x4280),
Register('vr8_8_14', 1, 0x4281),
Register('vr8_16_6', 2, 0x4282),
Register('vr8_8_13', 1, 0x4282),
Register('vr8_8_12', 1, 0x4283),
Register('vr8_32_2', 4, 0x4284),
Register('vr8_16_5', 2, 0x4284),
Register('vr8_8_11', 1, 0x4284),
Register('vr8_8_10', 1, 0x4285),
Register('vr8_16_4', 2, 0x4286),
Register('vr8_8_9', 1, 0x4286),
Register('vr8_8_8', 1, 0x4287),
Register('vr8_64_0', 8, 0x4288),
Register('vr8_32_1', 4, 0x4288),
Register('vr8_16_3', 2, 0x4288),
Register('vr8_8_7', 1, 0x4288),
Register('vr8_8_6', 1, 0x4289),
Register('vr8_16_2', 2, 0x428a),
Register('vr8_8_5', 1, 0x428a),
Register('vr8_8_4', 1, 0x428b),
Register('vr8_32_0', 4, 0x428c),
Register('vr8_16_1', 2, 0x428c),
Register('vr8_8_3', 1, 0x428c),
Register('vr8_8_2', 1, 0x428d),
Register('vr8_16_0', 2, 0x428e),
Register('vr8_8_1', 1, 0x428e),
Register('vr8_8_0', 1, 0x428f),
Register('vs41', 16, 0x4290),
Register('vr9_64_1', 8, 0x4290),
Register('vr9_32_3', 4, 0x4290),
Register('vr9_16_7', 2, 0x4290),
Register('vr9_8_15', 1, 0x4290),
Register('vr9_8_14', 1, 0x4291),
Register('vr9_16_6', 2, 0x4292),
Register('vr9_8_13', 1, 0x4292),
Register('vr9_8_12', 1, 0x4293),
Register('vr9_32_2', 4, 0x4294),
Register('vr9_16_5', 2, 0x4294),
Register('vr9_8_11', 1, 0x4294),
Register('vr9_8_10', 1, 0x4295),
Register('vr9_16_4', 2, 0x4296),
Register('vr9_8_9', 1, 0x4296),
Register('vr9_8_8', 1, 0x4297),
Register('vr9_64_0', 8, 0x4298),
Register('vr9_32_1', 4, 0x4298),
Register('vr9_16_3', 2, 0x4298),
Register('vr9_8_7', 1, 0x4298),
Register('vr9_8_6', 1, 0x4299),
Register('vr9_16_2', 2, 0x429a),
Register('vr9_8_5', 1, 0x429a),
Register('vr9_8_4', 1, 0x429b),
Register('vr9_32_0', 4, 0x429c),
Register('vr9_16_1', 2, 0x429c),
Register('vr9_8_3', 1, 0x429c),
Register('vr9_8_2', 1, 0x429d),
Register('vr9_16_0', 2, 0x429e),
Register('vr9_8_1', 1, 0x429e),
Register('vr9_8_0', 1, 0x429f),
Register('vs42', 16, 0x42a0),
Register('vr10_64_1', 8, 0x42a0),
Register('vr10_32_3', 4, 0x42a0),
Register('vr10_16_7', 2, 0x42a0),
Register('vr10_8_15', 1, 0x42a0),
Register('vr10_8_14', 1, 0x42a1),
Register('vr10_16_6', 2, 0x42a2),
Register('vr10_8_13', 1, 0x42a2),
Register('vr10_8_12', 1, 0x42a3),
Register('vr10_32_2', 4, 0x42a4),
Register('vr10_16_5', 2, 0x42a4),
Register('vr10_8_11', 1, 0x42a4),
Register('vr10_8_10', 1, 0x42a5),
Register('vr10_16_4', 2, 0x42a6),
Register('vr10_8_9', 1, 0x42a6),
Register('vr10_8_8', 1, 0x42a7),
Register('vr10_64_0', 8, 0x42a8),
Register('vr10_32_1', 4, 0x42a8),
Register('vr10_16_3', 2, 0x42a8),
Register('vr10_8_7', 1, 0x42a8),
Register('vr10_8_6', 1, 0x42a9),
Register('vr10_16_2', 2, 0x42aa),
Register('vr10_8_5', 1, 0x42aa),
Register('vr10_8_4', 1, 0x42ab),
Register('vr10_32_0', 4, 0x42ac),
Register('vr10_16_1', 2, 0x42ac),
Register('vr10_8_3', 1, 0x42ac),
Register('vr10_8_2', 1, 0x42ad),
Register('vr10_16_0', 2, 0x42ae),
Register('vr10_8_1', 1, 0x42ae),
Register('vr10_8_0', 1, 0x42af),
Register('vs43', 16, 0x42b0),
Register('vr11_64_1', 8, 0x42b0),
Register('vr11_32_3', 4, 0x42b0),
Register('vr11_16_7', 2, 0x42b0),
Register('vr11_8_15', 1, 0x42b0),
Register('vr11_8_14', 1, 0x42b1),
Register('vr11_16_6', 2, 0x42b2),
Register('vr11_8_13', 1, 0x42b2),
Register('vr11_8_12', 1, 0x42b3),
Register('vr11_32_2', 4, 0x42b4),
Register('vr11_16_5', 2, 0x42b4),
Register('vr11_8_11', 1, 0x42b4),
Register('vr11_8_10', 1, 0x42b5),
Register('vr11_16_4', 2, 0x42b6),
Register('vr11_8_9', 1, 0x42b6),
Register('vr11_8_8', 1, 0x42b7),
Register('vr11_64_0', 8, 0x42b8),
Register('vr11_32_1', 4, 0x42b8),
Register('vr11_16_3', 2, 0x42b8),
Register('vr11_8_7', 1, 0x42b8),
Register('vr11_8_6', 1, 0x42b9),
Register('vr11_16_2', 2, 0x42ba),
Register('vr11_8_5', 1, 0x42ba),
Register('vr11_8_4', 1, 0x42bb),
Register('vr11_32_0', 4, 0x42bc),
Register('vr11_16_1', 2, 0x42bc),
Register('vr11_8_3', 1, 0x42bc),
Register('vr11_8_2', 1, 0x42bd),
Register('vr11_16_0', 2, 0x42be),
Register('vr11_8_1', 1, 0x42be),
Register('vr11_8_0', 1, 0x42bf),
Register('vs44', 16, 0x42c0),
Register('vr12_64_1', 8, 0x42c0),
Register('vr12_32_3', 4, 0x42c0),
Register('vr12_16_7', 2, 0x42c0),
Register('vr12_8_15', 1, 0x42c0),
Register('vr12_8_14', 1, 0x42c1),
Register('vr12_16_6', 2, 0x42c2),
Register('vr12_8_13', 1, 0x42c2),
Register('vr12_8_12', 1, 0x42c3),
Register('vr12_32_2', 4, 0x42c4),
Register('vr12_16_5', 2, 0x42c4),
Register('vr12_8_11', 1, 0x42c4),
Register('vr12_8_10', 1, 0x42c5),
Register('vr12_16_4', 2, 0x42c6),
Register('vr12_8_9', 1, 0x42c6),
Register('vr12_8_8', 1, 0x42c7),
Register('vr12_64_0', 8, 0x42c8),
Register('vr12_32_1', 4, 0x42c8),
Register('vr12_16_3', 2, 0x42c8),
Register('vr12_8_7', 1, 0x42c8),
Register('vr12_8_6', 1, 0x42c9),
Register('vr12_16_2', 2, 0x42ca),
Register('vr12_8_5', 1, 0x42ca),
Register('vr12_8_4', 1, 0x42cb),
Register('vr12_32_0', 4, 0x42cc),
Register('vr12_16_1', 2, 0x42cc),
Register('vr12_8_3', 1, 0x42cc),
Register('vr12_8_2', 1, 0x42cd),
Register('vr12_16_0', 2, 0x42ce),
Register('vr12_8_1', 1, 0x42ce),
Register('vr12_8_0', 1, 0x42cf),
Register('vs45', 16, 0x42d0),
Register('vr13_64_1', 8, 0x42d0),
Register('vr13_32_3', 4, 0x42d0),
Register('vr13_16_7', 2, 0x42d0),
Register('vr13_8_15', 1, 0x42d0),
Register('vr13_8_14', 1, 0x42d1),
Register('vr13_16_6', 2, 0x42d2),
Register('vr13_8_13', 1, 0x42d2),
Register('vr13_8_12', 1, 0x42d3),
Register('vr13_32_2', 4, 0x42d4),
Register('vr13_16_5', 2, 0x42d4),
Register('vr13_8_11', 1, 0x42d4),
Register('vr13_8_10', 1, 0x42d5),
Register('vr13_16_4', 2, 0x42d6),
Register('vr13_8_9', 1, 0x42d6),
Register('vr13_8_8', 1, 0x42d7),
Register('vr13_64_0', 8, 0x42d8),
Register('vr13_32_1', 4, 0x42d8),
Register('vr13_16_3', 2, 0x42d8),
Register('vr13_8_7', 1, 0x42d8),
Register('vr13_8_6', 1, 0x42d9),
Register('vr13_16_2', 2, 0x42da),
Register('vr13_8_5', 1, 0x42da),
Register('vr13_8_4', 1, 0x42db),
Register('vr13_32_0', 4, 0x42dc),
Register('vr13_16_1', 2, 0x42dc),
Register('vr13_8_3', 1, 0x42dc),
Register('vr13_8_2', 1, 0x42dd),
Register('vr13_16_0', 2, 0x42de),
Register('vr13_8_1', 1, 0x42de),
Register('vr13_8_0', 1, 0x42df),
Register('vs46', 16, 0x42e0),
Register('vr14_64_1', 8, 0x42e0),
Register('vr14_32_3', 4, 0x42e0),
Register('vr14_16_7', 2, 0x42e0),
Register('vr14_8_15', 1, 0x42e0),
Register('vr14_8_14', 1, 0x42e1),
Register('vr14_16_6', 2, 0x42e2),
Register('vr14_8_13', 1, 0x42e2),
Register('vr14_8_12', 1, 0x42e3),
Register('vr14_32_2', 4, 0x42e4),
Register('vr14_16_5', 2, 0x42e4),
Register('vr14_8_11', 1, 0x42e4),
Register('vr14_8_10', 1, 0x42e5),
Register('vr14_16_4', 2, 0x42e6),
Register('vr14_8_9', 1, 0x42e6),
Register('vr14_8_8', 1, 0x42e7),
Register('vr14_64_0', 8, 0x42e8),
Register('vr14_32_1', 4, 0x42e8),
Register('vr14_16_3', 2, 0x42e8),
Register('vr14_8_7', 1, 0x42e8),
Register('vr14_8_6', 1, 0x42e9),
Register('vr14_16_2', 2, 0x42ea),
Register('vr14_8_5', 1, 0x42ea),
Register('vr14_8_4', 1, 0x42eb),
Register('vr14_32_0', 4, 0x42ec),
Register('vr14_16_1', 2, 0x42ec),
Register('vr14_8_3', 1, 0x42ec),
Register('vr14_8_2', 1, 0x42ed),
Register('vr14_16_0', 2, 0x42ee),
Register('vr14_8_1', 1, 0x42ee),
Register('vr14_8_0', 1, 0x42ef),
Register('vs47', 16, 0x42f0),
Register('vr15_64_1', 8, 0x42f0),
Register('vr15_32_3', 4, 0x42f0),
Register('vr15_16_7', 2, 0x42f0),
Register('vr15_8_15', 1, 0x42f0),
Register('vr15_8_14', 1, 0x42f1),
Register('vr15_16_6', 2, 0x42f2),
Register('vr15_8_13', 1, 0x42f2),
Register('vr15_8_12', 1, 0x42f3),
Register('vr15_32_2', 4, 0x42f4),
Register('vr15_16_5', 2, 0x42f4),
Register('vr15_8_11', 1, 0x42f4),
Register('vr15_8_10', 1, 0x42f5),
Register('vr15_16_4', 2, 0x42f6),
Register('vr15_8_9', 1, 0x42f6),
Register('vr15_8_8', 1, 0x42f7),
Register('vr15_64_0', 8, 0x42f8),
Register('vr15_32_1', 4, 0x42f8),
Register('vr15_16_3', 2, 0x42f8),
Register('vr15_8_7', 1, 0x42f8),
Register('vr15_8_6', 1, 0x42f9),
Register('vr15_16_2', 2, 0x42fa),
Register('vr15_8_5', 1, 0x42fa),
Register('vr15_8_4', 1, 0x42fb),
Register('vr15_32_0', 4, 0x42fc),
Register('vr15_16_1', 2, 0x42fc),
Register('vr15_8_3', 1, 0x42fc),
Register('vr15_8_2', 1, 0x42fd),
Register('vr15_16_0', 2, 0x42fe),
Register('vr15_8_1', 1, 0x42fe),
Register('vr15_8_0', 1, 0x42ff),
Register('vs48', 16, 0x4300),
Register('vr16_64_1', 8, 0x4300),
Register('vr16_32_3', 4, 0x4300),
Register('vr16_16_7', 2, 0x4300),
Register('vr16_8_15', 1, 0x4300),
| |
and outlet")
if ((self.config.pressure_change_type != PressureChangeType.fixed_per_stage)
or (self.config.mass_transfer_coefficient == MassTransferCoefficient.calculated)):
self.length = Var(
initialize=1,
bounds=(1e-8, 1e6),
domain=NonNegativeReals,
units=units_meta('length'),
doc='Effective membrane length')
if self.config.pressure_change_type == PressureChangeType.fixed_per_unit_length:
self.dP_dx = Var(
self.flowsheet().config.time,
initialize=-1e5,
bounds=(None, -1e-10),
domain=NegativeReals,
units=units_meta('pressure')*units_meta('length')**-1,
doc="Decrease in pressure per unit length across feed channel")
if self.config.pressure_change_type == PressureChangeType.calculated:
self.velocity_io = Var(
self.flowsheet().config.time,
self.io_list,
initialize=1,
bounds=(1e-8, 10),
domain=NonNegativeReals,
units=units_meta('length')/units_meta('time'),
doc="Crossflow velocity in feed channel at inlet and outlet")
self.friction_factor_darcy_io = Var(
self.flowsheet().config.time,
self.io_list,
initialize=1,
bounds=(1e-8, 10),
domain=NonNegativeReals,
units=pyunits.dimensionless,
doc="Darcy friction factor in feed channel at inlet and outlet")
self.dP_dx_io = Var(
self.flowsheet().config.time,
self.io_list,
initialize=-1e5,
bounds=(None, -1e-10),
domain=NegativeReals,
units=units_meta('pressure')*units_meta('length')**-1,
doc="Pressure drop per unit length in feed channel at inlet and outlet")
# Build control volume for feed side
self.feed_side = ControlVolume0DBlock(default={
"dynamic": False,
"has_holdup": False,
"property_package": self.config.property_package,
"property_package_args": self.config.property_package_args})
self.feed_side.add_state_blocks(
has_phase_equilibrium=False)
self.feed_side.add_material_balances(
balance_type=self.config.material_balance_type,
has_mass_transfer=True)
self.feed_side.add_energy_balances(
balance_type=self.config.energy_balance_type,
has_enthalpy_transfer=True)
self.feed_side.add_momentum_balances(
balance_type=self.config.momentum_balance_type,
has_pressure_change=self.config.has_pressure_change)
# Add additional state blocks
tmp_dict = dict(**self.config.property_package_args)
tmp_dict["has_phase_equilibrium"] = False
tmp_dict["parameters"] = self.config.property_package
tmp_dict["defined_state"] = False # these blocks are not inlets
# Interface properties
self.feed_side.properties_interface_in = self.config.property_package.state_block_class(
self.flowsheet().config.time,
doc="Material properties of feed-side interface at inlet",
default=tmp_dict)
self.feed_side.properties_interface_out = self.config.property_package.state_block_class(
self.flowsheet().config.time,
doc="Material properties of feed-side interface at outlet",
default=tmp_dict)
# Permeate properties
self.properties_permeate = self.config.property_package.state_block_class(
self.flowsheet().config.time,
doc="Material properties of permeate",
default=tmp_dict)
# Add Ports
self.add_inlet_port(name='inlet', block=self.feed_side)
self.add_outlet_port(name='retentate', block=self.feed_side)
self.add_port(name='permeate', block=self.properties_permeate)
# References for control volume
# pressure change
if (self.config.has_pressure_change is True and
self.config.momentum_balance_type != 'none'):
self.deltaP = Reference(self.feed_side.deltaP)
# mass transfer
self.mass_transfer_phase_comp = Var(
self.flowsheet().config.time,
self.config.property_package.phase_list,
self.config.property_package.component_list,
initialize=1,
bounds=(1e-8, 1e6),
domain=NonNegativeReals,
units=units_meta('mass') * units_meta('time')**-1,
doc='Mass transfer to permeate')
@self.Constraint(self.flowsheet().config.time,
self.config.property_package.phase_list,
self.config.property_package.component_list,
doc="Mass transfer term")
def eq_mass_transfer_term(self, t, p, j):
return self.mass_transfer_phase_comp[t, p, j] == -self.feed_side.mass_transfer_term[t, p, j]
# RO performance equations
@self.Expression(self.flowsheet().config.time,
self.config.property_package.phase_list,
self.config.property_package.component_list,
doc="Average flux expression")
def flux_mass_phase_comp_avg(b, t, p, j):
return 0.5 * sum(b.flux_mass_io_phase_comp[t, x, p, j] for x in self.io_list)
@self.Constraint(self.flowsheet().config.time,
self.config.property_package.phase_list,
self.config.property_package.component_list,
doc="Permeate production")
def eq_permeate_production(b, t, p, j):
return (b.properties_permeate[t].get_material_flow_terms(p, j)
== b.area * b.flux_mass_phase_comp_avg[t, p, j])
@self.Constraint(self.flowsheet().config.time,
self.io_list,
self.config.property_package.phase_list,
self.config.property_package.component_list,
doc="Water and salt flux")
def eq_flux_io(b, t, x, p, j):
if x == 'in':
prop_feed = b.feed_side.properties_in[t]
prop_feed_inter = b.feed_side.properties_interface_in[t]
elif x == 'out':
prop_feed = b.feed_side.properties_out[t]
prop_feed_inter = b.feed_side.properties_interface_out[t]
prop_perm = b.properties_permeate[t]
comp = self.config.property_package.get_component(j)
if comp.is_solvent():
return (b.flux_mass_io_phase_comp[t, x, p, j] == b.A_comp[t, j] * b.dens_solvent
* ((prop_feed.pressure - prop_perm.pressure)
- (prop_feed_inter.pressure_osm - prop_perm.pressure_osm)))
elif comp.is_solute():
return (b.flux_mass_io_phase_comp[t, x, p, j] == b.B_comp[t, j]
* (prop_feed_inter.conc_mass_phase_comp[p, j] - prop_perm.conc_mass_phase_comp[p, j]))
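# The two branches above implement the solution-diffusion model at the inlet
# and outlet of the module:
#   solvent: Jw = A * rho_solvent * [(P_feed - P_perm) - (pi_interface - pi_perm)]
#   solute:  Js = B * (C_interface - C_perm)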
# Feed and permeate-side connection
@self.Constraint(self.flowsheet().config.time,
self.config.property_package.phase_list,
self.config.property_package.component_list,
doc="Mass transfer from feed to permeate")
def eq_connect_mass_transfer(b, t, p, j):
return (b.properties_permeate[t].get_material_flow_terms(p, j)
== -b.feed_side.mass_transfer_term[t, p, j])
@self.Constraint(self.flowsheet().config.time,
doc="Enthalpy transfer from feed to permeate")
def eq_connect_enthalpy_transfer(b, t):
return (b.properties_permeate[t].get_enthalpy_flow_terms('Liq')
== -b.feed_side.enthalpy_transfer[t])
@self.Constraint(self.flowsheet().config.time,
doc="Isothermal assumption for permeate")
def eq_permeate_isothermal(b, t):
return b.feed_side.properties_out[t].temperature == \
b.properties_permeate[t].temperature
# Concentration polarization
@self.feed_side.Constraint(self.flowsheet().config.time,
self.io_list,
self.solute_list,
doc="Concentration polarization")
def eq_concentration_polarization_io(b, t, x, j):
if x == 'in':
prop_io = b.properties_in[t]
prop_interface_io = b.properties_interface_in[t]
elif x == 'out':
prop_io = b.properties_out[t]
prop_interface_io = b.properties_interface_out[t]
if self.config.concentration_polarization_type == ConcentrationPolarizationType.none:
return prop_interface_io.conc_mass_phase_comp['Liq', j] == \
prop_io.conc_mass_phase_comp['Liq', j]
elif self.config.concentration_polarization_type == ConcentrationPolarizationType.fixed:
return (prop_interface_io.conc_mass_phase_comp['Liq', j] ==
prop_io.conc_mass_phase_comp['Liq', j]
* self.cp_modulus[t, j])
elif self.config.concentration_polarization_type == ConcentrationPolarizationType.calculated:
jw = self.flux_mass_io_phase_comp[t, x, 'Liq', 'H2O'] / self.dens_solvent
js = self.flux_mass_io_phase_comp[t, x, 'Liq', j]
return (prop_interface_io.conc_mass_phase_comp['Liq', j] ==
(prop_io.conc_mass_phase_comp['Liq', j] - js / jw)
* exp(jw / self.Kf_io[t, x, j])
+ js / jw)
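# For ConcentrationPolarizationType.calculated, the branch above is the
# stagnant-film model:
#   C_interface = (C_bulk - Js/Jw) * exp(Jw / Kf) + Js/Jw
# where Jw is the volumetric water flux and Kf the mass transfer coefficient.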
# Mass transfer coefficient calculation
if self.config.mass_transfer_coefficient == MassTransferCoefficient.calculated:
@self.Constraint(self.flowsheet().config.time,
self.io_list,
self.solute_list,
doc="Mass transfer coefficient in feed channel")
def eq_Kf_io(b, t, x, j):
if x == 'in':
prop_io = b.feed_side.properties_in[t]
elif x == 'out':
prop_io = b.feed_side.properties_out[t]
return (b.Kf_io[t, x, j] ==
prop_io.diffus_phase['Liq'] # TODO: add diff coefficient to SW prop and consider multi-components
/ b.dh
* b.N_Sh_io[t, x])
@self.Constraint(self.flowsheet().config.time,
self.io_list,
doc="Sherwood number")
def eq_N_Sh_io(b, t, x):
return (b.N_Sh_io[t, x] ==
0.46 * (b.N_Re_io[t, x] * b.N_Sc_io[t, x])**0.36)
@self.Constraint(self.flowsheet().config.time,
self.io_list,
doc="Schmidt number")
def eq_N_Sc_io(b, t, x):
if x == 'in':
prop_io = b.feed_side.properties_in[t]
elif x == 'out':
prop_io = b.feed_side.properties_out[t]
return (b.N_Sc_io[t, x] ==
prop_io.visc_d_phase['Liq']
/ prop_io.dens_mass_phase['Liq']
/ prop_io.diffus_phase['Liq'])
@self.Constraint(doc="Membrane area")
def eq_area(b):
return b.area == b.length * b.width
if (self.config.mass_transfer_coefficient == MassTransferCoefficient.calculated
or self.config.pressure_change_type == PressureChangeType.calculated):
@self.Expression(doc="Cross-sectional area")
def area_cross(b):
return b.channel_height * b.width * b.spacer_porosity
@self.Constraint(self.flowsheet().config.time,
self.io_list,
doc="Reynolds number")
def eq_N_Re_io(b, t, x):
if x == 'in':
prop_io = b.feed_side.properties_in[t]
elif x == 'out':
prop_io = b.feed_side.properties_out[t]
return (b.N_Re_io[t, x] ==
sum(prop_io.flow_mass_phase_comp['Liq', j] for j in b.solute_list)
/ b.area_cross
* b.dh
/ prop_io.visc_d_phase['Liq'])
@self.Constraint(doc="Hydraulic diameter") # TODO: add detail related to spacer geometry
def eq_dh(b):
return (b.dh ==
2 * (b.channel_height * b.width)
/ (b.channel_height + b.width))
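# Note: for an open rectangular channel the hydraulic diameter 4*A/P reduces
# to 2*h*w/(h + w), which is what eq_dh encodes; spacer geometry is not yet
# accounted for (see TODO above).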
if self.config.pressure_change_type == PressureChangeType.fixed_per_unit_length:
# Pressure change equation when dP/dx = user-specified constant,
@self.Constraint(self.flowsheet().config.time,
doc="pressure change due to friction")
def eq_pressure_change(b, t):
return (b.deltaP[t] == b.dP_dx[t] * b.length)
elif self.config.pressure_change_type == PressureChangeType.calculated:
# Crossflow velocity at inlet and outlet
@self.Constraint(self.flowsheet().config.time,
self.io_list,
doc="Crossflow velocity constraint")
def eq_velocity_io(b, t, x):
if x == 'in':
prop_io = b.feed_side.properties_in[t]
elif x == 'out':
prop_io = b.feed_side.properties_out[t]
return b.velocity_io[t, x] * b.area_cross == prop_io.flow_vol_phase['Liq']
# Darcy friction factor based on eq. S27 in SI for Cost Optimization of Osmotically Assisted Reverse Osmosis
# TODO: this relationship for friction factor is specific to a particular spacer geometry. Add alternatives.
@self.Constraint(self.flowsheet().config.time,
self.io_list,
doc="Darcy friction factor constraint")
def eq_friction_factor_darcy_io(b, t, x):
return (b.friction_factor_darcy_io[t, x] - 0.42) * b.N_Re_io[t, x] == 189.3
# Pressure change per unit length due to friction,
# -1/2*f/dh*density*velocity^2
@self.Constraint(self.flowsheet().config.time,
self.io_list,
doc="pressure change per unit length due to friction")
def eq_dP_dx_io(b, t, x):
if x == 'in':
prop_io = b.feed_side.properties_in[t]
elif x == 'out':
prop_io = b.feed_side.properties_out[t]
return (b.dP_dx_io[t, x] * b.dh ==
-0.5 * b.friction_factor_darcy_io[t, x]
* prop_io.dens_mass_phase['Liq'] * b.velocity_io[t, x]**2)
# Average pressure change per unit length due to friction
@self.Expression(self.flowsheet().config.time,
doc="expression for average pressure change per unit length due to friction")
def dP_dx_avg(b, t):
return 0.5 * sum(b.dP_dx_io[t, x] for x in b.io_list)
# Pressure change equation
@self.Constraint(self.flowsheet().config.time,
doc="pressure change due to friction")
def eq_pressure_change(b, t):
return b.deltaP[t] == b.dP_dx_avg[t] * b.length
# Bulk and interface connection on the feed-side
@self.feed_side.Constraint(self.flowsheet().config.time,
self.io_list,
doc="Temperature at interface")
def eq_equal_temp_interface_io(b, t, x):
if x == 'in':
prop_io = b.properties_in[t]
prop_interface_io = b.properties_interface_in[t]
elif x == 'out':
prop_io = b.properties_out[t]
prop_interface_io = b.properties_interface_out[t]
return prop_interface_io.temperature == \
prop_io.temperature
@self.feed_side.Constraint(self.flowsheet().config.time,
self.io_list,
doc="Pressure at interface")
def eq_equal_pressure_interface_io(b, t, x):
if x == 'in':
prop_io = b.properties_in[t]
prop_interface_io = b.properties_interface_in[t]
elif x == 'out':
prop_io = b.properties_out[t]
prop_interface_io = b.properties_interface_out[t]
return prop_interface_io.pressure == \
prop_io.pressure
@self.feed_side.Constraint(self.flowsheet().config.time,
self.io_list,
doc="Volumetric flow at interface of inlet")
def eq_equal_flow_vol_interface_io(b, t, x):
if x == 'in':
prop_io = b.properties_in[t]
prop_interface_io = b.properties_interface_in[t]
elif x == 'out':
prop_io = b.properties_out[t]
prop_interface_io = b.properties_interface_out[t]
return prop_interface_io.flow_vol_phase['Liq'] ==\
prop_io.flow_vol_phase['Liq']
def initialize(
blk,
state_args=None,
outlvl=idaeslog.NOTSET,
solver="ipopt",
optarg={"tol": 1e-6}):
"""
General wrapper for initializing the RO unit model
Keyword Arguments:
state_args : a dict of arguments to be passed to the property
package(s) to provide an initial state for
initialization (see documentation of the specific
property package) (default = {}).
outlvl : sets output level of initialization routine
optarg : solver options dictionary object (default={'tol': 1e-6})
solver : solver object or string indicating which solver to use during
initialization, if None provided the default solver will be used
(default = None)
Returns:
None
"""
init_log = idaeslog.getInitLogger(blk.name, outlvl, tag="unit")
solve_log = idaeslog.getSolveLogger(blk.name, outlvl, tag="unit")
# Set solver and options
# TODO: clean up once IDAES new API for initialize solvers is released
if isinstance(solver, str):
opt = SolverFactory(solver)
opt.options = optarg
else:
if solver is None:
opt = get_default_solver()
else:
opt = solver
opt.options = optarg
# ---------------------------------------------------------------------
# Initialize holdup block
flags = blk.feed_side.initialize(
outlvl=outlvl,
optarg=optarg,
solver=solver,
state_args=state_args,
)
init_log.info_high("Initialization Step 1 Complete.")
# ---------------------------------------------------------------------
# Initialize permeate
# Set state_args from inlet state
if state_args is None:
state_args = {}
state_dict = blk.feed_side.properties_in[
blk.flowsheet().config.time.first()].define_port_members()
for k in state_dict.keys():
if state_dict[k].is_indexed():
state_args[k] = {}
for m in state_dict[k].keys():
state_args[k][m] = state_dict[k][m].value
else:
state_args[k] = state_dict[k].value
| |
<reponame>ralic/gnu_pymp3frame
# Copyright (c) 2008 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import division, absolute_import
import struct
import array
from . import mp3bits, mp3ext, frames, side_info, errors
class BaseSync(object):
"""BaseSync() -> object
Return an object that can store raw data from an MPEG audio file, identify
syncwords and tags within this data, and search for syncwords.
PhysicalFrameSync would normally be used instead."""
def __init__(self):
self.data = array.array('B')
self.bytes_returned = 0
# set to True when we see the EOF
self.read_eof = False
# the number of bytes in self.data that can be skipped when
# looking for a syncword
self.sync_skip = 0
# The stream will be considered synchronized when
# (head & sync_mask) == sync_header,
# where 'head' is the first 4 data bytes. Code may assume the high
# 11 bits will always be set in both sync_* variables. The MP3 spec
# suggests using the high 16 bits as a syncword once the expected
# value is known (but note that it's probably a bad idea to base
# sync_header on a VBR header frame).
self.sync_header = 0xffe0 << 16
self.sync_mask = self.sync_header
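# Illustrative check: a typical MPEG-1 Layer III header such as 0xFFFB9040
# satisfies (0xFFFB9040 & 0xFFE00000) == 0xFFE00000, so the default mask only
# requires the 11 sync bits; tightening sync_mask to 0xFFFF0000 (the high 16 bits
# mentioned above) would also pin down the version, layer and protection bits.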
done = property(lambda s: s.read_eof and not len(s.data),
doc="True if all data from the input file has been processed.")
def fromfile(self, file, bytes=4096):
"""fromfile(file[, bytes]) -> None
Reads some data from the given file into the internal buffer."""
if self.read_eof:
raise errors.MP3UsageError('tried to write data after EOF')
try:
self.data.fromfile(file, bytes)
except EOFError:
# any data read before EOF will have been added
self.read_eof = True
def _is_sync(self, pos=0, sync_header=None, sync_mask=None):
d = self.data
head = ( (d[pos] << 24) | (d[pos+1] << 16)
| (d[pos+2] << 8) | d[pos+3] )
masked_head = (head & (sync_mask or self.sync_mask))
return masked_head == (sync_header or self.sync_header)
def resync(self, offset=0, header=None, mask=None):
"""resync(offset=0[, header, mask]) -> int
Find the next syncword, ignoring the first 'offset' bytes in the buffer.
The ignored bytes remain in the buffer, but won't be checked for sync
patterns in the future (unless sync_skip is reset). If 'header' and 'mask'
are specified, they override sync_header and sync_mask.
Returns the sync position (as a buffer offset), or -1."""
d = self.data
offset = max(offset, self.sync_skip)
while 1:
dsub = d
if offset > 0:
dsub = d[offset:]
try:
pos = dsub.index(255) + offset
except ValueError:
self.sync_skip = len(d)
return -1
if pos + 4 > len(d):
# too close to the end to check for sync
self.sync_skip += pos
return -1
elif self._is_sync(pos, header, mask):
self.sync_skip += pos
return self.sync_skip
self.sync_skip += pos + 1
offset = self.sync_skip
def identify(self):
"""identify() -> None or tuple
Identify the data at the beginning of the internal buffer.
Return value:
None - need more data
('sync',) - a syncword
('garbage', size) - unidentifiable data
('tag', size, type) - a comment tag
Note that the returned size may be greater than the amount of data
currently stored in the buffer."""
d = self.data
if len(d) < 4:
if self.read_eof and d:
return ('garbage', len(d))
else:
return None
if self._is_sync():
return ('sync',)
(tagtype, tagsize) = mp3ext.identify_tag(d, self.read_eof)
if tagsize > 0:
return ('tag', tagsize, tagtype)
elif tagsize == -1:
# need more data to determine whether this is a tag
return None
# this data looks like garbage; try to find the next syncword
# (we know there are at least 4 bytes in the buffer)
syncpos = self.resync()
if syncpos > 0:
return ('garbage', syncpos)
# we can't sync, but not all the data is necessarily garbage;
# there could be a partial sync pattern at the end
if self.sync_skip > 0:
return ('garbage', self.sync_skip)
else:
return None
def advance(self, bytes):
"""advance(bytes) -> None
Discards the specified number of bytes from the front of the buffer."""
if (bytes > len(self.data)) or (bytes < 0):
raise errors.MP3UsageError("invalid byte count")
self.bytes_returned += bytes
self.data = self.data[bytes:]
self.sync_skip = max(0, self.sync_skip - bytes)
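# A minimal usage sketch of BaseSync (illustrative only; 'some.mp3' is a placeholder
# filename and PhysicalFrameSync below is the intended entry point):
#
#   bs = BaseSync()
#   with open('some.mp3', 'rb') as f:
#       while not bs.done:
#           if not bs.read_eof:
#               bs.fromfile(f)
#           ident = bs.identify()
#           if ident is None:
#               if bs.read_eof:
#                   break                     # nothing more can be identified
#               continue                      # need more data
#           if ident[0] == 'sync':
#               break                         # frame header at the front of the buffer
#           if ident[1] > len(bs.data) and not bs.read_eof:
#               continue                      # tag larger than the buffered data; read more first
#           bs.advance(ident[1])              # skip a tag or garbage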
class PhysicalFrameSync(BaseSync):
"""PhysicalFrameSync() -> object
Return an object that will interpret the various types of data found in an
MPEG audio file and construct objects for examining them."""
def __init__(self):
BaseSync.__init__(self)
self.synced = True
self.frames_returned = 0
# base_framesize is the size of a non-padded freeform frame; if set,
# we'll assume all frames are this size. A value of -1 (the default)
# means this will be autodetected; a value of 0 will disable this
# behaviour (and force frame sizes to be calculated by searching for
# the next syncword).
self.base_framesize = -1
def readitem(self):
"""readitem() -> None or 2-tuple
Remove the next item from the internal buffer and return it.
Return value:
None - need more data
('frame', MP3Frame)
('tag', CommentTag)
('garbage', array) - unidentifiable bytes"""
d = self.data
if len(d) < 4:
return None
ident = self.identify()
if not ident:
return None
dtype = ident[0]
if dtype != 'sync':
self.synced = (dtype != 'garbage')
size = ident[1]
if len(d) < size:
return None
data = d[:size]
self.advance(size)
if dtype == 'tag':
tagtype = ident[2]
return (dtype, frames.CommentTag(tagtype, data))
else:
return (dtype, data)
fr = self._create_frame()
if type(fr) == str:
# we got an error code instead of a frame
if fr == 'moredata':
if not self.read_eof:
return None
# treat all remaining data as garbage
size = len(d)
else:
assert fr == 'resync'
size = 1
self.synced = False
ret = d[:size]
self.advance(size)
return ('garbage', ret)
else:
return ('frame', fr)
# Assume there's a frame at the start of self.data, and return it.
# Returns an MP3Frame instance, 'resync', or 'moredata'
def _create_frame(self):
d = self.data
# we have a frame header; try to determine the frame size
head = frames.FrameHeader(d)
headsz = 4
if head.protection_bit == 0:
headsz += 2
try:
sz = head.frame_size
if head.layer_index == 1: # layer 3
sidesz = head.side_info_size
else:
sidesz = 0
if len(d) < (sz or (headsz + sidesz)):
return 'moredata'
except errors.MP3DataError:
# on closer inspection, this isn't a valid frame
return 'resync'
# we have the side info, if applicable;
# as well as the full frame if its size is known
if sidesz:
raw_si = d[headsz:headsz+sidesz]
si_obj = side_info.SideInfo(head.version_index,
head.channel_mode, raw_si)
else:
raw_si = None
si_obj = None
if not sz and self.base_framesize:
# this is a free-format frame; all such frames need to be the
# same size within a file (except for padding), and we have an
# expected size
sz = self.base_framesize
if head.padding:
sz += head.sample_size
if not sz:
# this is a free-format frame; we don't know the expected size,
# so we'll try to find the next syncword (and won't consider
# this a resync)
offset = headsz + sidesz
if si_obj:
# the frame can't end before part2_3_end,
# so skip all data until that point
offset += max(0, si_obj.part2_3_end)
# search for another syncword with the same MPEG version, layer,
# protection_bit, bitrate (free format), and samplerate
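## (0xfffffc00 keeps the top 22 header bits: the 11 sync bits plus the version,
## layer, protection_bit, bitrate and samplerate fields, while ignoring the
## padding and private bits)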
sync_header = (0xff << 24) | (d[1] << 16) | (d[2] << 8)
sz = self.resync(offset, sync_header, 0xfffffc00)
if sz == -1:
if len(d) >= 8192:
# we should have enough data to locate another syncword;
# assume this 'free-format frame' was just garbage
self.sync_skip = 0
return 'resync'
elif not self.read_eof:
return 'moredata'
# we won't be getting more data, so return everything
# until EOF -- excluding the id3v1 tag, if present
sz = len(d)
if sz > 128:
tagsz = mp3ext.id3v1_size(d[-128:], True)
if tagsz > 0:
sz -= tagsz
# found a syncword; now that the frame size is known,
# store it for future use
assert sz >= offset
if self.base_framesize < 0:
base_sz = sz
if head.padding:
base_sz -= head.sample_size
self.base_framesize = base_sz
assert sz > 0
if len(d) < sz:
return 'moredata'
# we have the full frame
fr = frames.MP3Frame()
fr.header = head
if si_obj:
fr.side_info = si_obj
fr.raw_body = d[headsz+sidesz:sz]
if head.protection_bit == 0:
fr.crc16 = (d[4] << 8) | d[5]
else:
fr.crc16 = None
fr.resynced = not self.synced
fr.frame_number = self.frames_returned
fr.byte_position = self.bytes_returned # managed by BaseSync
self.advance(sz)
self.frames_returned += 1
self.synced = True
return fr
class FileSyncWrapper(object):
"""FileSyncWrapper(sync, file) -> object
Return a wrapper that can be used to conveniently access a PhysicalFrameSync
or LogicalFrameSync instance; data will be automatically fed into this object
from the specified file as required."""
def __init__(self, sync, file):
self.file = file
self.sync = sync
self.max_buffer = 4*1024*1024
def readitem(self):
"""readitem()
Call sync.readitem() and return the result if it's not None.
Otherwise, feed the sync | |
the invoice, which will be shared with the payment provider.
:type provider_data: :obj:`typing.Union[typing.Dict, None]`
:param photo_url: URL of the product photo for the invoice.
:type photo_url: :obj:`typing.Union[base.String, None]`
:param photo_size: Photo size
:type photo_size: :obj:`typing.Union[base.Integer, None]`
:param photo_width: Photo width
:type photo_width: :obj:`typing.Union[base.Integer, None]`
:param photo_height: Photo height
:type photo_height: :obj:`typing.Union[base.Integer, None]`
:param need_name: Pass True, if you require the user's full name to complete the order
:type need_name: :obj:`typing.Union[base.Boolean, None]`
:param need_phone_number: Pass True, if you require the user's phone number to complete the order
:type need_phone_number: :obj:`typing.Union[base.Boolean, None]`
:param need_email: Pass True, if you require the user's email to complete the order
:type need_email: :obj:`typing.Union[base.Boolean, None]`
:param need_shipping_address: Pass True, if you require the user's shipping address to complete the order
:type need_shipping_address: :obj:`typing.Union[base.Boolean, None]`
:param is_flexible: Pass True, if the final price depends on the shipping method
:type is_flexible: :obj:`typing.Union[base.Boolean, None]`
:param disable_notification: Sends the message silently. Users will receive a notification with no sound.
:type disable_notification: :obj:`typing.Union[base.Boolean, None]`
:param reply_to_message_id: If the message is a reply, ID of the original message
:type reply_to_message_id: :obj:`typing.Union[base.Integer, None]`
:param reply_markup: A JSON-serialized object for an inline keyboard.
If empty, one 'Pay total price' button will be shown. If not empty, the first button must be a Pay button.
:type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup, None]`
:return: On success, the sent Message is returned.
:rtype: :obj:`types.Message`
"""
prices = prepare_arg([price.to_python() if hasattr(price, 'to_python') else price for price in prices])
reply_markup = prepare_arg(reply_markup)
payload_ = generate_payload(**locals())
result = await self.request(api.Methods.SEND_INVOICE, payload_)
return types.Message(**result)
async def answer_shipping_query(self, shipping_query_id: base.String, ok: base.Boolean,
shipping_options: typing.Union[typing.List[types.ShippingOption], None] = None,
error_message: typing.Union[base.String, None] = None) -> base.Boolean:
"""
If you sent an invoice requesting a shipping address and the parameter is_flexible was specified,
the Bot API will send an Update with a shipping_query field to the bot.
Source: https://core.telegram.org/bots/api#answershippingquery
:param shipping_query_id: Unique identifier for the query to be answered
:type shipping_query_id: :obj:`base.String`
:param ok: Specify True if delivery to the specified address is possible and False if there are any problems
(for example, if delivery to the specified address is not possible)
:type ok: :obj:`base.Boolean`
:param shipping_options: Required if ok is True. A JSON-serialized array of available shipping options.
:type shipping_options: :obj:`typing.Union[typing.List[types.ShippingOption], None]`
:param error_message: Required if ok is False.
Error message in human readable form that explains why it is impossible to complete the order
(e.g. "Sorry, delivery to your desired address is unavailable").
Telegram will display this message to the user.
:type error_message: :obj:`typing.Union[base.String, None]`
:return: On success, True is returned.
:rtype: :obj:`base.Boolean`
"""
if shipping_options:
shipping_options = prepare_arg([shipping_option.to_python()
if hasattr(shipping_option, 'to_python')
else shipping_option
for shipping_option in shipping_options])
payload = generate_payload(**locals())
result = await self.request(api.Methods.ANSWER_SHIPPING_QUERY, payload)
return result
async def answer_pre_checkout_query(self, pre_checkout_query_id: base.String, ok: base.Boolean,
error_message: typing.Union[base.String, None] = None) -> base.Boolean:
"""
Once the user has confirmed their payment and shipping details,
the Bot API sends the final confirmation in the form of an Update with the field pre_checkout_query.
Use this method to respond to such pre-checkout queries.
Source: https://core.telegram.org/bots/api#answerprecheckoutquery
:param pre_checkout_query_id: Unique identifier for the query to be answered
:type pre_checkout_query_id: :obj:`base.String`
:param ok: Specify True if everything is alright (goods are available, etc.) and the
bot is ready to proceed with the order. Use False if there are any problems.
:type ok: :obj:`base.Boolean`
:param error_message: Required if ok is False.
Error message in human readable form that explains the reason for failure to proceed with the checkout
(e.g. "Sorry, somebody just bought the last of our amazing black T-shirts while you were busy filling
out your payment details. Please choose a different color or garment!").
Telegram will display this message to the user.
:type error_message: :obj:`typing.Union[base.String, None]`
:return: On success, True is returned.
:rtype: :obj:`base.Boolean`
"""
payload = generate_payload(**locals())
result = await self.request(api.Methods.ANSWER_PRE_CHECKOUT_QUERY, payload)
return result
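# A minimal usage sketch (illustrative; the dispatcher/bot names 'dp' and 'bot' are
# assumptions, not part of this module):
#
#   @dp.pre_checkout_query_handler()
#   async def checkout(pre_checkout_query: types.PreCheckoutQuery):
#       await bot.answer_pre_checkout_query(pre_checkout_query.id, ok=True)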
# === Games ===
# https://core.telegram.org/bots/api#games
async def set_passport_data_errors(self,
user_id: base.Integer,
errors: typing.List[types.PassportElementError]) -> base.Boolean:
"""
Informs a user that some of the Telegram Passport elements they provided contain errors.
The user will not be able to re-submit their Passport to you until the errors are fixed
(the contents of the field for which you returned the error must change).
Returns True on success.
Use this if the data submitted by the user doesn't satisfy the standards your service
requires for any reason. For example, if a birthday date seems invalid, a submitted document
is blurry, a scan shows evidence of tampering, etc. Supply some details in the error message
to make sure the user knows how to correct the issues.
Source https://core.telegram.org/bots/api#setpassportdataerrors
:param user_id: User identifier
:type user_id: :obj:`base.Integer`
:param errors: A JSON-serialized array describing the errors
:type errors: :obj:`typing.List[types.PassportElementError]`
:return: Returns True on success.
:rtype: :obj:`base.Boolean`
"""
errors = prepare_arg(errors)
payload = generate_payload(**locals())
result = await self.request(api.Methods.SET_PASSPORT_DATA_ERRORS, payload)
return result
# === Games ===
# https://core.telegram.org/bots/api#games
async def send_game(self, chat_id: base.Integer, game_short_name: base.String,
disable_notification: typing.Union[base.Boolean, None] = None,
reply_to_message_id: typing.Union[base.Integer, None] = None,
reply_markup: typing.Union[types.InlineKeyboardMarkup, None] = None) -> types.Message:
"""
Use this method to send a game.
Source: https://core.telegram.org/bots/api#sendgame
:param chat_id: Unique identifier for the target chat
:type chat_id: :obj:`base.Integer`
:param game_short_name: Short name of the game, serves as the unique identifier for the game. \
Set up your games via Botfather.
:type game_short_name: :obj:`base.String`
:param disable_notification: Sends the message silently. Users will receive a notification with no sound.
:type disable_notification: :obj:`typing.Union[base.Boolean, None]`
:param reply_to_message_id: If the message is a reply, ID of the original message
:type reply_to_message_id: :obj:`typing.Union[base.Integer, None]`
:param reply_markup: A JSON-serialized object for an inline keyboard.
If empty, one ‘Play game_title’ button will be shown. If not empty, the first button must launch the game.
:type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup, None]`
:return: On success, the sent Message is returned.
:rtype: :obj:`types.Message`
"""
reply_markup = prepare_arg(reply_markup)
payload = generate_payload(**locals())
result = await self.request(api.Methods.SEND_GAME, payload)
return types.Message(**result)
async def set_game_score(self, user_id: base.Integer, score: base.Integer,
force: typing.Union[base.Boolean, None] = None,
disable_edit_message: typing.Union[base.Boolean, None] = None,
chat_id: typing.Union[base.Integer, None] = None,
message_id: typing.Union[base.Integer, None] = None,
inline_message_id: typing.Union[base.String,
None] = None) -> types.Message or base.Boolean:
"""
Use this method to set the score of the specified user in a game.
Source: https://core.telegram.org/bots/api#setgamescore
:param user_id: User identifier
:type user_id: :obj:`base.Integer`
:param score: New score, must be non-negative
:type score: :obj:`base.Integer`
:param force: Pass True, if the high score is allowed to decrease.
This can be useful when fixing mistakes or banning cheaters
:type force: :obj:`typing.Union[base.Boolean, None]`
:param disable_edit_message: Pass True, if the game message should not be automatically
edited to include the current scoreboard
:type disable_edit_message: :obj:`typing.Union[base.Boolean, None]`
:param chat_id: Required if inline_message_id is not specified. Unique identifier for the target chat
:type chat_id: :obj:`typing.Union[base.Integer, None]`
:param message_id: Required if inline_message_id is not specified. Identifier of the sent message
:type message_id: :obj:`typing.Union[base.Integer, None]`
:param inline_message_id: Required if chat_id and message_id are not specified. Identifier of the inline message
:type inline_message_id: :obj:`typing.Union[base.String, None]`
:return: On success, if the message was sent by the bot, returns the edited Message, otherwise returns True.
Returns an error, if the new score is not greater than the user's
current score in the chat and force is False.
:rtype: :obj:`typing.Union[types.Message, base.Boolean]`
"""
payload = generate_payload(**locals())
result = await self.request(api.Methods.SET_GAME_SCORE, payload)
if isinstance(result, bool):
return result
return types.Message(**result)
async def get_game_high_scores(self, user_id: base.Integer,
chat_id: typing.Union[base.Integer, None] = None,
message_id: typing.Union[base.Integer, None] = None,
inline_message_id: typing.Union[base.String,
None] = None) -> typing.List[types.GameHighScore]:
"""
Use this method to get data for high score tables.
This method will currently return scores for the target user, plus two of his closest neighbors on each side.
Will also return the top three users if the user and his neighbors are not among them.
Please note that this behavior is subject to change.
Source: https://core.telegram.org/bots/api#getgamehighscores
:param user_id: Target user id
:type user_id: :obj:`base.Integer`
:param chat_id: Required if inline_message_id is not specified. Unique identifier | |
<gh_stars>0
#Last Update 09/11/2021 by AP
"""
ReadMe:
The workflow for the XANES experiment is defined below. This macro aims to use one flow for XANES of any given element.
This macro is designed to work with the GUI inputs as well.
To add a new element, add the parameter file in the format given below
EXAMPLE OF USAGE:
For XANES Scan: <zp_list_xanes2d(FeXANES,dets1,zpssx,-13,11,150,zpssy,-13,11,150,0.05,
xcen = 0, ycen = 0,doAlignScan = False, alignElem = 'Fe',
alignX = (-1,1,100,0.1),
alignY = (-1,1,100,0.1), pdfElem = ['Fe','Cr'],
saveLogFolder = '/data/users/2021Q3/Ajith_2021Q3')
For Foil Calibration: <zp_list_xanes2d(e_list,dets6,mot1,x_s,x_e,x_num,mot2,y_s,y_e,y_num,accq_t,
xcen = 0, ycen = 0, doAlignScan = False, pdfLog = False,
foilCalibScan = True, peakBeam = False)
"""
import numpy as np
from datetime import datetime
import pandas as pd
import scipy.constants as consts
#Parameter list from previous runs in the order of atomic number of the element
CrXANES = {'high_e':6.03, 'low_e':5.97,
'high_e_ugap':6670, 'low_e_ugap':6620,
'high_e_crl':14, 'low_e_crl':14,'crl_comb':(8),
'high_e_zpz1':10.08, 'zpz1_slope':-5.9,
'energy':[(5.97,5.98,0.005),(5.981,6.03,0.001), (6.032,6.046,0.005)],
'mirrorCoating': 'Si'}
CrXANES_top = {'high_e':6.03, 'low_e':5.97,
'high_e_ugap':6670, 'low_e_ugap':6620,
'high_e_crl':14, 'low_e_crl':14,'crl_comb':(8),
'high_e_zpz1':10.433, 'zpz1_slope':-5.04,
'energy':[(5.97,5.98,0.005),(5.981,6.03,0.001), (6.032,6.046,0.005)],
'mirrorCoating': 'Si'}
CrXANES_bottom = {'high_e':6.03, 'low_e':5.97,
'high_e_ugap':6670, 'low_e_ugap':6620,
'high_e_crl':14, 'low_e_crl':14,'crl_comb':(8),
'high_e_zpz1':10.39, 'zpz1_slope':-5.04,
'energy':[(5.97,5.98,0.005),(5.981,6.03,0.001), (6.032,6.046,0.005)],
'mirrorCoating': 'Si'}
MnXANES = {'high_e':6.6, 'low_e':6.5,
'high_e_ugap':7142, 'low_e_ugap':7057,
'high_e_crl':-12, 'low_e_crl':-12,'crl_comb':(8,6),
'high_e_zpz1':68.3165, 'zpz1_slope':-5.04,
'energy':[(6.520,6.530,0.005),(6.531,6.580,0.001),(6.585,6.601,0.005)],
'mirrorCoating': 'Si'}
FeXANES = {'high_e':7.2, 'low_e':7.1,
'high_e_ugap':7695, 'low_e_ugap':7605,
'high_e_crl':4, 'low_e_crl':-6,'crl_comb':(12),
'high_e_zpz1':4.3826, 'zpz1_slope':-5.04,
'energy':[(7.08,7.10,0.002),(7.101,7.144,0.001),(7.146, 7.2, 0.002)],
'mirrorCoating': 'Si or Rh', 'zposaz':5000}
FeCalib = {'high_e':7.2, 'low_e':7.1,
'high_e_ugap':7695, 'low_e_ugap':7605,
'high_e_crl':4, 'low_e_crl':-6,'crl_comb':(12),
'high_e_zpz1':4.3826, 'zpz1_slope':-5.04,
'energy':[(7.08, 7.15, 0.001)],
'mirrorCoating': 'Si or Rh', 'zposaz':5000}
ZnXANES = {'high_e':9.7, 'low_e':9.6,
'high_e_ugap':6485, 'low_e_ugap':6435,
'high_e_crl':5., 'low_e_crl':2.,'crl_comb':(22+4),
'high_e_zpz1':50.87, 'zpz1_slope':-5.9,
'energy':[(9.64,9.666,0.005),(9.6665,9.681,.0005),(9.682,9.701,0.002),(9.705,9.725,0.005)]}
NiXANES = {'high_e':8.427, 'low_e':8.3,
'high_e_ugap':5834, 'low_e_ugap':5768,
'high_e_crl':11.81, 'low_e_crl':8,'crl_comb':(12+4),
'high_e_zpz1':58.32, 'zpz1_slope':-5.9,
'energy':[(8.30,8.32,0.005),(8.321,8.38,0.001),(8.382,8.430,0.005)],
'mirrorCoating': 'Cr','zposaz':3500}
CuXANES = {'high_e':9.06, 'low_e':8.96,
'high_e_ugap':6165, 'low_e_ugap':6115,
'high_e_crl':10, 'low_e_crl':10,'crl_comb':(22),
'high_e_zpz1':-4.905, 'zpz1_slope':-5.04,
'energy':[(8.96,8.975,0.005),(8.976,9.003,0.001)],
'mirrorCoating': 'Cr','zposaz':3500}
CuCalib = {'high_e':9.05, 'low_e':8.96,
'high_e_ugap':6160, 'low_e_ugap':6115,
'high_e_crl':10, 'low_e_crl':10,'crl_comb':(22),
'high_e_zpz1':-4.76, 'zpz1_slope':-5.04,
'energy':[(8.96,9.04,0.001)],
'mirrorCoating': 'Cr','zposaz':3500}
FeXANES= {'high_e':7.5, 'low_e':7.1,
'high_e_zpz1':2.73, 'zpz1_slope':-5.04,
'energy':[(6.97,7.11,0.005),(7.111,7.150,0.001),(7.155,7.6,0.005)],
'mirror': 'Si','pitch' :0.4725, 'm2p':1.301442}
"""
#Hafnium L_III edge
#ZP #1, 244 um dia, 30 nm outmost, crl# 22, 3
pre = np.linspace(9.540,9.550,6)
XANES1 = np.linspace(9.552,9.580,29)
post = np.linspace(9.582,9.640,30)
#Hafnium L_III edge
#ZP #1, 244 um dia, 30 nm outmost, crl# 22, 3
pre = np.linspace(9.400,9.540,15)
XANES1 = np.linspace(9.542,9.560,10)
XANES2 = np.linspace(9.561,9.571,21)
XANES3 = np.linspace(9.572,9.642,36)
post = np.linspace(9.644,9.84,50)
PreAs = np.linspace(11845,11860,6)
As_XANES = np.linspace(11861,11885,49)
PostAs = np.linspace(11886,11901,6)
"""
######################################
######### FUNCTIONS BELOW ############
######################################
#copied from larch --modified
KTOE = 1.e20*consts.hbar**2 / (2*consts.m_e * consts.e) # 3.8099819442818976
ETOK = 1.0/KTOE
def etok(energy):
"""convert photo-electron energy to wavenumber"""
if isinstance(energy, list):
energy = np.array(energy)
if energy < 0: return 0
return np.around(np.sqrt(energy*ETOK),2)
def ktoe(k):
"""convert photo-electron wavenumber to energy"""
if isinstance(k, list):
k = np.array(k)
return np.around(k*k*KTOE, 1)
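# Worked example (E in eV relative to the edge, k in 1/Angstrom, the usual EXAFS convention):
# etok(100) = sqrt(100/3.80998...) ~ 5.12, and ktoe(5.12) = 5.12**2 * 3.80998... ~ 99.9,
# so the two conversions round-trip up to the rounding applied above.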
def generateEPoints(ePointsGen = [(9.645,9.665,0.005),(9.666,9.7,0.0006),(9.705,9.725,0.005)],reversed = True):
"""
Generates a list of energy values from the given list
input: Tuples in the format (start energy, end energy, energy resolution),
if reversed is True, the list will be returned in reverse order
return : list of energy points
"""
e_points = []
for values in ePointsGen:
#use np.arange to generate values and extend it to the e_points list
e_points.extend(np.arange(values[0],values[1],values[2]))
if reversed:
#return the list in reversed order
return e_points[::-1]
else:
return e_points
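# Example (values are illustrative): generateEPoints([(7.10, 7.12, 0.01)], reversed=False)
# yields [7.10, 7.11] (np.arange excludes the end point), while reversed=True returns [7.11, 7.10].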
def generateEList(XANESParam = CrXANES, highEStart = True):
"""
Generates a pandas dataframe of optics motor positions. The function uses the high E and low E values in the dictionary
to generate motor positions for all the energy points, assuming a linear relationship.
input: Dictionary containing optics values at 2 positions (high E and low E), option to start from high E or low E
return : Dataframe looks like below;
energy ugap crl_theta ZP focus
0 7.175 7652.5 1.75 65.6575
1 7.170 7648.0 1.30 65.6870
2 7.165 7643.5 0.85 65.7165
3 7.160 7639.0 0.40 65.7460
4 7.155 7634.5 -0.05 65.7755
"""
# empty dataframe
e_list = pd.DataFrame()
#add list of energy as first column to DF
e_list['energy'] = generateEPoints(ePointsGen = XANESParam['energy'], reversed = highEStart)
#read the parameter dictionary and compute the zone-plate focus list
high_e, low_e = XANESParam['high_e'],XANESParam['low_e']
#zone plate increment is very close to the theoretical value; same step as above for zp focus
zpz1_ref, zpz1_slope = XANESParam['high_e_zpz1'],XANESParam['zpz1_slope']
zpz1_list = zpz1_ref + (e_list['energy'] - high_e)*zpz1_slope
e_list['ZP focus'] = zpz1_list
#return the dataframe
return e_list
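# Worked example using the first FeXANES entry above (high_e=7.2, high_e_zpz1=4.3826, zpz1_slope=-5.04):
# at E = 7.1 the table entry is ZP focus = 4.3826 + (7.1 - 7.2)*(-5.04) = 4.8866.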
def peak_the_flux():
""" Scan the c-bpm set points to find IC3 maximum """
print("IC3 is below threshold; peaking the beam.")
yield from bps.sleep(2)
yield from peak_bpm_y(-5,5,10)
yield from bps.sleep(1)
yield from peak_bpm_x(-15,15,6)
yield from bps.sleep(1)
yield from peak_bpm_y(-2,2,4)
def move_energy(e,zpz_ ):
yield from bps.sleep(1)
#turning the scanning PV on to disable the c-bpms
caput('XF:03IDC-ES{Status}ScanRunning-I', 1)
yield from Energy.move(e, 3)
yield from mov_zpz1(zpz_)
yield from bps.sleep(1)
def zp_list_xanes2d(elemParam,dets,mot1,x_s,x_e,x_num,mot2,y_s,y_e,y_num,accq_t,highEStart = True,
doAlignScan = True, alignX = (-2,2,100,0.1,'Fe',0.7, True),
alignY = (-2,2,100,0.1,'Fe',0.7, True),
pdfElem = ('Fe','Cr'),doScan = True, moveOptics = True,pdfLog = True,
foilCalibScan = False, peakBeam = True,
saveLogFolder = '/home/xf03id/Downloads'):
"""
Function to run XANES Scan.
Arguments:
1. elemParam: Dictionary - containing low and high energy optics positions and other useful info
2. dets: list - detector system in use
3. mot1, mot2: EpicsMotors- Motors used for 2D scanning (eg: zpssx, zpssy, etc)
4. xs,xe,ys,ye: float - scan start and end positions in X&Y directions
5. x_num,y_num: float - number of steps in X&Y directions
6. accq_t: float - acquisition (dwell) time for flyscan
7. highEStart: boolean - if True start the stack with high energies first (descending order)
8. doAlignScan: boolean - if True registration scans will be performed before the 2D scan
9. xcen, ycen; positions where the alignment scan would be done. This number updates after each alignment scan
10. Options for registration scans
11. Options to save XRFs to pdf after each scan
12. Options to do foil calibration scans
13. Save important information in CSV format to the selected folder
14. The user can turn alignment scans on and off
"""
# marker to track beam dump
beamDumpOccured = False
e_list = generateEList(elemParam, highEStart = highEStart)
#add real energy to the dataframe
e_list['E Readback'] = np.nan
#add scan id to the dataframe
e_list['Scan ID'] = np.nan
#record time
e_list['TimeStamp'] = pd.Timestamp.now()
#Ic values are useful for calibration
e_list['IC3'] = sclr2_ch4.get()
e_list['IC0'] = sclr2_ch2.get()
e_list['IC3_before_peak'] = sclr2_ch2.get()
#record if peak beam happened before the scan
e_list['Peak Flux'] = False
print(e_list.head())
yield from bps.sleep(10) #time to quit if anything goes wrong
#get initial ic1 value
ic_0 = sclr2_ch2.get()
#opening fast shutter for initial ic3 reading
#caput('XF:03IDC-ES{Zeb:2}:SOFT_IN:B0',1)
yield from bps.sleep(2)
#get the initial ic3 reading for peaking the beam
ic_3_init = sclr2_ch4.get()
#close fast shutter after initial ic3 reading
#caput('XF:03IDC-ES{Zeb:2}:SOFT_IN:B0',0)
#remember the start positions
zpssz_i = zpssz.position
zpssy_i = zpssy.position
for i in range(len(e_list)):
#if a beam dump occurs, turn the marker on
if sclr2_ch2.get()<0.1*ic_0:
beamDumpOccured = True
#wait if a beam dump occurred
yield from check_for_beam_dump(threshold=0.1*ic_0)
if beamDumpOccured:
#wait for about 3 minutes for all the feedbacks to kick in
yield from bps.sleep(200)
#redo the previous energy
e_t, ugap_t, crl_t, zpz_t, *others = e_list.iloc[i-1]
#turn off the beamdump marker
beamDumpOccured = False
else:
#unwrap df row for energy change
e_t, ugap_t, crl_t, zpz_t, *others = e_list.iloc[i]
if moveOptics:
yield from move_energy(e_t,zpz_t)
else: pass
#open fast shutter to check if ic3 reading is satisfactory
#caput('XF:03IDC-ES{Zeb:2}:SOFT_IN:B0',1)
yield from bps.sleep(3)
#get ic3 value before peaking, e change
ic3_ = sclr2_ch4.get()
# if ic3 value is below the threshold, peak the beam
if ic3_ < ic_3_init*0.8:
if peakBeam: yield from peak_the_flux()
fluxPeaked = True # for df record
else:
fluxPeaked = False
#for df
ic_3 = sclr2_ch4.get()
ic_0 = sclr2_ch2.get()
# move to particle location for alignment scan
#if doAlignScan:
#yield from bps.mov(zpssx, xcen)
#yield from bps.mov(zpssy, ycen)
#do the alignment scan on the xanes elem after it is excited,
#otherwise skip or use another element
if e_list['energy'][i]<0: # for special scans if no align elem available
'''
yield from fly1d(dets,zpssx,-1,1,100,0.1)
xcen = return_line_center(-1,'Cl',0.7)
yield from bps.mov(zpssx, xcen)
yield from fly1d(dets,zpssy,-1,1 ,100,0.1)
ycen = return_line_center(-1,'Cl',0.7)
yield from bps.mov(zpssy, ycen)
'''
pass
elif doAlignScan:
if alignX[-1]:
yield | |
LZMA is a
## stream it will uncompress some data. If no data can be decompressed
## at all, it is not a valid LZMA stream.
lzmasizeknown = False
if lzmasizebytes != '\xff\xff\xff\xff\xff\xff\xff\xff':
lzmasize = struct.unpack('<Q', lzmasizebytes)[0]
## XZ Utils rejects files with uncompressed size of 256 GiB
if lzmasize > 274877906944:
continue
## if the size is 0, why even bother?
if lzmasize == 0:
continue
lzmasizeknown = True
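## For reference: in the LZMA_alone ('.lzma') header, byte 0 holds the properties
## (lc/lp/pb), bytes 1-4 the dictionary size and bytes 5-12 the uncompressed size
## (both little endian); all 0xff in the size field means the size is unknown
## (streamed), which is what the checks above distinguish.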
## either read all bytes that are left in the file or a minimum
## amount of bytes, whichever is the smallest
minlzmadatatoread = 10000000
lzmabytestoread = min(filesize-offset, minlzmadatatoread)
lzmafile = open(filename, 'rb')
lzmafile.seek(offset)
lzmadata = lzmafile.read(lzmabytestoread)
if len(lzmadata) < 14:
lzmafile.close()
continue
lzma_extra_strict = False
if not lzma_try_all:
## quite a few LZMA streams have '\x00' at byte 14, but not all
if not lzmadata[14] == '\x00' and lzma_extra_strict:
lzmafile.close()
continue
p = subprocess.Popen(['lzma', '-cd', '-'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
(stanout, stanerr) = p.communicate(lzmadata)
if p.returncode == 0:
lzmafile.close()
## whole stream successfully unpacked and there was
## no trailing data
tmpdir = dirsetup(tempdir, filename, "lzma", counter)
tmpfile = tempfile.mkstemp(dir=tmpdir)
os.write(tmpfile[0], stanout)
os.fdopen(tmpfile[0]).close()
diroffsets.append((tmpdir, offset, len(lzmadata)))
blacklist.append((offset, offset+len(lzmadata)))
counter += 1
continue
if len(stanout) == 0:
## no data was successfully unpacked, so this is not
## a valid LZMA stream
lzmafile.close()
continue
## The data seems to be a valid LZMA stream, but not all LZMA
## data was unpacked.
if lzmafile.tell() == filesize:
## dunno what to do in this case
pass
lzmafile.close()
## If there is a very big difference (thousandfold) between
## the unpacked data and the declared size it is a false positive
## for sure
## TODO: make lzmacutoff configurable
lzmacutoff = 1000
if lzmasizeknown:
if len(stanout) != lzmasize:
if len(stanout) < lzmacutoff:
if lzmasize/len(stanout) > 1000:
continue
else:
## there is a very big chance that it actually
## is a false positive
pass
else:
## all data has been unpacked
tmpdir = dirsetup(tempdir, filename, "lzma", counter)
tmpfile = tempfile.mkstemp(dir=tmpdir)
os.write(tmpfile[0], stanout)
os.fdopen(tmpfile[0]).close()
diroffsets.append((tmpdir, offset, 0))
counter += 1
continue
else:
if len(stanout) < lzmacutoff:
if lzmabytestoread/len(stanout) > 1000:
continue
## TODO: check if the output consists of a single character that
## has been repeated
tmpdir = dirsetup(tempdir, filename, "lzma", counter)
res = unpackLZMA(filename, offset, template, tmpdir, lzmalimit, lzma_tmpdir, blacklist)
if res != None:
(diroffset, wholefile) = res
if wholefile:
lzmasize = filesize - offset
diroffsets.append((diroffset, offset, lzmasize))
blacklist.append((offset, filesize))
if offset == 0:
newtags.append('compressed')
newtags.append('lzma')
else:
diroffsets.append((diroffset, offset, 0))
blacklist.append((offset, offset+len(lzmadata)))
counter = counter + 1
else:
## cleanup
os.rmdir(tmpdir)
lzma_file.close()
return (diroffsets, blacklist, newtags, hints)
## tries to unpack stuff using lzma -cd. If it is successful, it will
## return a directory for further processing, otherwise it will return None.
## Newer versions of XZ (>= 5.0.0) have an option to test and list archives.
## Unfortunately this does not work for files with trailing data, so we can't
## use it to filter out "bad" files.
def unpackLZMA(filename, offset, template, tempdir=None, minbytesize=1, lzma_tmpdir=None, blacklist=[]):
tmpdir = unpacksetup(tempdir)
## if UNPACK_TEMPDIR is set to for example a ramdisk use that instead.
if lzma_tmpdir != None:
tmpfile = tempfile.mkstemp(dir=lzma_tmpdir)
os.fdopen(tmpfile[0]).close()
outtmpfile = tempfile.mkstemp(dir=lzma_tmpdir)
unpackFile(filename, offset, tmpfile[1], lzma_tmpdir, blacklist=blacklist)
else:
tmpfile = tempfile.mkstemp(dir=tmpdir)
os.fdopen(tmpfile[0]).close()
outtmpfile = tempfile.mkstemp(dir=tmpdir)
unpackFile(filename, offset, tmpfile[1], tmpdir, blacklist=blacklist)
p = subprocess.Popen(['lzma', '-cd', tmpfile[1]], stdout=outtmpfile[0], stderr=subprocess.PIPE, close_fds=True)
(stanout, stanerr) = p.communicate()
wholefile = False
if p.returncode == 0:
wholefile = True
os.fdopen(outtmpfile[0]).close()
os.unlink(tmpfile[1])
## sanity checks if the size is set
lzmafile = open(filename, 'rb')
lzmafile.seek(offset+5)
lzmasizebytes = lzmafile.read(8)
lzmafile.close()
## check if the size of the uncompressed data is recorded
## in the binary
if lzmasizebytes != '\xff\xff\xff\xff\xff\xff\xff\xff':
lzmasize = struct.unpack('<Q', lzmasizebytes)[0]
if os.stat(outtmpfile[1]).st_size != lzmasize:
os.unlink(outtmpfile[1])
if tempdir == None:
os.rmdir(tmpdir)
return None
else:
if os.stat(outtmpfile[1]).st_size < minbytesize:
os.unlink(outtmpfile[1])
if tempdir == None:
os.rmdir(tmpdir)
return None
if lzma_tmpdir != None:
## create the directory and move the LZMA file
try:
os.makedirs(tmpdir)
except OSError, e:
pass
if template != None:
mvpath = os.path.join(tmpdir, template)
if not os.path.exists(mvpath):
try:
shutil.move(outtmpfile[1], mvpath)
except Exception, e:
pass
else:
shutil.move(outtmpfile[1], tmpdir)
else:
if template != None:
mvpath = os.path.join(tmpdir, template)
if not os.path.exists(mvpath):
try:
shutil.move(outtmpfile[1], mvpath)
except Exception, e:
pass
return (tmpdir, wholefile)
## Search and unpack Ubi. Since we can't easily determine the length of the
## file system by using ubi we will have to use a different measurement to
## measure the size of ubi. A good start is the sum of the size of the
## volumes that were unpacked.
def searchUnpackUbi(filename, tempdir=None, blacklist=[], offsets={}, scanenv={}, debug=False):
hints = {}
if not 'ubi' in offsets:
return ([], blacklist, [], hints)
if offsets['ubi'] == []:
return ([], blacklist, [], hints)
datafile = open(filename, 'rb')
## We can use the values of offset and ubisize where offset != -1
## to determine the ranges for the blacklist.
diroffsets = []
counter = 1
## TODO: big file fixes
data = datafile.read()
datafile.close()
for offset in offsets['ubi']:
blacklistoffset = extractor.inblacklist(offset, blacklist)
if blacklistoffset != None:
continue
tmpdir = dirsetup(tempdir, filename, "ubi", counter)
res = unpackUbi(data, offset, tmpdir)
if res != None:
(ubitmpdir, ubisize) = res
diroffsets.append((ubitmpdir, offset, ubisize))
blacklist.append((offset, offset+ubisize))
## TODO use ubisize to set the blacklist correctly
counter = counter + 1
else:
## cleanup
os.rmdir(tmpdir)
return (diroffsets, blacklist, [], hints)
def unpackUbi(data, offset, tempdir=None):
tmpdir = unpacksetup(tempdir)
tmpfile = tempfile.mkstemp()
os.write(tmpfile[0], data[offset:])
## take a two step approach: first unpack the UBI images,
## then extract the individual files from these images
p = subprocess.Popen(['ubi_extract_images.py', '-o', tmpdir, tmpfile[1]], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
(stanout, stanerr) = p.communicate()
if p.returncode != 0:
os.fdopen(tmpfile[0]).close()
os.unlink(tmpfile[1])
if tempdir == None:
os.rmdir(tmpdir)
return None
else:
p = subprocess.Popen(['ubi_display_info.py', tmpfile[1]], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
(stanout, stanerr) = p.communicate()
if p.returncode != 0:
os.fdopen(tmpfile[0]).close()
os.unlink(tmpfile[1])
if tempdir == None:
os.rmdir(tmpdir)
return None
stanoutlines = stanout.split('\n')
for s in stanoutlines:
if 'PEB Size' in s:
blocksize = int(s.split(':')[1].strip())
if 'Total Block Count' in s:
blockcount = int(s.split(':')[1].strip())
ubisize = blocksize * blockcount
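## e.g. a PEB Size of 131072 bytes and a Total Block Count of 512 would give
## ubisize = 131072 * 512 = 67108864 bytes (64 MiB); the numbers are illustrative.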
## clean up the temporary files
os.fdopen(tmpfile[0]).close()
os.unlink(tmpfile[1])
## determine the sum of the size of the unpacked files
## now the second stage, unpacking the images that were extracted
ubitmpdir = os.path.join(tmpdir, os.path.basename(tmpfile[1]))
for i in os.listdir(ubitmpdir):
p = subprocess.Popen(['ubi_extract_files.py', '-o', tmpdir, os.path.join(ubitmpdir, i)], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
(stanout, stanerr) = p.communicate()
os.unlink(os.path.join(ubitmpdir, i))
os.rmdir(ubitmpdir)
return (tmpdir, ubisize)
## unpacking for ARJ. The file format is described at:
## http://www.fileformat.info/format/arj/corion.htm
## Although there is no trailer the arj program can be used to at least give
## some information about the uncompressed size of the archive.
## Please note: these files can also be unpacked with 7z, which could be
## a little bit faster. Since 7z is "smart" and looks ahead useful information
## like the actual offset that is used for reporting and blacklisting could
## be lost.
## WARNING: this method is very costly. Since ARJ is not used on many Unix
## systems it is advised to not enable it when scanning binaries intended for
## these systems.
def searchUnpackARJ(filename, tempdir=None, blacklist=[], offsets={}, scanenv={}, debug=False):
hints = {}
if not 'arj' in offsets:
return ([], blacklist, [], hints)
if offsets['arj'] == []:
return ([], blacklist, [], hints)
diroffsets = []
counter = 1
for offset in offsets['arj']:
blacklistoffset = extractor.inblacklist(offset, blacklist)
if blacklistoffset != None:
continue
tmpdir = dirsetup(tempdir, filename, "arj", counter)
res = unpackARJ(filename, offset, tmpdir)
if res != None:
(arjtmpdir, arjsize) = res
diroffsets.append((arjtmpdir, offset, arjsize))
blacklist.append((offset, arjsize))
counter = counter + 1
else:
## cleanup
os.rmdir(tmpdir)
return (diroffsets, blacklist, [], hints)
def unpackARJ(filename, offset, tempdir=None):
tmpdir = unpacksetup(tempdir)
tmpfile = tempfile.mkstemp(dir=tmpdir, suffix=".arj")
os.fdopen(tmpfile[0]).close()
unpackFile(filename, offset, tmpfile[1], tmpdir)
## first check archive integrity
p = subprocess.Popen(['arj', 't', tmpfile[1]], stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, cwd=tmpdir)
(stanout, stanerr) = p.communicate()
if p.returncode != 0:
## this is not an ARJ archive
os.unlink(tmpfile[1])
if tempdir == None:
os.rmdir(tmpdir)
return None
else:
p = subprocess.Popen(['arj', 'x', tmpfile[1]], stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, cwd=tmpdir)
(stanout, stanerr) = p.communicate()
if p.returncode != 0:
os.unlink(tmpfile[1])
if tempdir == None:
os.rmdir(tmpdir)
return None
## everything has been unpacked, so we can get the size.
p = subprocess.Popen(['arj', 'v', tmpfile[1]], stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, cwd=tmpdir)
(stanout, stanerr) = p.communicate()
stanoutlines = stanout.strip().split("\n")
## we should do more sanity checks here
arjsize = int(stanoutlines[-1].split()[-2])
## always clean up the old temporary files
os.unlink(tmpfile[1])
return (tmpdir, arjsize)
## extraction of Windows .ICO files. The identifier for .ICO files is very
## common, so on large files this will have a rather big performance impact
## with relatively little gain.
## This scan should only be enabled if verifyIco is also enabled
def searchUnpackIco(filename, tempdir=None, blacklist=[], offsets={}, scanenv={}, debug=False):
diroffsets = []
hints = {}
counter = 1
offset = 0
template = None
blacklistoffset = extractor.inblacklist(offset, blacklist)
if blacklistoffset != None:
return (diroffsets, blacklist, [], hints)
## now check how many images there are in the file
icofile = open(filename, 'rb')
icofile.seek(4)
icobytes = icofile.read(2)
icocount = struct.unpack('<H', icobytes)[0]
## the ICO format first has all the headers, then the image data
for i in xrange(0,icocount):
tmpdir = dirsetup(tempdir, filename, "ico", counter)
icoheader = icofile.read(16)
## grab the size of the icon, plus the offset where it can
## be found in the file
icosize = struct.unpack('<I', icoheader[8:12])[0]
icooffset = struct.unpack('<I', icoheader[12:16])[0]
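## Each 16-byte ICONDIRENTRY read above is laid out as: width, height, colour count,
## reserved (1 byte each), colour planes and bits per pixel (2 bytes each), then the
## image data size and the image data offset (4 bytes each), which are what the two
## struct.unpack calls above extract.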
ispng = False
oldoffset = icofile.tell()
icofile.seek(icooffset)
icobytes = icofile.read(icosize)
if len(icobytes) > 45:
if icobytes[:8] == fsmagic.fsmagic['png']:
ispng = True
if ispng:
tmpfile = os.path.join(tmpdir, "unpack-%d.png" % counter)
else:
tmpfile = os.path.join(tmpdir, "unpack-%d.bmp" % counter)
icooutput = open(tmpfile, 'wb')
if not ispng:
## it is a BMP. This means that the BMP header needs to be
## reconstructed first. According to the specification on
## wikipedia the bitmap data in the ICO file isn't
## regular bitmap data (because of a XOR mask), so skip
## for now.
pass
'''
if icobytes[:4] == '\x28\x00\x00\x00':
icooutput.write('BM')
## BMP magic, header is 14 long
bmpsize = len(icobytes) + 14
## BMP size
icooutput.write(struct.pack('<I', | |
# base class
import numpy as np
import warnings
class DictNpArrayMix:
""" The basic class of data structure
The class members are initialized from the keys provided to the initial function
Members can be accessed in the style of either a Dictionary or a numpy.ndarray
"""
def __init__(self, keys, _dat=None, _offset=int(0), _append=False, **kwargs):
"""
Parameters
----------
keys: list of class member names and the corresponding types or numpy.ndarray shapes
Class member list description. Defined by inherited types
For example: keys=[['mass',numpy.float64],['pos',(numpy.float64,3)],['sub1',typename],['sub2',(typename,kwargs)]] will provide class members: mass (1D numpy.ndarray), pos (2D numpy.ndarray with a shape of (*,3)), sub1 (a class instance with the type of typename) and sub2 (a type based on DictNpArrayMix with additional keyword arguments, kwargs)
_dat: numpy.ndarray | type(self) | None
If it is 2D numpy.ndarray type data, read data as readArray function
If it is the same class type, copy the data
If it is None, initial class with empty data
_offset: int (0)
Reading column offset of _dat if it is 2D np.ndarray
_append: bool (False)
If true, append keys and ncols to the current class instead of create new class members
kwargs: dict ()
keyword arguments, defined by inherited types
"""
self.initargs = kwargs.copy()
if (_append): self.keys = self.keys + keys
else: self.keys = keys.copy()
if (issubclass(type(_dat), DictNpArrayMix)):
icol = int(0)
for key, parameter in keys:
if (type(parameter) == type):
if (issubclass(parameter, DictNpArrayMix)):
self.__dict__[key] = parameter(_dat.__dict__[key], **kwargs)
icol += self.__dict__[key].ncols
else:
self.__dict__[key] = _dat.__dict__[key].copy()
icol += 1
#raise ValueError('Initial fail, unknown key type, should be inherience of DictNpArrayMix, given ',parameter)
elif (type(parameter)==tuple):
if (type(parameter[0]) == type) & (type(parameter[1]) == int):
self.__dict__[key] = _dat.__dict__[key].copy()
icol += parameter[1]
elif (type(parameter[0]) == type) & (type(parameter[1])==dict):
if(issubclass(parameter[0], DictNpArrayMix)):
self.__dict__[key] = parameter[0](_dat.__dict__[key], **{**kwargs, **parameter[1]})
icol += self.__dict__[key].ncols
else:
self.__dict__[key] = _dat.__dict__[key].copy()
icol += 1
else:
raise ValueError('Initial fail, unknown key type ',parameter[0],' and column count ', parameter[1] )
else:
raise ValueError('Initial fail, unknown key parameter, should be DictNpArrayMix type name or value of int, given ',parameter)
if (_append): self.ncols += int(icol)
else: self.ncols = int(icol)
self.size = _dat.size
elif (type(_dat)==np.ndarray):
icol = _offset
self.size = int(0)
for key, parameter in keys:
if (type(parameter) == type):
if (issubclass(parameter, DictNpArrayMix)):
self.__dict__[key] = parameter(_dat, icol, False, **kwargs)
icol += self.__dict__[key].ncols
self.size += self.__dict__[key].size*self.__dict__[key].ncols
else:
self.__dict__[key] = _dat[:,icol].astype(parameter)
icol += 1
self.size += self.__dict__[key].size
#raise ValueError('Initial fail, unknown key type, should be inherience of DictNpArrayMix, given ',parameter)
elif (type(parameter)==tuple):
if (type(parameter[0]) == type) & (type(parameter[1]) == int):
self.__dict__[key] = _dat[:,icol:icol+parameter[1]].astype(parameter[0])
icol += parameter[1]
self.size += self.__dict__[key].size
elif (type(parameter[0]) == type) & (type(parameter[1])==dict):
if(issubclass(parameter[0], DictNpArrayMix)):
self.__dict__[key] = parameter[0](_dat, icol, False, **{**kwargs, **parameter[1]})
icol += self.__dict__[key].ncols
self.size += self.__dict__[key].size*self.__dict__[key].ncols
else:
self.__dict__[key] = _dat[:,icol].astype(parameter[0])
icol += 1
self.size += self.__dict__[key].size
else:
raise ValueError('Initial fail, unknown key type ',parameter[0],' and column count ', parameter[1] )
else:
raise ValueError('Initial fail, unknown key parameter, should be DictNpArrayMix type name or value of int, given ',parameter)
icol -= _offset
if (_append): self.ncols += int(icol)
else: self.ncols = int(icol)
self.size = int(self.size/icol)
if (self.size != _dat.shape[0]):
raise ValueError('Reading error, final counted size ',self.size,' is not consistent with reading ndarray shape',_dat.shape[0])
elif (_dat==None):
icol = int(0)
for key, parameter in keys:
if (type(parameter) == type):
if (issubclass(parameter, DictNpArrayMix)):
self.__dict__[key] = parameter(**kwargs)
icol += self.__dict__[key].ncols
else:
self.__dict__[key] = np.empty(0).astype(parameter)
icol += 1
#raise ValueError('Initial fail, unknown key type, should be inherience of DictNpArrayMix, given b',parameter)
elif (type(parameter)==tuple):
if (type(parameter[0]) == type) & (type(parameter[1]) == int):
self.__dict__[key] = np.empty([0,parameter[1]]).astype(parameter[0])
icol += parameter[1]
elif (type(parameter[0]) == type) & (type(parameter[1])==dict):
if(issubclass(parameter[0], DictNpArrayMix)):
self.__dict__[key] = parameter[0](**{**kwargs, **parameter[1]})
icol += self.__dict__[key].ncols
else:
self.__dict__[key] = np.empty(0).astype(parameter[0])
icol += 1
else:
raise ValueError('Initial fail, unknown key type ',parameter[0],' and column count ', parameter[1] )
else:
raise ValueError('Initial fail, unknown key parameter, should be DictNpArrayMix type name or value of int, given ',parameter)
if (_append): self.ncols += int(icol)
else: self.ncols = int(icol)
self.size = int(0)
else:
raise ValueError('Initial fail, data type should be ',type(self),' or np.ndarray, given ',type(_dat))
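# A minimal illustrative subclass (not part of this module); it assumes the usual
# pattern of a subclass defining its keys and forwarding them to this initializer:
#
# class Particle(DictNpArrayMix):
#     def __init__(self, _dat=None, _offset=int(0), _append=False, **kwargs):
#         keys = [['mass', np.float64], ['pos', (np.float64, 3)], ['vel', (np.float64, 3)]]
#         DictNpArrayMix.__init__(self, keys, _dat, _offset, _append, **kwargs)
#
# p = Particle()                    # empty instance with p.ncols == 7
# p.readArray(np.zeros([10, 7]))    # fill it from a 10x7 array (rows are events)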
def readArray(self, _dat, _offset=int(0), ncol_check=True, **kwargs):
""" Read class member data from a 2D numpy.ndarray
Parameters
----------
_dat: numpy.ndarray
Read 2D array, rows are the events, columns are members. The class members are filled in the order of items in keys provided in the initial function.
For example: if keys are [['mass',1],['pos',3]], the member mass = _dat[:,_offset] and pos = _dat[:,_offset+1:_offset+4]
_offset: int (0)
Reading column offset of _dat if it is 2D np.ndarray
ncol_check: bool (True)
If True, check whether self.ncols (the number of columns in class) - _offset == _dat.shape[1] (the number of columns). If not equal, output warning
kwargs: dict ()
keyword arguments
"""
if (self.ncols + _offset != _dat.shape[1]) & (ncol_check):
warnings.warn('The reading data shape[1] or the number of columns (%d) mismatches the number of columns defined in the class instance (%d)! Make sure this is intended and that you chose the correct keyword arguments for the class instance initialization' % (_dat.shape[1], self.ncols+_offset))
icol = _offset
self.size = int(0)
for key, parameter in self.keys:
if (type(parameter) == type):
if (issubclass(parameter, DictNpArrayMix)):
self.__dict__[key].readArray(_dat, icol, False, **kwargs)
icol += self.__dict__[key].ncols
self.size += self.__dict__[key].size*self.__dict__[key].ncols
else:
self.__dict__[key] = _dat[:,icol].astype(parameter)
icol += 1
self.size += self.__dict__[key].size
#raise ValueError('Initial fail, unknown key type, should be inherience of DictNpArrayMix, given ',parameter)
elif (type(parameter)==tuple):
if (type(parameter[0]) == type) & (type(parameter[1]) == int):
self.__dict__[key] = _dat[:,icol:icol+parameter[1]].astype(parameter[0])
icol += parameter[1]
self.size += self.__dict__[key].size
elif (type(parameter[0]) == type) & (type(parameter[1])==dict):
if(issubclass(parameter[0], DictNpArrayMix)):
self.__dict__[key].readArray(_dat, icol, False, **{**kwargs,**parameter[1]})
icol += self.__dict__[key].ncols
self.size += self.__dict__[key].size*self.__dict__[key].ncols
else:
self.__dict__[key] = _dat[:,icol].astype(parameter[0])
icol += 1
self.size += self.__dict__[key].size
else:
raise ValueError('Initial fail, unknown key type ',parameter[0],' and column count ', parameter[1] )
else:
raise ValueError('Initial fail, unknown key parameter, should be DictNpArrayMix type name or value of int, given ',parameter)
icol -= _offset
self.size = int(self.size/icol)
if (self.size != _dat.shape[0]):
raise ValueError('Reading error, final counted size ',self.size,' is not consistent with reading ndarray shape',_dat.shape[0])
if (self.ncols != icol):
raise ValueError('Column number inconsistence, self ncols ',self.ncols,' key ncols ', icol)
def __getitem__(self, k):
""" Map getitem to all members generated from the keys in the initial function, and return a new data filtered by k
If the member is an inherited type of DictNpArrayMix, also map all of its sub-members.
Parameters
----------
k: filter
The same type of arguments for numpy.ndarray.__getitem__
"""
if (type(k)==str):
return self.__dict__[k]
else:
cls_type = type(self)
new_dat = cls_type(**self.initargs)
new_dat.ncols = self.ncols
new_dat.size = int(0)
new_dat.keys = self.keys.copy()
icol = int(0)
for key_type in new_dat.keys:
key = key_type[0]
item = self.__dict__[key]
if (type(item) == np.ndarray):
if item.shape[0]!=self.size:
raise ValueError('Member ',key,' size/dimension',item.shape, ' is not consistent with the data size',self.size)
new_dat.__dict__[key] = item[k]
new_dat.size += new_dat.__dict__[key].size
if (len(item.shape)>1): icol += item.shape[1]
else: icol += 1
elif (issubclass(type(item), DictNpArrayMix)):
new_item = item[k]
new_dat.__dict__[key] = new_item
new_dat.size += new_item.size*new_item.ncols
icol += new_item.ncols
new_dat.size = int(new_dat.size/new_dat.ncols)
if (icol != new_dat.ncols):
raise ValueError('Column number inconsistent, counted:',icol,' saved ncols:',new_dat.ncols,'keys:',new_dat.keys,'fileter: ',k,' original size:',self.size,' original ncols:',self.ncols)
return new_dat
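# Example (assuming a member named 'mass' exists): dat[dat.mass > 1.0] returns a new
# instance of the same type containing only the rows that satisfy the mask, and
# dat[:10] keeps the first ten rows of every member.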
def __setitem__(self, k, data):
""" Map setitem to all members generated from the keys in the initial function
If the member is an inherited type of DictNpArrayMix, also map all of its sub-members.
Parameters
----------
k: filter
The same type of arguments for numpy.ndarray.__getitem__
data: numpy.ndarray | DictNpArrayNix
The new data to set
"""
if (type(k)==str):
self.__dict__[k] = data
else:
for key_type in self.keys:
key = key_type[0]
self.__dict__[key][k] = data[key]
# def keys(self):
# return self.__dict__.keys()
def addNewMember(self, key, member):
""" Add a new class member
The ncols is also updated.
Be careful: if the target for adding members is a sub-member, the ncols of its parent is not updated.
This can cause issues for the parent when the data size needs to be calculated.
Thus, after calling this function, please also increase the ncols of the parents for consistency.
Parameters
----------
key: string
new member name
member: numpy.ndarray | DictNpArrayNix
data binding to | |
A 1 43 ? 47.059 1.900 1.866 1.00 10.97 ? 43 ARG A CB 1
ATOM 126 C CG . ARG A 1 43 ? 47.883 3.004 2.549 1.00 10.56 ? 43 ARG A CG 1
ATOM 127 C CD . ARG A 1 43 ? 47.060 4.215 3.004 1.00 12.29 ? 43 ARG A CD 1
ATOM 128 N NE . ARG A 1 43 ? 46.128 3.823 4.025 1.00 12.31 ? 43 ARG A NE 1
ATOM 129 C CZ . ARG A 1 43 ? 46.262 3.954 5.340 1.00 10.80 ? 43 ARG A CZ 1
ATOM 130 N NH1 . ARG A 1 43 ? 47.361 4.515 5.861 1.00 11.15 ? 43 ARG A NH1 1
ATOM 131 N NH2 . ARG A 1 43 ? 45.307 3.484 6.136 1.00 11.78 ? 43 ARG A NH2 1
ATOM 132 N N . LEU A 1 44 ? 50.045 0.234 1.984 1.00 11.16 ? 44 LEU A N 1
ATOM 133 C CA . LEU A 1 44 ? 50.993 -0.529 2.805 1.00 11.08 ? 44 LEU A CA 1
ATOM 134 C C . LEU A 1 44 ? 51.747 0.348 3.806 1.00 11.46 ? 44 LEU A C 1
ATOM 135 O O . LEU A 1 44 ? 52.280 1.439 3.458 1.00 11.33 ? 44 LEU A O 1
ATOM 136 C CB . LEU A 1 44 ? 51.998 -1.230 1.868 1.00 11.40 ? 44 LEU A CB 1
ATOM 137 C CG . LEU A 1 44 ? 53.214 -1.982 2.440 1.00 12.45 ? 44 LEU A CG 1
ATOM 138 C CD1 . LEU A 1 44 ? 52.761 -3.240 3.096 1.00 12.03 ? 44 LEU A CD1 1
ATOM 139 C CD2 . LEU A 1 44 ? 54.276 -2.180 1.348 1.00 12.60 ? 44 LEU A CD2 1
ATOM 140 N N . GLY A 1 45 ? 51.790 -0.153 5.040 1.00 10.05 ? 45 GLY A N 1
ATOM 141 C CA . GLY A 1 45 ? 52.519 0.508 6.128 1.00 9.74 ? 45 GLY A CA 1
ATOM 142 C C . GLY A 1 45 ? 53.610 -0.436 6.629 1.00 11.26 ? 45 GLY A C 1
ATOM 143 O O . GLY A 1 45 ? 53.315 -1.558 6.981 1.00 12.62 ? 45 GLY A O 1
ATOM 144 N N . VAL A 1 46 ? 54.858 0.052 6.736 1.00 9.83 ? 46 VAL A N 1
ATOM 145 C CA . VAL A 1 46 ? 56.001 -0.765 7.245 1.00 10.95 ? 46 VAL A CA 1
ATOM 146 C C . VAL A 1 46 ? 56.829 0.052 8.218 1.00 11.74 ? 46 VAL A C 1
ATOM 147 O O . VAL A 1 46 ? 57.195 1.178 7.900 1.00 11.71 ? 46 VAL A O 1
ATOM 148 C CB . VAL A 1 46 ? 56.902 -1.211 6.095 1.00 11.17 ? 46 VAL A CB 1
ATOM 149 C CG1 . VAL A 1 46 ? 58.051 -2.097 6.575 1.00 11.51 ? 46 VAL A CG1 1
ATOM 150 C CG2 . VAL A 1 46 ? 56.079 -1.896 5.021 1.00 11.96 ? 46 VAL A CG2 1
ATOM 151 N N . ALA A 1 47 ? 57.155 -0.550 9.359 1.00 10.41 ? 47 ALA A N 1
ATOM 152 C CA . ALA A 1 47 ? 58.196 -0.058 10.232 1.00 11.33 ? 47 ALA A CA 1
ATOM 153 C C . ALA A 1 47 ? 59.090 -1.236 10.698 1.00 12.04 ? 47 ALA A C 1
ATOM 154 O O . ALA A 1 47 ? 58.626 -2.194 11.381 1.00 11.38 ? 47 ALA A O 1
ATOM 155 C CB . ALA A 1 47 ? 57.634 0.659 11.419 1.00 10.74 ? 47 ALA A CB 1
ATOM 156 N N . LEU A 1 48 ? 60.361 -1.138 10.311 1.00 12.64 ? 48 LEU A N 1
ATOM 157 C CA . LEU A 1 48 ? 61.384 -2.089 10.689 1.00 13.56 ? 48 LEU A CA 1
ATOM 158 C C . LEU A 1 48 ? 62.335 -1.355 11.641 1.00 14.51 ? 48 LEU A C 1
ATOM 159 O O . LEU A 1 48 ? 62.791 -0.267 11.274 1.00 14.64 ? 48 LEU A O 1
ATOM 160 C CB . LEU A 1 48 ? 62.171 -2.587 9.493 1.00 15.33 ? 48 LEU A CB 1
ATOM 161 C CG . LEU A 1 48 ? 63.367 -3.537 9.869 1.00 16.99 ? 48 LEU A CG 1
ATOM 162 C CD1 . LEU A 1 48 ? 62.840 -4.876 10.312 1.00 18.21 ? 48 LEU A CD1 1
ATOM 163 C CD2 . LEU A 1 48 ? 64.349 -3.667 8.694 1.00 19.85 ? 48 LEU A CD2 1
ATOM 164 N N . ILE A 1 49 ? 62.515 -1.861 12.867 1.00 12.93 ? 49 ILE A N 1
ATOM 165 C CA . ILE A 1 49 ? 63.549 -1.373 13.716 1.00 13.91 ? 49 ILE A CA 1
ATOM 166 C C . ILE A 1 49 ? 64.614 -2.493 13.835 1.00 13.70 ? 49 ILE A C 1
ATOM 167 O O . ILE A 1 49 ? 64.326 -3.640 14.280 1.00 13.95 ? 49 ILE A O 1
ATOM 168 C CB . ILE A 1 49 ? 63.080 -1.100 15.166 1.00 14.53 ? 49 ILE A CB 1
ATOM 169 C CG1 . ILE A 1 49 ? 62.043 -0.004 15.217 1.00 16.71 ? 49 ILE A CG1 1
ATOM 170 C CG2 . ILE A 1 49 ? 64.269 -0.750 16.039 1.00 15.70 ? 49 ILE A CG2 1
ATOM 171 C CD1 . ILE A 1 49 ? 61.384 0.110 16.590 1.00 16.96 ? 49 ILE A CD1 1
ATOM 172 N N . ASN A 1 50 ? 65.871 -2.096 13.606 1.00 13.30 ? 50 ASN A N 1
ATOM 173 C CA . ASN A 1 50 ? 66.997 -3.036 13.755 1.00 15.41 ? 50 ASN A CA 1
ATOM 174 C C . ASN A 1 50 ? 67.685 -2.679 15.091 1.00 17.79 ? 50 ASN A C 1
ATOM 175 O O . ASN A 1 50 ? 68.311 -1.650 15.165 1.00 18.19 ? 50 ASN A O 1
ATOM 176 C CB . ASN A 1 50 ? 67.917 -2.864 12.560 1.00 16.11 ? 50 ASN A CB 1
ATOM 177 C CG . ASN A 1 50 ? 69.109 -3.799 12.604 1.00 17.38 ? 50 ASN A CG 1
ATOM 178 O OD1 . ASN A 1 50 ? 69.442 -4.356 13.660 1.00 16.74 ? 50 ASN A OD1 1
ATOM 179 N ND2 . ASN A 1 50 ? 69.681 -4.069 11.439 1.00 21.35 ? 50 ASN A ND2 1
ATOM 180 N N . THR A 1 51 ? 67.582 -3.533 16.111 1.00 17.08 ? 51 THR A N 1
ATOM 181 C CA . THR A 1 51 ? 68.079 -3.178 17.418 1.00 17.58 ? 51 THR A CA 1
ATOM 182 C C . THR A 1 51 ? 69.619 -3.327 17.502 1.00 20.44 ? 51 THR A C 1
ATOM 183 O O . THR A 1 51 ? 70.205 -2.930 18.502 1.00 20.67 ? 51 THR A O 1
ATOM 184 C CB . THR A 1 51 ? 67.463 -4.018 18.510 1.00 18.42 ? 51 THR A CB 1
ATOM 185 O OG1 . THR A 1 51 ? 67.927 -5.377 18.494 1.00 16.76 ? 51 THR A OG1 1
ATOM 186 C CG2 . | |
<filename>micron/stagecontrol.py
#!/usr/bin/env python3
# tertiary Helper
# Unless absolutely necessary, do not use self.controller.send(...)
# Implement the method in micron.py and call that instead
# Abstraction yo
# Advanced level functions combining multiple basic functions are to be implemented here
# Methods involving multiple functions in this script should generally be implemented in a separate script and not implemented here, unless it is a very common function
# stage-control to interact with the NanoLab Microcontroller
# Microcontroller Model: Micos 1860SMC Basic
# Made 2019, <NAME>, <NAME>
# sunyudong [at] outlook [dot] sg, mingonsgwu [at] outlook [dot] sg
# github.com/sunjerry019/photonLauncher
# Change code here if for e.g. sounds needs to be played BEFORE the completion of the raster
import micron
import playsound
import numpy as np
import math
import time
import datetime
import threading
from extraFunctions import ThreadWithExc
import jukebox
class InputError(Exception):
# Error in user input -> To be caught and flagged accordingly
pass
class StageControl():
def __init__(self, noinvertx = 1, noinverty = 1, GUI_Object = None, jukeboxKWArgs = {}, noFinishTone = True, **kwargs):
# noinvertx can take values 1 and -1
assert noinvertx in (-1, 1), "noinvertx can only take -1 or 1"
assert noinverty in (-1, 1), "noinverty can only take -1 or 1"
self.controller = micron.Micos(GUI_Object = GUI_Object, **kwargs)
self.GUI_Object = GUI_Object
self.noinvertx = noinvertx
self.noinverty = noinverty
# Generate filename based on the serial number of the model
self.serial = self.controller.getSerial()
self.noFinishTone = noFinishTone
# define constants
self.UP, self.RIGHT, self.DOWN, self.LEFT = 0, 1, 2, 3
# music thread
self.musicProcess = None
# jukeboxKWArgs.update({
# "profile": "alarm"
# })
self.jukebox = jukebox.JukeBox(**jukeboxKWArgs) # playmusic = True,
def finishTone(self):
# Play sound to let user know that the action is completed
# To stop, call self.musicProcess.terminate()
self.musicProcess = ThreadWithExc(target = self.jukebox.playmusic, kwargs = { "quiet": self.noFinishTone })
self.musicProcess.start()
if self.GUI_Object:
self.GUI_Object.operationDone.emit()
# ARCHIVE CODE
# , jukeboxKWArgs = {}
# target=self.jukeboxThread, kwargs=jukeboxKWArgs, args=(,)
# def jukeboxThread(self, **jukeboxKWArgs):
# return
# implement cardinal direction movement definitions; the input cases aren't strictly necessary once buttons are paired to commands in guimicro
def rcardinal(self, direction, distance):
if (direction == self.LEFT):
return self.controller.rmove(x = distance * self.noinvertx, y = 0)
elif (direction == self.RIGHT):
return self.controller.rmove(x = -distance * self.noinvertx, y = 0)
elif (direction == self.UP):
return self.controller.rmove(y = distance * self.noinverty, x = 0)
elif (direction == self.DOWN):
return self.controller.rmove(y = -distance * self.noinverty, x = 0)
else:
return False
def rdiagonal(self, distance):
# implement drawing of diagonals
# implement button for relative move directly
distance /= math.sqrt(2)  # split the move equally between both axes
self.controller.rmove(x = distance * self.noinvertx, y = distance * self.noinverty)
# most basic, single rectangle cut rastering
def singleraster(self, velocity, xDist, yDist, rasterSettings, returnToOrigin = False, estimateTime = True, onlyEstimate = False, quietLog = False, verboseLog = False):
# Raster in a rectangle
# rasterSettings = {
# "direction": "x" || "y" || "xy", # Order matters here xy vs yx
# "step": 1 # If set to xy, step is not necessary
# }
# setting onlyEstimate will return the estimated time for the action
# xy/yx = Draw a rectangle with sides xDist and yDist
# x = horizontal lines will be drawn while scanning down/up
# y = vertical lines will be drawn while scanning right/left
# i.e. to say axis = continuous on which axis
# Negative distance to raster in the opposite direction
# Step must be positive
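# Illustrative call (numbers are made up, not from a real scan), assuming
# 'stage' is a StageControl instance: raster a 100 x 50 area with horizontal
# sweeps spaced 1 unit apart at velocity 100:
#   stage.singleraster(100, 100, 50, {"direction": "x", "step": 1})
# whereas rasterSettings = {"direction": "xy"} would only trace the outline
# of the 100 x 50 rectangle.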
# We check if everything is valid
try:
assert isinstance(velocity, (int, float)) , "Velocity must be integer or float"
assert isinstance(xDist, (int, float)) , "xDist must be integer or float"
assert isinstance(yDist, (int, float)) , "yDist must be integer or float"
assert isinstance(rasterSettings, dict) , "rasterSettings must be a dictionary"
assert "direction" in rasterSettings , "Raster direction must be in rasterSettings"
assert isinstance(rasterSettings["direction"], str), "Invalid raster direction: {}".format(rasterSettings["direction"])
# rastering x or y
if len(rasterSettings["direction"]) == 1:
assert rasterSettings["direction"] in self.controller.axes, "Invalid raster direction: {}".format(rasterSettings["direction"])
assert "step" in rasterSettings , "Raster step must be in rasterSettings"
assert rasterSettings["step"] > 0 , "Step size must be positive"
else:
assert len(rasterSettings["direction"]) == 2 and (set(rasterSettings["direction"]) == set(self.controller.axes)), "Invalid raster direction {}".format(rasterSettings["direction"])
# Check stage limits
assert self.controller.stage.xlim[0] <= self.controller.stage.x + xDist <= self.controller.stage.xlim[1], "x not within limits"
assert self.controller.stage.ylim[0] <= self.controller.stage.y + yDist <= self.controller.stage.ylim[1], "y not within limits"
except AssertionError as e:
raise InputError(e)
if onlyEstimate:
estimateTime = True
# Set shutter to not output logs
# To ensure the timing is displayed
self.controller.shutter.quietLog = True
# ACTUAL FUNCTION
self.controller.setvel(velocity)
# Get the current position of the stage
oX, oY = self.controller.stage.x, self.controller.stage.y
# Get index of the first direction to raster
# We change to axes A and B because it could be xy or yx
a = self.controller.axes.index(rasterSettings["direction"][0])
b = a ^ 1
distances = [xDist, yDist]
# Check the raster step
if len(rasterSettings["direction"]) > 1:
# Rastering a square
if estimateTime:
_totalTime = 2 * micron.Micos.getDeltaTime(distances[a], 0, velocity) + \
2 * micron.Micos.getDeltaTime(distances[b], 0, velocity)
# We always return to origin, so need not calculate
if onlyEstimate:
return _totalTime
_doneTime = datetime.datetime.now() + datetime.timedelta(seconds = _totalTime)
if not quietLog:
self.logconsole("Total Time = {} Est Done = {}".format(_totalTime, _doneTime.strftime('%Y-%m-%d %H:%M:%S')))
# Relative moves are blocking, so we can flood the FIFO stack after we are sure all commands have been cleared
self.controller.waitClear()
self.controller.shutter.open()
self.controller.rmove(**{
self.controller.axes[a]: distances[a],
self.controller.axes[b]: 0
})
self.controller.rmove(**{
self.controller.axes[a]: 0,
self.controller.axes[b]: distances[b]
})
self.controller.rmove(**{
self.controller.axes[a]: -distances[a],
self.controller.axes[b]: 0
})
self.controller.rmove(**{
self.controller.axes[a]: 0,
self.controller.axes[b]: -distances[b]
})
self.controller.waitClear()
self.controller.shutter.close()
else:
# Normal rastering
# Since python range doesn't allow float step sizes, we compute the number of lines to raster instead
# DO NOTE THAT THIS PROBABLY WILL CAUSE ROUNDING ERRORS
# Floats are ROUNDED DOWN!
_lines = math.floor(abs(distances[b] / rasterSettings["step"]))
if estimateTime:
# It doesn't matter if it's x or y
_bDirTime = micron.Micos.getDeltaTime(rasterSettings["step"], 0, velocity)
_timeperline = micron.Micos.getDeltaTime(distances[a], 0, velocity) + _bDirTime
_totalTime = _lines * _timeperline - _bDirTime
_totalTime += micron.Micos.getDeltaTime(0, 0, 100, shutterCycles = 2, shutterAbsoluteMode = self.controller.shutter.absoluteMode)
if returnToOrigin:
# If even, we end up at the top right of the box // _q = 0
# If odd, we end up at the bottom right of the box // _q = 1
_q = _lines % 2
_totalTime += micron.Micos.getDeltaTime(distances[a] if _q else 0, _lines * rasterSettings["step"] , 1000)
if onlyEstimate:
return _totalTime
_deltaTime = datetime.timedelta(seconds = _totalTime)
_doneTime = datetime.datetime.now() + _deltaTime
# "Time/line =", _timeperline,
if not quietLog:
self.logconsole("Total Time = {} Lines = {} Est Done = {}".format(_deltaTime, _lines, _doneTime.strftime('%Y-%m-%d %H:%M:%S')))
_step = -rasterSettings["step"] if distances[b] < 0 else rasterSettings["step"]
self.controller.shutter.open()
t0 = datetime.datetime.now()
for i in range(_lines):
print("Rastering line ", i)
# If its not the first one, move B-Axis
if i:
self.controller.rmove(**{
self.controller.axes[a]: 0,
self.controller.axes[b]: _step
})
_q = i % 2 # switch directions for rastering every time
self.controller.rmove(**{
# First one moves right
self.controller.axes[a]: distances[a] if not _q else -distances[a],
self.controller.axes[b]: 0
})
# self.controller.waitClear()
# time.sleep(_sleepTime if not i else _sleepTime - _bDirTime)
# MOVED SLEEP TO RMOVE
t1 = datetime.datetime.now()
self.controller.waitClear()
t2 = datetime.datetime.now()
if verboseLog:
self.logconsole("\nTimes = {}, {}".format(t1 - t0, t2 - t0))
print("\nSTATUS = ",self.controller.getStatus(),"\n")
self.controller.shutter.close()
if returnToOrigin:
self.controller.shutter.close()
# we /could/ use self.controller.move() but I don't really trust it
# so...relative move
cX, cY = self.controller.stage.x, self.controller.stage.y
self.controller.setvel(1000)
self.controller.rmove(x = oX - cX, y = oY - cY)
self.controller.setvel(velocity)
self.controller.shutter.quietLog = False
if not quietLog:
self.finishTone()
# overpowered, omni-potent rastering solution for both laser power and velocity
def arrayraster(self, inivel, inipower, x_isVel, ncols, xincrement, xGap, y_isVel, nrows, yincrement, yGap, xDist, yDist, rasterSettings, returnToOrigin = True):
# building parameter mother-of-all-lists (MOAL) to parse through when cutting every individual raster. Raster array will be numbered left to right top to bottom
# info structure: <primary list> <raster1> (initial position tuple1), [velocity, power]</raster1> <raster2> (initial position tuple2), [velocity, power]</raster2> .... </primary list>
# NOTE: This function can CLEARLY be optimized
# Struct [
# [(Init Pos), [velocity, power]], ...
# ]
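# Illustrative example (made-up numbers): with the x-axis varying velocity and
# the y-axis varying power, a 2 x 2 array starting at (0, 0) with
# xDist = yDist = 10 and xGap = yGap = 5 gives a MOAL roughly like:
# [
#   [(0, 0),  [v0, p0]],       [(15, 0),  [v0 + dx, p0]],
#   [(0, 15), [v0, p0 + dy]],  [(15, 15), [v0 + dx, p0 + dy]],
# ]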
# Set shutter to not output logs
# To ensure the timing is displayed
self.controller.shutter.quietLog = True
xone = self.controller.stage.x
yone = self.controller.stage.y
moal = []
for i in range(nrows):
for j in range(ncols):
nthsquare = []
axe = xone + j * (xDist + xGap)
why = yone + i * (yDist + yGap)
# gui combobox setting: velocity is True, power is False
if x_isVel and y_isVel:
speed = (inivel + i * yincrement) + xincrement * j
powa = inipower
elif x_isVel and not y_isVel:
speed = inivel + xincrement * j
powa = inipower + yincrement * i
elif not x_isVel and not y_isVel:
speed = inivel
powa = (inipower + i * yincrement) + xincrement * j
elif not x_isVel and y_isVel:
speed = inivel + yincrement * i
powa = inipower + xincrement * j
startpos = (axe , why)
speedpowa = [speed, powa]
nthsquare.append(startpos)
nthsquare.append(speedpowa)
moal.append(nthsquare)
print(moal)
#TODO! have a countdown for all rastering and run timer in separate thread
# Estimate time
totaltime = 0
if len(moal) > 50:
# Sorry, algo is trash
estTimeString = "No time estimate, too many squares"
else:
firstsq = True
for n, square in enumerate(moal):
subtotaltime = 0 if firstsq else micron.Micos.getDeltaTime(x = xGap + xDist, y = 0, velocity = 500)
firstsq = False
if not (n + 1) % ncols:
micron.Micos.getDeltaTime(x = -ncols * (xGap | |
import itertools
from collections import defaultdict
from functools import partial
from math import ceil
from multiprocessing import Pool
from os import cpu_count
import numpy as np
from numba import njit
from pandas import DataFrame
def __set_matrix(x, phases=None, subset_genes=None, subset_samples=None, rm_zeros=True, fraction=None, verbose=False):
"""
Sets the parameter for the algorithms and trims the 'x'-matrix to contain only necessary elements
:param x: Pandas-Matrix with gene counts, index must contain gene names, columns must contain sample names
:param phases: Dictionary of Lists, i.e. {phase: [sample, ...]}, containing annotation of samples to their phase
:param subset_genes: List of Indices, Names or Boolean of genes to look at. Excluding all other.
:param subset_samples: List of Indices, Names or Boolean of samples to look at. Excluding all other
:return: Dictionary: {
"x": truncated matrix values,
"phases": phases annotation,
"sample_names": list of sample names,
"gene_names": list of gene names,
"thresholds": thresholds
}
"""
current_shape = x.shape
if verbose:
print('[__set_matrix] Original Matrix \'x\' has shape {} x {}'.format(
current_shape[0], current_shape[1]
))
# Check for index
if x.index.dtype != object:
raise Exception("Index does not contain gene names. Please set genes as the index with pandas.DataFrame.set_index().")
# Copy x to ensure original matrix is not altered
x_copy = x.copy()
# Eliminate rows where genes not in 'subset_genes', if provided
if subset_genes is not None:
# Make sure 'subset_genes' is index array if boolean or named array is supplied
# And remove genes provided in 'subset_genes' but not contained in 'x'
genes_mask = to_index(subset_genes, x_copy.index)
x_copy = x_copy.iloc[genes_mask, :]
if verbose:
print('[__set_matrix] Removed {} genes that were not in \'subset_genes\'. {} genes remaining.'.format(
(current_shape[0] - x_copy.shape[0]), x_copy.shape[0])
)
current_shape = x_copy.shape
if rm_zeros:
# Eliminate not expressed genes
x_copy = x_copy[(x_copy.T != 0).any()]
if verbose:
print('[__set_matrix] Removed {} genes that were not expressed in any samples. {} genes remaining.'.format(
(current_shape[0] - x_copy.shape[0]), x_copy.shape[0])
)
# Store remaining gene names for later use, rename for readability
gene_names = list(x_copy.index)
# Store all sample names for re calculation of indices
all_samples = x_copy.columns.values
# Eliminate columns where samples not in 'subset_samples', if provided
if subset_samples is not None:
# Make sure 'subset_samples' is index array if boolean or named array is supplied
# And remove samples provided in 'subset_samples' but not contained in 'x'
sample_mask = to_index(subset_samples, all_samples)
x_copy = x_copy.iloc[:, sample_mask]
if verbose:
print('[__set_matrix] Removed {} samples that were not in \'subset_samples\'. {} samples remaining.'.format(
(current_shape[1] - x_copy.shape[1]), x_copy.shape[1])
)
current_shape = x_copy.shape
thresholds = None
phases_copy = None
# Eliminate samples not annotated in 'phases'
if phases is not None:
# Get 1D index based mask from samples per phase
# And remove all samples not contained in this list
phase_mask = [
idx for _, samples in phases.items()
for idx in to_index(
to_named(samples, all_samples),
x_copy.columns.values
)
]
x_copy = x_copy.iloc[:, phase_mask]
if verbose:
print(
'[__set_matrix] Removed {} samples that were not annotated in \'phases\'. {} samples remaining.'.format(
(current_shape[1] - x_copy.shape[1]), x_copy.shape[1])
)
# Re-calculate phases indices based on truncated sample list
phases_copy = {
phase: to_index(
to_named(samples, all_samples),
x_copy.columns.values
) for phase, samples in phases.items()
}
# Pre Calculate thresholds for phases
thresholds = {phase: ceil(len(samples) * fraction) for phase, samples in phases_copy.items()}
# Store remaining sample names for later use, rename for readability
sample_names = list(x_copy.columns.values)
if verbose:
print('[__set_matrix] Matrix truncation done. Working with {} genes for {} samples.'.format(
x_copy.shape[0], x_copy.shape[1])
)
# Transform to ndarray for faster calculations
x_copy = x_copy.values
return {
"x": x_copy,
"phases": phases_copy,
"sample_names": sample_names,
"gene_names": gene_names,
"thresholds": thresholds
}
def sandbag(x, phases, fraction=0.5, processes=1, subset_genes=None, subset_samples=None, weighted=False,
triplets=False, verbose=False):
""" Calculates the pairs of genes serving as marker pairs for each phase, based on a matrix of gene counts and
an annotation of known phases.
:param x: Pandas-Matrix with gene counts, index must contain gene names, columns must contain sample names
:param fraction: Fraction to be used as threshold.
:param processes: Number of processes to use for multiprocess.pool
:param phases: Dictionary of Lists, i.e. {phase: [sample, ...]}, containing annotation of samples to their phase
:param subset_genes: List of Indices, Names or Boolean of genes to look at excluding all other
:param subset_samples: List of Indices, Names or Boolean of samples to look at excluding all other
:param weighted: Calculate weight for each pair.
:param triplets: Calculate 3-tuples instead of pairs. Where (g1 > g2 > g3)
:param verbose: Debug info
:return: Dictionary of List of Tuples, i.e. {phase: [(Gene1, Gene2), ...]}, containing marker pairs per phase
"""
# Set the parameter to the class instance and remove unnecessary elements in 'x'
params = __set_matrix(x, fraction=fraction, phases=phases, subset_genes=subset_genes, subset_samples=subset_samples,
verbose=verbose)
if verbose:
print('[sandbag] Identifying marker pairs...', end='')
possible_combinations = itertools.combinations(range(0, len(params["gene_names"])), 2)
if processes == 0:
processes = cpu_count() - 1
masks = (params["phases"]["G1"], params["phases"]["S"], params["phases"]["G2M"])
thresholds = [params["thresholds"]["G1"], params["thresholds"]["S"], params["thresholds"]["G2M"]]
check_phase_for_pair_wrapper_par = partial(check_phase_for_pair_wrapper, x=params["x"], masks=masks,
thresholds=thresholds)
# Multi cored calculation if requested
if processes != 1:
# Worker pool of processes
with Pool(processes=processes) as pool:
if verbose:
print("Processing in parallel with {} processes...".format(processes))
annotations = pool.map(
check_phase_for_pair_wrapper_par, possible_combinations)
annotations = list(annotations)
else:
annotations = (check_phase_for_pair_wrapper_par(pair) for pair in possible_combinations)
# Create container for marker pairs
marker_pairs = {phase: [] for phase in phases.keys()}
# Puts marker pairs into the 'marker_pairs' dictionary and removes 'None' phase annotation
for annotation in annotations:
if annotation[0]:
if weighted:
marker_pairs[annotation[0]].append(
(
params["gene_names"][annotation[1][0]],
params["gene_names"][annotation[1][1]],
(annotation[2] / len(params["sample_names"]))
)
)
else:
marker_pairs[annotation[0]].append(
(params["gene_names"][annotation[1][0]], params["gene_names"][annotation[1][1]]))
if triplets:
marker_pairs = identify_triplets(marker_pairs, weighted=weighted)
if verbose:
count_pairs = 0
for _, pairs in marker_pairs.items():
count_pairs = count_pairs + len(pairs)
print(" Done!")
print("[sandbag] Identified {} marker pairs (phase: count):".format(count_pairs), end=' ')
print({phase: len(pairs) for phase, pairs in marker_pairs.items()})
# Return 'marker_pairs' dictionary: {phase: [(Gene1, Gene2), ...]}
return marker_pairs
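# Minimal usage sketch (hypothetical data; gene and sample names are invented):
#
#   import pandas as pd
#   counts = pd.DataFrame(values, index=gene_names, columns=sample_names)
#   annotation = {"G1": ["s1", "s2"], "S": ["s3", "s4"], "G2M": ["s5", "s6"]}
#   pairs = sandbag(counts, annotation, fraction=0.5)
#   # pairs has the form {"G1": [("GeneA", "GeneB"), ...], "S": [...], "G2M": [...]}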
def cyclone(x, marker_pairs, subset_genes=None, iterations=1000, min_iter=100, min_pairs=50,
subset_samples=None, verbose=False, rm_zeros=False, processes=1, weighted=False, triplets=False):
""" Calculates scores for each sample and each phase and assigns prediction based on marker pairs indentified by
sandbag
:param x: Pandas-Matrix with gene counts, index must contain gene names, columns must contain sample names
:param marker_pairs: Dict of marker pairs per phase. See sandbag output.
:param iterations: An integer scalar specifying the number of iterations for random sampling to obtain a cycle
score.
:param min_iter: An integer scalar specifying the minimum number of iterations for score estimation
:param min_pairs: An integer scalar specifying the minimum number of pairs for cycle estimation.
:param subset_genes: List of Indices, Names or Boolean of genes to look at excluding all other
:param subset_samples: List of Indices, Names or Boolean of samples to look at excluding all other
:param weighted: Use weights for score calculation
:param processes: Number of processes to use for multiprocess.pool
:param rm_zeros: Whether not expressed genes should be removed
:param triplets: Pairs a 3-tuples
:param verbose: Debug info
:return: Dictionary of List of Tuples, i.e. {phase: [(Gene1, Gene2), ...]}, containing marker pairs per phase
"""
params = __set_matrix(x, subset_genes=subset_genes, subset_samples=subset_samples, rm_zeros=rm_zeros,
verbose=verbose)
if verbose:
print('[cyclone] Removing marker pairs where at least one gene is not present in \'x\'...', end='')
# Eliminate all gene pairs where at least one gene is not present in gene_names and convert to index
marker_pairs_idx = defaultdict(list)
removed = 0
used = defaultdict(list)
used_idx = defaultdict(list)
gene_name_idx = {g: i for i, g in enumerate(params["gene_names"])}
weights = defaultdict(list)
# Generate used list
for phase, pairs in marker_pairs.items():
u = []
for pair in pairs:
try:
if weighted:
if len(pair) == 4:
idx_pair = (gene_name_idx[pair[0]], gene_name_idx[pair[1]], gene_name_idx[pair[2]])
u.extend([idx_pair[0], idx_pair[1], idx_pair[2]])
else:
idx_pair = (gene_name_idx[pair[0]], gene_name_idx[pair[1]], -1)
u.extend([idx_pair[0], idx_pair[1]])
weights[phase].append(pair[-1])
else:
if len(pair) == 3:
idx_pair = (gene_name_idx[pair[0]], gene_name_idx[pair[1]], gene_name_idx[pair[2]])
u.extend([idx_pair[0], idx_pair[1], idx_pair[2]])
else:
idx_pair = (gene_name_idx[pair[0]], gene_name_idx[pair[1]], -1)
u.extend([idx_pair[0], idx_pair[1]])
weights[phase].append(1)
marker_pairs_idx[phase].append(idx_pair)
except KeyError:
removed = removed + 1
used[phase] = list(np.unique(u))
for phase, pairs in marker_pairs.items():
u_idx = np.empty(len(params["gene_names"]), dtype=int)
for i, u in enumerate(used[phase]):
u_idx[u] = i
used_idx[phase] = u_idx
if verbose:
count_pairs = 0
for phase, pairs in marker_pairs_idx.items():
count_pairs = count_pairs + len(pairs)
| |
re.match('b',Wboard.w2e)and Wboard.w5h==''\
and board.s4g+board.s3f=='':
moves = '2e5h+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w2e)and Wboard.w4g==''\
and board.s3f=='':
moves = '2e4g+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w2e)and Wboard.w6i==''\
and board.s5h+board.s4g+board.s3f=='':
moves = '2e6i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w2e)and Wboard.w5h==''\
and board.s4g+board.s3f=='':
moves = '2e5h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b',Wboard.w2e)and Wboard.w4g==''\
and board.s3f=='':
moves = '2e4g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.w3e !='':
if re.match(r'[plsgrk+]', Wboard.w3e)and Wboard.w3f=='':
moves = '3e3f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[sgbk+]', Wboard.w3e)and Wboard.w2f=='':
moves = '3e2f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[sgbk+]', Wboard.w3e)and Wboard.w4f=='':
moves = '3e4f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[grk+]', Wboard.w3e)and Wboard.w2e=='':
moves = '3e2e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[grk+]', Wboard.w3e)and Wboard.w4e=='':
moves = '3e4e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[grk+]', Wboard.w3e)and Wboard.w3d=='':
moves = '3e3d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|\+b|b|s|k',Wboard.w3e)and Wboard.w2d=='':
moves = '3e2d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|\+b|b|s|k',Wboard.w3e)and Wboard.w4d=='':
moves = '3e4d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('n', Wboard.w3e)and Wboard.w2g=='':
moves = '3e2g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('n', Wboard.w3e)and Wboard.w4g=='':
moves = '3e4g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('n', Wboard.w3e)and Wboard.w2g=='':
moves = '3e2g+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('n', Wboard.w3e)and Wboard.w4g=='':
moves = '3e4g+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w3e)and Wboard.w3i==''\
and board.s3h+board.s3g+board.s3f=='':
moves = '3e3i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'r|l', Wboard.w3e)and Wboard.w3i==''\
and board.s3h+board.s3g+board.s3f=='':
moves = '3e3i+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w3e)and Wboard.w3h==''\
and board.s3g+board.s3f=='':
moves = '3e3h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'r|l', Wboard.w3e)and Wboard.w3h==''\
and board.s3g+board.s3f=='':
moves = '3e3h+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|l', Wboard.w3e)and Wboard.w3g==''\
and board.s3f=='':
moves = '3e3g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'r|l', Wboard.w3e)and Wboard.w3g==''\
and board.s3f=='':
moves = '3e3g+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w3e)and Wboard.w3c==''\
and board.s3d=='':
moves = '3e3c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w3e)and Wboard.w3b==''\
and board.s3d+board.s3c=='':
moves = '3e3b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w3e)and Wboard.w3a==''\
and board.s3d+board.s3c+board.s3b=='':
moves = '3e3a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w3e)and Wboard.w1e==''\
and board.s2e=='':
moves = '3e1e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w3e)and Wboard.w5e==''\
and board.s4e=='':
moves = '3e5e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w3e)and Wboard.w6e==''\
and board.s4e+board.s5e=='':
moves = '3e6e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w3e)and Wboard.w7e==''\
and board.s4e+board.s5e+board.s6e=='':
moves = '3e7e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w3e)and Wboard.w8e==''\
and board.s4e+board.s5e+board.s6e+board.s7e=='':
moves = '3e8e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w3e)and Wboard.w9e==''\
and board.s4e+board.s5e+board.s6e+board.s7e+board.s8e=='':
moves = '3e9e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w3e)and Wboard.w1g==''\
and board.s2f=='':
moves = '3e1g+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b',Wboard.w3e)and Wboard.w1g==''\
and board.s2f=='':
moves = '3e1g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+b|b', Wboard.w3e)and Wboard.w5c==''\
and board.s2d=='':
moves = '3e5c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+b|b', Wboard.w3e)and Wboard.w6b==''\
and board.s2d+board.s5c=='':
moves = '3e6b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+b|b', Wboard.w3e)and Wboard.w7a==''\
and board.s2d+board.s5c+board.s6b=='':
moves = '3e7a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w3e)and Wboard.w7i==''\
and board.s6h+board.s5g+board.s4f=='':
moves = '3e7i+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w3e)and Wboard.w6h==''\
and board.s5g+board.s4f=='':
moves = '3e6h+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w3e)and Wboard.w5g==''\
and board.s4f=='':
moves = '3e5g+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w3e)and Wboard.w7i==''\
and board.s6h+board.s5g+board.s4f=='':
moves = '3e7i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w3e)and Wboard.w6h==''\
and board.s5g+board.s4f=='':
moves = '3e6h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b',Wboard.w3e)and Wboard.w5g==''\
and board.s4f=='':
moves = '3e5g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+b|b', Wboard.w3e)and Wboard.w1c==''\
and board.s2d=='':
moves = '3e1c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.w4e !='':
if re.match(r'[plsgrk+]', Wboard.w4e)and Wboard.w4f=='':
moves = '4e4f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[sgbk+]', Wboard.w4e)and Wboard.w3f=='':
moves = '4e3f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[sgbk+]', Wboard.w4e)and Wboard.w5f=='':
moves = '4e5f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[grk+]', Wboard.w4e)and Wboard.w3e=='':
moves = '4e3e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[grk+]', Wboard.w4e)and Wboard.w5e=='':
moves = '4e5e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[grk+]', Wboard.w4e)and Wboard.w4d=='':
moves = '4e4d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|\+b|b|s|k',Wboard.w4e)and Wboard.w3d=='':
moves = '4e3d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|\+b|b|s|k',Wboard.w4e)and Wboard.w5d=='':
moves = '4e5d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('n', Wboard.w4e)and Wboard.w3g=='':
moves = '4e3g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('n', Wboard.w4e)and Wboard.w5g=='':
moves = '4e5g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('n', Wboard.w4e)and Wboard.w3g=='':
moves = '4e3g+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('n', Wboard.w4e)and Wboard.w5g=='':
moves = '4e5g+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w4e)and Wboard.w4i==''\
and board.s4h+board.s4g+board.s4f=='':
moves = '4e4i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'r|l', Wboard.w4e)and Wboard.w4i==''\
and board.s4h+board.s4g+board.s4f=='':
moves = '4e4i+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+r', Wboard.w4e)and Wboard.w4h==''\
and board.s4g+board.s4f=='':
moves = '4e4h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'r|l', Wboard.w4e)and Wboard.w4h==''\
and board.s4g+board.s4f=='':
moves = '4e4h+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|l', Wboard.w4e)and Wboard.w4g==''\
and board.s4f=='':
moves = '4e4g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'r|l', Wboard.w4e)and Wboard.w4g==''\
and board.s4f=='':
moves = '4e4g+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w4e)and Wboard.w4c==''\
and board.s4d=='':
moves = '4e4c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w4e)and Wboard.w4b==''\
and board.s4d+board.s4c=='':
moves = '4e4b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w4e)and Wboard.w4a==''\
and board.s4d+board.s4c+board.s4b=='':
moves = '4e4a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w4e)and Wboard.w1e==''\
and board.s2e+board.s3e=='':
moves = '4e1e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w4e)and Wboard.w2e==''\
and board.s3e=='':
moves = '4e2e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w4e)and Wboard.w6e==''\
and board.s5e=='':
moves = '4e6e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w4e)and Wboard.w7e==''\
and board.s5e+board.s6e=='':
moves = '4e7e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w4e)and Wboard.w8e==''\
and board.s5e+board.s6e+board.s7e=='':
moves = '4e8e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+r|r', Wboard.w4e)and Wboard.w9e==''\
and board.s5e+board.s6e+board.s7e+board.s8e=='':
moves = '4e9e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w4e)and Wboard.w1h==''\
and board.s2g+board.s3f=='':
moves = '4e1h+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w4e)and Wboard.w2g==''\
and board.s3f=='':
moves = '4e2g+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w4e)and Wboard.w1h==''\
and board.s2g+board.s3f=='':
moves = '4e1h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b',Wboard.w4e)and Wboard.w2g==''\
and board.s3f=='':
moves = '4e2g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+b|b', Wboard.w4e)and Wboard.w6c==''\
and board.s5d=='':
moves = '4e6c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+b|b', Wboard.w4e)and Wboard.w7b==''\
and board.s5d+board.s6c=='':
moves = '4e7b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+b|b', Wboard.w4e)and Wboard.w8a==''\
and board.s5d+board.s6c+board.s7b=='':
moves = '4e8a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w4e)and Wboard.w8i==''\
and board.s7h+board.s6g+board.s5f=='':
moves = '4e8i+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w4e)and Wboard.w7h==''\
and board.s6g+board.s5f=='':
moves = '4e7h+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('b',Wboard.w4e)and Wboard.w6g==''\
and board.s5f=='':
moves = '4e6g+'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w4e)and Wboard.w8i==''\
and board.s7h+board.s6g+board.s5f=='':
moves = '4e8i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b', Wboard.w4e)and Wboard.w7h==''\
and board.s6g+board.s5f=='':
moves = '4e7h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match('\+b',Wboard.w4e)and Wboard.w6g==''\
and board.s5f=='':
moves = '4e6g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+b|b', Wboard.w4e)and Wboard.w2c==''\
and board.s3d=='':
moves = '4e2c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'\+b|b', Wboard.w4e)and Wboard.w1b==''\
and board.s3d+board.s2c=='':
moves = '4e1b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.w5e !='':
if re.match(r'[plsgrk+]', Wboard.w5e)and Wboard.w5f=='':
moves = '5e5f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[sgbk+]', Wboard.w5e)and Wboard.w4f=='':
moves = '5e4f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[sgbk+]', Wboard.w5e)and Wboard.w6f=='':
moves = '5e6f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if re.match(r'[grk+]', Wboard.w5e)and Wboard.w4e=='':
moves = '5e4e'
kaihimore(moves)
if oute.oute == | |
<gh_stars>1-10
"""
This module is a collection of functions to help with multi cam editing, including the multicam class.
Note:
You need scipy for sync
"""
import os
import subprocess
#from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip
#from moviepy.video.VideoClip import VideoClip
from moviepy.audio.io.AudioFileClip import AudioFileClip
from moviepy.video.compositing.concatenate import concatenate
from moviepy.video.io.VideoFileClip import VideoFileClip
from moviepy.conf import FFMPEG_BINARY
import numpy as np
from scipy.io import wavfile
from scipy.signal import fftconvolve
from tempfile import mkdtemp
import shutil
import pygame as pg
class MultiCam:
"""
This class holds the clips from several cameras filming the same take, syncs them via their audio tracks and cuts one clip that switches between the camera angles.
:param clips: Either a list of VideoFileClips, or a list of filenames where clips are situated.
:param times: list of lists where the first value is the time and the second is the camera number. If left empty or None the get_times function should be called before get_clip.
:param slowmo: dictionary where the keys are the index of the clip that is slowed down, the value is the amount it was slowed down.
:param *kwargs: The rest of the parameters are the optional arguments for the concatenate function.
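Example (illustrative file names and switch times, not from a real project):
    mc = MultiCam(["cam1/GOPR0001.MP4", "cam2/GOPR0042.MP4"],
                  times=[[0.0, 0], [12.5, 1], [30.0, 0]])
    mc.sync()            # align the second camera to the first via audio
    final_clip = mc.get_clip()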
"""
def __init__(self,clips,times=None,shift=[],slowmo={}):
### TODO add audio settings here????
self.shift = shift
self.times = times
self.converted = False
if isinstance(clips[0],VideoFileClip):
self.clips = clips
filenames = []
for clip in clips:
filenames.append(clip.parameters['filename'])
else:
# initialise clips later to save memory
self.clips = None
filenames = clips
# seperate filename and extension
self.filenames = []
for f in filenames:
f_a = f.split('.')
if len(f_a) < 2:
raise Exception("No extension?")
if len(f_a) == 2:
self.filenames.append([f_a[0],'.'+f_a[1]])
else:
f1 = []
for fp in f_a[:-1]:
f1 = f1 + fp
self.filenames.append([f1,'.'+f_a[-1]])
self.slowmo = slowmo
def load_clips(self):
""" Load clips from filenames.
"""
self.clips = []
for f in self.filenames:
self.clips.append( VideoFileClip(f[0]+f[1]) )
def sync(self,fps=11025,nbytes=2,low_memory=False,print_progress=False, convert=False):
"""
This function calculates the shift necessary for the other cameras to be in sync
with the first camera. It uses scipy's fftconvolve to compute the
cross correlation.
:param convert: if convert is True, the audio from the video file is written to a wave file. (This uses scipy to read the file if it exists.)
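The shift is essentially the lag that maximises the cross correlation of the
two audio tracks; a rough sketch (not the exact get_shift implementation):
    corr = fftconvolve(reference, to_sync[::-1])
    shift_in_seconds = (argmax(corr) - (len(to_sync) - 1)) / float(fps)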
"""
# first file (reference)
if convert:
# only use wav if convert is on
if os.path.exists(self.filenames[0][0]+'.wav'):
with open(self.filenames[0][0]+'.wav','rb') as f:
fs,data = wavfile.read(f)
# see if settings changed
if fs != fps:
data = write_audio(self.filenames[0],fps,nbytes,overwrite=True)
else:
data = write_audio(self.filenames[0],fps,nbytes,overwrite=True)
else:
clip = AudioFileClip(self.filenames[0][0]+self.filenames[0][1])
data = clip.to_soundarray(fps=fps, nbytes=nbytes)[0] #### is this right
clip.reader.close_proc()  # TODO: make sure the reader really gets closed here
if low_memory:
reference = np.memmap(self.filenames[0][0]+'.dat', dtype='int16', mode='w+', shape=data.shape)
reference[:] = data[:]  # copy into the on-disk memmap so the samples do not stay in RAM
del data
else:
reference = data[:]
del data
# the rest (to sync)
shift = []
for i in range(len(self.filenames)-1):
if print_progress:
print "Syncing "+str(i+2)+" of "+str(len(self.filenames))
if convert:
# only use wav if convert is on
if os.path.exists(self.filenames[i+1][0]+'.wav'):
with open(self.filenames[i+1][0]+'.wav','rb') as f:
fs,data = wavfile.read(f)
# see if settings changed
if fs != fps:
data = write_audio(self.filenames[i+1],fps,nbytes,overwrite=True)
else:
data = write_audio(self.filenames[i+1],fps,nbytes,overwrite=True)
else:
clip = AudioFileClip(self.filenames[i+1][0]+self.filenames[i+1][1])
data = clip.to_soundarray(fps=fps, nbytes=nbytes)[0]
del clip.reader
if low_memory:
to_sync = np.memmap(self.filenames[i+1][0]+'.dat', dtype='int16', mode='w+', shape=data.shape)
to_sync[:] = data[:]  # copy into the on-disk memmap so the samples do not stay in RAM
del data
else:
to_sync = data[:]  # TODO: this copy may be unnecessary
del data
sync_time = get_shift(reference,to_sync,fps,low_memory=low_memory)
if print_progress:
print sync_time
shift.append( sync_time )
self.shift = shift
return shift
def get_clip(self,**kargs):
"""
"""
if self.times == None:
raise Exception('Times not specified. Run get_times.')
if self.clips == None:
self.load_clips()
clip_array = []
for i,time in enumerate(self.times[:-1]):
clip_start = time[0]
clip_end = self.times[i+1][0]
if i in self.slowmo:
clip_start = clip_start*self.slowmo[i]
clip_end = clip_end*self.slowmo[i]
if time[1] > 0:
clip_start = clip_start - self.shift[time[1]-1]
clip_end = clip_end - self.shift[time[1]-1]
# round frames?
if clip_start < 0:
clip_start = 0
if clip_end < 0:
clip_end = 0
if clip_start > self.clips[time[1]].duration:
clip_start = self.clips[time[1]].duration - 1.0/self.clips[time[1]].fps
if clip_end > self.clips[time[1]].duration:
clip_end = self.clips[time[1]].duration
clip_array.append(self.clips[time[1]].subclip(clip_start,clip_end))
return concatenate(clip_array,**kargs)
def get_times(self):
"""
This is not perfect but wil help you. Consider running this from the terminal.
"""
self.load_clips()
clip = self.clips[0] # reference ???????????????
print "Click when you want to switch views."
times = clip.preview(fps=5,audio=False,func=get_time)
print "Now pick viewpoints."
#TODO print "Use up and down arrow to select view points, enter to select"
for i in range(len(times)):
clip.show(times[i][0])
in_str = raw_input( "To which camera should it switch here?" )
times[i].append(int(in_str))
self.times = times
return times
def close(self):
if self.clips != None:
for c in self.clips:
c.reader.close()
if c.audio:
c.audio.reader.close_proc()
# is this needed? It loads it into memory anyway...
def write_audio(filename,fps,nbytes,overwrite=False):
"""
:param filename: list with first index the filename, second the extension.
"""
input_str = 'n\n'
if overwrite:
input_str = 'y\n'
cmd = [ FFMPEG_BINARY, '-i', filename[0]+filename[1], '-vn',
'-acodec', 'pcm_s%dle'%(8*nbytes),
'-ar', '%d'%fps,
'-ac', '1', filename[0]+'.wav']
p = subprocess.Popen(cmd,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
ret = p.communicate(input=input_str)
with open(filename[0]+'.wav','rb') as f:
fs,data = wavfile.read(f)
return data
def check_numbers(base_folder,cameras,extension = '.MP4',raise_error=True):
"""
Checks number of files in each camera folder.
If they differ, raise an error with helpful
information if raise_error is True.
Otherwise simply prints info and returns False.
"""
structure = get_file_structure(base_folder,cameras,extension=extension)
num_of_files = len(structure[0])
for i,gp in enumerate(structure):
if len(gp) != num_of_files:
if raise_error:
raise Exception("The " + str(i+1) + "th folder has " + str(len(gp)) + " video files, while the previous has " + str(len(structure[i-1])))
else:
print "The " + str(i+1) + "th folder has " + str(len(gp)) + " video files, while the previous has " + str(len(structure[i-1]))
return False
return True
def check_sizes(base_folder,cameras,extension = '.MP4', tollerance=0.7, raise_error=True):
"""
This function checks that corresponding file sizes in camera folders are
within a certain tolerance.
If raise_error is True it fails with an error;
if it is False it goes into an interactive mode and returns False only if a
difference in file size is not acceptable.
"""
filenames = get_file_structure(base_folder,cameras,extension=extension)
for i in range(len(filenames[0])):
filesize_base = os.path.getsize(filenames[0][i])
for j in range(len(filenames)-1):
filesize = os.path.getsize(filenames[j+1][i])
ok = False
if float(min(filesize_base,filesize))/max(filesize_base,filesize) > tollerance:
ok = True
elif float(min(filesize_base,2*filesize))/max(filesize_base,2*filesize) > tollerance:
# allow double frame rate on camera 1
ok = True
elif float(min(2*filesize_base,filesize))/max(2*filesize_base,filesize) > tollerance:
# allow double frame rate on other cameras
ok = True
elif float(min(filesize_base,4*filesize))/max(filesize_base,4*filesize) > tollerance:
# allow 4x frame rate on camera 1
ok = True
elif float(min(4*filesize_base,filesize))/max(4*filesize_base,filesize) > tollerance:
# allow 4x frame rate on other cameras
ok = True
if not ok:
print "File sizes do not match:"
if raise_error:
raise Exception(filenames[0][i] + " is " + str(filesize_base) + " but " + filenames[j+1][i] + " is " + str(filesize))
else:
print filenames[0][i] + " is " + str(filesize_base) + " but " + filenames[j+1][i] + " is " + str(filesize)
cont = True
while cont:
ret = raw_input("Is this ok? [Y/n] ")
if ret == '' or ret == 'y' or ret == 'Y':
cont = False
elif ret == 'n' or ret == 'N':
return False
return True
# def __del__(self):
# if self.clips != None:
# for clip in self.clips:
# clip.reader.close()
def check_files(base_folder,cameras,extension = '.MP4',raise_error=True):
"""
Used to make sure video files in corresponding
camera folders correspond to one another.
Runs check_numbers and check_sizes.
"""
if not check_numbers(base_folder,cameras,extension=extension,raise_error=raise_error):
return False
if not check_sizes(base_folder,cameras,extension=extension,raise_error=raise_error):
return False
return True
def get_file_structure(base_folder,cameras,extension='.MP4'):
"""
Returns directory listing matching extension for each camera.
"""
structure = []
for i,camera in enumerate(cameras):
path = os.path.abspath(os.path.join(base_folder, camera))
structure.append( [] )
for f in sorted(os.listdir(path)):
if extension in f:
clip_path = os.path.join(path,f)
structure[i].append(clip_path)
return structure
def get_files(base_folder,cameras,extension='.MP4'):
"""
Writes and returns the directory structure (without file extension).
The file structure is a list whose indecies correspond to the cameras' indecies.
Each index of which can be used to initiate a MultiCam class.
eg.
[
['/basefolder/camera1/GOPR01234.MP4','/basefolder/camera2/GOPR01239.MP4'],
['/basefolder/camera1/GOPR01235.MP4','/basefolder/camera2/GOPR01241.MP4']
]
Note: to make sure files are consistent run check_files before this function.
"""
# first get the directory listing of each camera
structure = get_file_structure(base_folder,cameras,extension=extension)
# now invert to get correct structure
filenames = []
for | |
<reponame>jake100/Mathics<gh_stars>1-10
# -*- coding: utf8 -*-
"""
List functions
"""
from mathics.builtin.base import (
Builtin, Test, InvalidLevelspecError,
PartError, PartDepthError, PartRangeError, SympyFunction)
from mathics.builtin.scoping import dynamic_scoping
from mathics.core.expression import Expression, String, Symbol, Integer, Number
from mathics.core.evaluation import BreakInterrupt, ContinueInterrupt
from mathics.core.rules import Pattern
from mathics.core.convert import from_sympy
from mathics.builtin.algebra import cancel
import sympy
class List(Builtin):
"""
'List' is the head of lists.
>> Head[{1, 2, 3}]
= List
Lists can be nested:
>> {{a, b, {c, d}}}
= {{a, b, {c, d}}}
"""
attributes = ('Locked',)
def apply_makeboxes(self, items, f, evaluation):
'''MakeBoxes[{items___},
f:StandardForm|TraditionalForm|OutputForm|InputForm]'''
items = items.get_sequence()
return Expression(
'RowBox', Expression('List', *list_boxes(items, f, "{", "}")))
class ListQ(Test):
"""
<dl>
<dt>'ListQ[$expr$]'
<dd>tests whether $expr$ is a 'List'.
</dl>
>> ListQ[{1, 2, 3}]
= True
>> ListQ[{{1, 2}, {3, 4}}]
= True
>> ListQ[x]
= False
"""
def test(self, expr):
return expr.get_head_name() == 'List'
class NotListQ(Test):
def test(self, expr):
return expr.get_head_name() != 'List'
def list_boxes(items, f, open=None, close=None):
result = [Expression('MakeBoxes', item, f) for item in items]
if f.get_name() in ('OutputForm', 'InputForm'):
sep = ", "
else:
sep = ","
result = riffle(result, String(sep))
if len(items) > 1:
result = Expression('RowBox', Expression('List', *result))
elif items:
result = result[0]
if result:
result = [result]
else:
result = []
if open is not None and close is not None:
return [String(open)] + result + [String(close)]
else:
return result
class Length(Builtin):
"""
>> Length[{1, 2, 3}]
= 3
'Length' operates on the 'FullForm' of expressions:
>> Length[Exp[x]]
= 2
>> FullForm[Exp[x]]
= Power[E, x]
The length of atoms is 0:
>> Length[a]
= 0
Note that rational and complex numbers are atoms, although their 'FullForm' might suggest the opposite:
>> Length[1/3]
= 0
>> FullForm[1/3]
= Rational[1, 3]
"""
def apply(self, expr, evaluation):
'Length[expr_]'
if expr.is_atom():
return Integer(0)
else:
return Integer(len(expr.leaves))
class Span(Builtin):
"""
'Span' is the head of span ranges like '1;;3'.
>> ;; // FullForm
= Span[1, All]
>> 1;;4;;2 // FullForm
= Span[1, 4, 2]
>> 2;;-2 // FullForm
= Span[2, -2]
>> ;;3 // FullForm
= Span[1, 3]
## Test parsing : 8 cases to consider
#> a ;; b ;; c // FullForm
= Span[a, b, c]
#> ;; b ;; c // FullForm
= Span[1, b, c]
#> a ;; ;; c // FullForm
= Span[a, All, c]
#> ;; ;; c // FullForm
= Span[1, All, c]
#> a ;; b // FullForm
= Span[a, b]
#> ;; b // FullForm
= Span[1, b]
#> a ;; // FullForm
= Span[a, All]
#> ;; // FullForm
= Span[1, All]
"""
# operator = ';;'
# precedence = 305
pass
def join_lists(lists):
new_list = []
for list in lists:
new_list.extend(list)
return new_list
def get_part(list, indices):
" Simple part extraction. indices must be a list of python integers. "
def rec(cur, rest):
if rest:
pos = rest[0]
if cur.is_atom():
raise PartDepthError
try:
if pos > 0:
part = cur.leaves[pos - 1]
elif pos == 0:
part = cur.head
else:
part = cur.leaves[pos]
except IndexError:
raise PartRangeError
return rec(part, rest[1:])
else:
return cur
return rec(list, indices)
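# For illustration (hypothetical expression; a and b stand for arbitrary atoms):
#   expr = Expression('f', Expression('g', a), b)
#   get_part(expr, [1, 1])  # -> a, the first leaf of the first leaf
#   get_part(expr, [0])     # -> Symbol('f'), since position 0 selects the head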
def set_part(list, indices, new):
" Simple part replacement. indices must be a list of python integers. "
def rec(cur, rest):
if len(rest) > 1:
pos = rest[0]
if cur.is_atom():
raise PartDepthError
try:
if pos > 0:
part = cur.leaves[pos - 1]
elif pos == 0:
part = cur.head
else:
part = cur.leaves[pos]
except IndexError:
raise PartRangeError
rec(part, rest[1:])
elif len(rest) == 1:
pos = rest[0]
if cur.is_atom():
raise PartDepthError
try:
if pos > 0:
cur.leaves[pos - 1] = new
elif pos == 0:
cur.head = new
else:
cur.leaves[pos] = new
except IndexError:
raise PartRangeError
rec(list, indices)
def walk_parts(list_of_list, indices, evaluation, assign_list=None):
list = list_of_list[0]
# To get rid of duplicate entries (TODO: could be made faster!)
list = list.copy()
list.set_positions()
list_of_list = [list]
result = list.copy()
result.set_positions()
inner_list = [result] # changed in loop
list_of_result = [result] # to be able to change it in replace_result
def replace_item(all, item, new):
if item.position is None:
all[0] = new
else:
item.position.replace(new)
for index in indices:
if index.has_form('Span', None):
if len(index.leaves) > 3:
evaluation.message('Part', 'span', index)
return False
start = 1
stop = None
step = 1
if len(index.leaves) > 0:
start = index.leaves[0].get_int_value()
if len(index.leaves) > 1:
stop = index.leaves[1].get_int_value()
if stop is None:
if index.leaves[1].get_name() == 'All':
stop = None
else:
evaluation.message('Part', 'span', index)
return False
if len(index.leaves) > 2:
step = index.leaves[2].get_int_value()
if start is None or step is None:
evaluation.message('Part', 'span', index)
return False
start, stop = python_seq(start, stop)
for inner in inner_list:
if inner.is_atom():
evaluation.message('Part', 'partd')
return False
if stop is None:
inner.leaves = inner.leaves[start::step]
else:
inner.leaves = inner.leaves[start:stop:step]
inner.original = None
inner.set_positions()
inner_list = join_lists(inner.leaves for inner in inner_list)
elif index.has_form('List', None):
index_list = index
indices = []
for index in index_list.leaves:
if not isinstance(index, Integer):
evaluation.message('Part', 'pspec', index_list)
return False
index = index.value
if index > 0:
py_index = index - 1
else:
py_index = index
indices.append((py_index, index))
for inner in inner_list:
if inner.is_atom():
evaluation.message('Part', 'partd')
return False
new_leaves = []
for py_index, index in indices:
try:
if index != 0:
part = inner.leaves[py_index]
else:
part = inner.head
new_leaves.append(part)
except IndexError:
evaluation.message('Part', 'partw', index, inner)
return False
inner.leaves = new_leaves
inner.original = None
inner.set_positions()
inner_list = join_lists(inner.leaves for inner in inner_list)
elif isinstance(index, Integer):
index = index.value
if index > 0:
py_index = index - 1
else:
py_index = index
for inner in inner_list:
if inner.is_atom():
evaluation.message('Part', 'partd')
return False
try:
if index != 0:
part = inner.leaves[py_index]
else:
part = inner.head
except IndexError:
evaluation.message('Part', 'partw', index, inner)
return False
replace_item(list_of_result, inner, part)
part.set_positions()
inner_list = [inner.leaves[py_index] for inner in inner_list]
result = list_of_result[0]
if assign_list is not None:
def process_level(item, assignment):
if item.is_atom():
replace_item(list_of_list, item.original, assignment)
elif (assignment.get_head_name() != 'List' or
len(item.leaves) != len(assignment.leaves)):
if item.original:
replace_item(list_of_list, item.original, assignment)
else:
for leaf in item.leaves:
process_level(leaf, assignment)
else:
for sub_item, sub_assignment in zip(item.leaves,
assignment.leaves):
process_level(sub_item, sub_assignment)
process_level(result, assign_list)
return list_of_list[0]
else:
return result
def is_in_level(current, depth, start=1, stop=None):
if stop is None:
stop = current
if start < 0:
start += current + depth + 1
if stop < 0:
stop += current + depth + 1
return start <= current <= stop
def walk_levels(expr, start=1, stop=None, current=0, heads=False,
callback=lambda l: l, include_pos=False, cur_pos=[]):
if expr.is_atom():
depth = 0
new_expr = expr
else:
depth = 0
if heads:
head, head_depth = walk_levels(
expr.head, start, stop, current + 1, heads, callback,
include_pos, cur_pos + [0])
else:
head = expr.head
leaves = []
for index, leaf in enumerate(expr.leaves):
leaf, leaf_depth = walk_levels(
leaf, start, stop, current + 1, heads, callback, include_pos,
cur_pos + [index + 1])
if leaf_depth + 1 > depth:
depth = leaf_depth + 1
leaves.append(leaf)
new_expr = Expression(head, *leaves)
if is_in_level(current, depth, start, stop):
if include_pos:
new_expr = callback(new_expr, cur_pos)
else:
new_expr = callback(new_expr)
return new_expr, depth
def python_levelspec(levelspec):
def value_to_level(expr):
value = expr.get_int_value()
if value is None:
if expr == Expression('DirectedInfinity', 1):
return None
else:
raise InvalidLevelspecError
else:
return value
if levelspec.has_form('List', None):
values = [value_to_level(leaf) for leaf in levelspec.leaves]
if len(values) == 1:
return values[0], values[0]
elif len(values) == 2:
return values[0], values[1]
else:
raise InvalidLevelspecError
else:
return 1, value_to_level(levelspec)
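# Sketch of the (start, stop) pairs this returns for common level specifications:
#   4         -> (1, 4)
#   {4}       -> (4, 4)
#   {2, 4}    -> (2, 4)
#   Infinity  -> (1, None)   # DirectedInfinity[1], i.e. no upper bound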
class Level(Builtin):
"""
<dl>
<dt>'Level[$expr$, $levelspec$]'
<dd>gives a list of all subexpressions of $expr$ at the level(s) specified by $levelspec$.
</dl>
Level uses standard level specifications:
<dl>
<dt>$n$
<dd>levels 1 through $n$
<dt>'Infinity'
<dd>all levels from level 1
<dt>'{$n$}'
<dd>level $n$ only
<dt>'{$m$, $n$}'
<dd>levels $m$ through $n$
</dl>
Level 0 corresponds to the whole expression.
A negative level '-$n$' consists of parts with depth $n$.
Level -1 is the set of atoms in an expression:
>> Level[a + b ^ 3 * f[2 x ^ 2], {-1}]
= {a, b, 3, 2, x, 2}
>> Level[{{{{a}}}}, 3]
= {{a}, {{a}}, {{{a}}}}
>> Level[{{{{a}}}}, -4]
= {{{{a}}}}
>> Level[{{{{a}}}}, -5]
= {}
>> Level[h0[h1[h2[h3[a]]]], {0, -1}]
= {a, h3[a], h2[h3[a]], h1[h2[h3[a]]], h0[h1[h2[h3[a]]]]}
Use the option 'Heads | |
from __future__ import division, print_function
from functools import partial
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import csv
import h5py
from datetime import datetime, timedelta
import logging
from numpy.random import rand
from time import time
from os.path import exists, join
import theano
import theano.tensor as T
import lasagne
from lasagne.layers import (InputLayer, ReshapeLayer, Layer,
ConcatLayer, ElemwiseSumLayer, DenseLayer,
get_all_layers, Conv1DLayer, FeaturePoolLayer,
DimshuffleLayer, ConcatLayer)
try:
from lasagne.layers import LSTMLayer, RecurrentLayer
from neuralnilm.layers import BLSTMLayer
from neuralnilm.layers import BidirectionalRecurrentLayer
except ImportError:
RECURRENT_LAYERS = [DimshuffleLayer]
else:
RECURRENT_LAYERS = [LSTMLayer, BLSTMLayer, DimshuffleLayer,
RecurrentLayer, BidirectionalRecurrentLayer]
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.utils import floatX
from lasagne.updates import nesterov_momentum
from lasagne.objectives import squared_error
from .source import quantize
from .layers import MixtureDensityLayer
from .utils import sfloatX, none_to_dict, ndim_tensor
from .plot import Plotter
from .batch_norm import batch_norm
class ansi:
# from dnouri/nolearn/nolearn/lasagne.py
BLUE = '\033[94m'
GREEN = '\033[32m'
ENDC = '\033[0m'
class TrainingError(Exception):
pass
# ####################### Neural network class ########################
class Net(object):
# Much of this code is adapted from craffel/nntools/examples/lstm.py
def __init__(self, source, layers_config,
updates_func=nesterov_momentum,
updates_kwargs=None,
learning_rate=0.1,
learning_rate_changes_by_iteration=None,
experiment_name="",
validation_interval=10,
save_plot_interval=100,
loss_function=lambda x, t: squared_error(x, t).mean(),
layer_changes=None,
seed=42,
epoch_callbacks=None,
do_save_activations=True,
plotter=Plotter(),
auto_reshape=True,
logger=None):
"""
Parameters
----------
layers_config : list of dicts. Keys are:
'type' : BLSTMLayer or a subclass of lasagne.layers.Layer
'num_units' : int
"""
if logger is None:
self.logger = logging.getLogger(experiment_name)
else:
self.logger = logger
self.logger.info("Initialising network...")
if seed is not None:
np.random.seed(seed)
self.source = source
self.updates_func = updates_func
self._learning_rate = theano.shared(
sfloatX(learning_rate), name='learning_rate')
self.logger.info(
"Learning rate initialised to {:.1E}".format(learning_rate))
self.learning_rate_changes_by_iteration = none_to_dict(
learning_rate_changes_by_iteration)
self.updates_kwargs = none_to_dict(updates_kwargs)
self.experiment_name = experiment_name
self.validation_interval = validation_interval
self.save_plot_interval = save_plot_interval
self.loss_function = loss_function
self.layer_changes = none_to_dict(layer_changes)
self.epoch_callbacks = none_to_dict(epoch_callbacks)
self.do_save_activations = do_save_activations
self.plotter = plotter
self.plotter.net = self
self.auto_reshape = auto_reshape
self.set_csv_filenames()
self.generate_validation_data_and_set_shapes()
self.validation_costs = []
self.training_costs = []
self.training_costs_metadata = []
self.layers = []
self.layer_labels = {}
# Shape is (number of examples per batch,
# maximum number of time steps per example,
# number of features per example)
input_layer = InputLayer(shape=self.input_shape)
self.layer_labels['input'] = input_layer
self.layers.append(input_layer)
self.add_layers(layers_config)
self.logger.info(
"Done initialising network for " + self.experiment_name)
def set_csv_filenames(self):
self.csv_filenames = {
'training_costs': self.experiment_name + "_training_costs.csv",
'validation_costs': self.experiment_name + "_validation_costs.csv",
'training_costs_metadata':
self.experiment_name + "_training_costs_metadata.csv",
'best_costs': self.experiment_name + "_best_costs.txt",
}
def generate_validation_data_and_set_shapes(self):
# Generate a "validation" sequence whose cost we will compute
self.validation_batch = self.source.validation_data()
self.X_val, self.y_val = self.validation_batch.data
self.input_shape = self.X_val.shape
self.n_seq_per_batch = self.input_shape[0]
self.output_shape = self.y_val.shape
self.n_outputs = self.output_shape[-1]
def add_layers(self, layers_config):
for layer_config in layers_config:
layer_type = layer_config.pop('type')
layer_label = layer_config.pop('label', None)
# Reshape if necessary
if self.auto_reshape:
prev_layer_output_shape = self.layers[-1].output_shape
n_dims = len(prev_layer_output_shape)
n_features = prev_layer_output_shape[-1]
if layer_type in RECURRENT_LAYERS:
if n_dims == 2:
seq_length = int(prev_layer_output_shape[0] /
self.source.n_seq_per_batch)
shape = (self.source.n_seq_per_batch,
seq_length,
n_features)
reshape_layer = ReshapeLayer(self.layers[-1], shape)
self.layers.append(reshape_layer)
elif layer_type in [DenseLayer, MixtureDensityLayer]:
if n_dims == 3:
                    # The previous layer was a time-aware layer,
                    # so reshape its output to 2 dims.
seq_length = prev_layer_output_shape[1]
shape = (self.source.n_seq_per_batch * seq_length,
n_features)
reshape_layer = ReshapeLayer(self.layers[-1], shape)
self.layers.append(reshape_layer)
# Handle references:
for k, v in layer_config.iteritems():
if isinstance(v, basestring) and v.startswith("ref:"):
v = v[4:] # remove "ref:"
label, _, attr = v.partition('.')
target_layer = self.layer_labels[label]
# layer_config[k] = getattr(target_layer, attr)
layer_config[k] = eval("target_layer.{:s}".format(attr))
print(layer_config[k])
print(type(layer_config[k]))
self.logger.info(
                'Initialising layer of type {}'.format(layer_type))
# Handle ConcatLayers
if layer_type == ConcatLayer:
incoming = [
self.layer_labels[ref]
for ref in layer_config.pop('incomings')]
else:
incoming = self.layers[-1]
            # Init new layer
apply_batch_norm = layer_config.pop('batch_normalize', False)
layer = layer_type(incoming, **layer_config)
if apply_batch_norm:
layer = batch_norm(layer)
self.layers.append(layer)
if layer_label is not None:
self.layer_labels[layer_label] = layer
# Reshape output if necessary...
if (self.layers[-1].output_shape != self.output_shape and
layer_type != MixtureDensityLayer):
reshape_layer = ReshapeLayer(self.layers[-1], self.output_shape)
self.layers.append(reshape_layer)
self.logger.info("Total parameters = {}".format(
sum([p.get_value().size for p in
lasagne.layers.get_all_params(self.layers[-1])])))
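    # Illustrative layers_config (an assumption based on the keys handled in
    # add_layers above, not copied from the original project):
    #     layers_config = [
    #         {'type': BLSTMLayer, 'num_units': 50},
    #         {'type': DenseLayer, 'num_units': 1,
    #          'nonlinearity': sigmoid, 'label': 'output'},
    #     ]
    # 'type' and 'label' are popped off; everything else is passed to the
    # layer constructor as keyword arguments.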
def print_net(self):
layers = get_all_layers(self.layers[-1])
for layer in layers:
self.logger.info(str(layer))
try:
input_shape = layer.input_shape
except:
pass
else:
self.logger.info(" Input shape: {}".format(input_shape))
self.logger.info("Output shape: {}".format(layer.output_shape))
def compile(self):
self.logger.info("Compiling Theano functions...")
target_output = ndim_tensor(name='target_output', ndim=self.y_val.ndim)
network_input = ndim_tensor(name='network_input', ndim=self.X_val.ndim)
output_layer = self.layers[-1]
# Training
network_output_train = lasagne.layers.get_output(
output_layer, network_input)
loss_train = self.loss_function(network_output_train, target_output)
# Evaluation (test and validation)
network_output_eval = lasagne.layers.get_output(
output_layer, network_input, deterministic=True)
loss_eval = self.loss_function(network_output_eval, target_output)
# Updates
all_params = lasagne.layers.get_all_params(
output_layer, trainable=True)
updates = self.updates_func(
loss_train, all_params, learning_rate=self._learning_rate,
**self.updates_kwargs)
# Theano functions for training, getting output,
# and computing loss_train
self.train = theano.function(
inputs=[network_input, target_output],
outputs=loss_train,
updates=updates,
on_unused_input='warn',
allow_input_downcast=True)
deterministic_output = lasagne.layers.get_output(
output_layer, network_input, deterministic=True)
self.y_pred = theano.function(
inputs=[network_input],
outputs=deterministic_output,
on_unused_input='warn',
allow_input_downcast=True)
self.compute_cost = theano.function(
inputs=[network_input, target_output],
outputs=[loss_eval, deterministic_output],
on_unused_input='warn',
allow_input_downcast=True)
self.logger.info("Done compiling Theano functions.")
def fit(self, n_iterations=None):
        # Training loop. Need to wrap this in a try-except block so
# we can always call self.source.stop()
self.source.start()
try:
self._training_loop(n_iterations)
except:
raise
finally:
self.source.stop()
def _change_layers(self, epoch):
self.source.stop()
self.source.empty_queue()
self.logger.info("Changing layers...\nOld architecture:")
self.print_net()
layer_changes = self.layer_changes[epoch]
for layer_to_remove in range(layer_changes.get('remove_from', 0), 0):
self.logger.info(
"Removed {}".format(self.layers.pop(layer_to_remove)))
if 'callback' in layer_changes:
layer_changes['callback'](self, epoch)
self.add_layers(layer_changes['new_layers'])
self.logger.info("New architecture:")
self.print_net()
self.compile()
self.source.start()
def _save_training_costs_metadata(self):
if not self.training_costs_metadata:
return
keys = self.training_costs_metadata[-1].keys()
n_iterations = self.n_iterations()
if n_iterations == 0:
mode = 'w'
else:
mode = 'a'
with open(self.csv_filenames['training_costs_metadata'], mode) as fh:
writer = csv.DictWriter(fh, fieldnames=keys)
if n_iterations == 0:
writer.writeheader()
writer.writerow(self.training_costs_metadata[-1])
def print_and_save_training_progress(self, duration):
iteration = self.n_iterations()
train_cost = self.training_costs[-1]
validation_cost = (self.validation_costs[-1] if self.validation_costs
else None)
_write_csv_row(self.csv_filenames['training_costs'],
[iteration, train_cost, duration])
self._save_training_costs_metadata()
best_train_cost = min(self.training_costs)
best_valid_cost = min(self.validation_costs)
is_best_train = train_cost == best_train_cost
is_best_valid = validation_cost == best_valid_cost
# write bests to disk
FMT = "{:14.10f}"
N = 500
K = 25
txt = (
"BEST COSTS\n" +
("best train cost =" + FMT + " at iteration{:6d}\n").format(
best_train_cost, self.training_costs.index(best_train_cost)) +
("best valid cost =" + FMT + " at iteration{:6d}\n").format(
best_valid_cost,
self.validation_costs.index(best_valid_cost) *
self.validation_interval) +
"\n" +
"AVERAGE FOR THE TOP {:d} ITERATIONS\n".format(K) +
(" avg train cost =" + FMT + "\n").format(
np.mean(np.sort(self.training_costs)[:K])) +
(" avg valid cost =" + FMT + "\n").format(
np.mean(np.sort(self.validation_costs)[:K])) +
"\n" +
"AVERAGE COSTS FOR THE LAST {:d} ITERATIONS\n".format(N) +
(" avg train cost =" + FMT + "\n").format(
np.mean(self.training_costs[-N:])) +
(" avg valid cost =" + FMT + "\n").format(
np.mean(self.validation_costs[-N:]))
)
with open(self.csv_filenames['best_costs'], mode='w') as fh:
fh.write(txt)
# print bests to screen
print(" {:>5} | {}{:>10.6f}{} | {}{:>10.6f}{} |"
" {:>11.6f} | {:>.3f}s".format(
iteration,
ansi.BLUE if is_best_train else "",
train_cost,
ansi.ENDC if is_best_train else "",
ansi.GREEN if is_best_valid else "",
validation_cost,
ansi.ENDC if is_best_valid else "",
train_cost / validation_cost,
duration
))
if np.isnan(train_cost):
msg = "training cost is NaN at iteration {}!".format(iteration)
self.logger.error(msg)
raise TrainingError(msg)
def _write_csv_headers(self, key='all'):
if key in ['all', 'training_costs']:
_write_csv_row(
self.csv_filenames['training_costs'],
row=['iteration', 'train_cost', 'duration'],
mode='w')
if key in ['all', 'validation_costs']:
_write_csv_row(
self.csv_filenames['validation_costs'],
row=['iteration', 'validation_cost'],
mode='w')
@property
def learning_rate(self):
return self._learning_rate.get_value()
@learning_rate.setter
def learning_rate(self, rate):
rate = sfloatX(rate)
self.logger.info(
"Iteration {:d}: Change learning rate to {:.1E}"
.format(self.n_iterations(), rate))
self._learning_rate.set_value(rate)
def _training_loop(self, n_iterations):
# Adapted from dnouri/nolearn/nolearn/lasagne.py
self.logger.info("Starting training for {} iterations."
.format(n_iterations))
print("""
Update | Train cost | Valid cost | Train / Val | Secs per update
--------|--------------|--------------|---------------|----------------\
""")
iteration = self.n_iterations()
if iteration == 0:
self._write_csv_headers()
while iteration != n_iterations:
t0 = time() # for calculating training duration
iteration = len(self.training_costs)
if iteration in self.learning_rate_changes_by_iteration:
self.learning_rate = (
self.learning_rate_changes_by_iteration[iteration])
if iteration in self.layer_changes:
self._change_layers(iteration)
if iteration in self.epoch_callbacks:
self.epoch_callbacks[iteration](self, iteration)
batch = self.source.get()
X, y = batch.data
train_cost = self.train(X, y).flatten()[0]
self.training_costs.append(train_cost)
if batch.metadata:
self.training_costs_metadata.append(batch.metadata)
if not iteration % self.validation_interval:
validation_cost = self.compute_cost(self.X_val, self.y_val)[0]
validation_cost = validation_cost.flatten()[0]
self.validation_costs.append(validation_cost)
_write_csv_row(
self.csv_filenames['validation_costs'],
row=[iteration, validation_cost])
if not iteration % self.save_plot_interval:
self.save()
duration = time() - t0
self.print_and_save_training_progress(duration)
self.logger.info("Finished training")
def save(self):
self.logger.info("Saving plots...")
try:
self.plotter.plot_all()
except:
self.logger.exception("")
self.logger.info("Saving params...")
try:
self.save_params()
except:
self.logger.exception("")
self.logger.info("Saving activations...")
try:
self.save_activations()
except:
self.logger.exception("")
self.logger.info("Finished saving.")
def n_iterations(self):
return max(len(self.training_costs) - 1, 0)
def save_params(self, filename=None):
"""
Save it to HDF in the following format:
/epoch<N>/L<I>_<type>/P<I>_<name>
"""
if filename is None:
filename = self.experiment_name + ".hdf5"
mode = 'w' if self.n_iterations() == 0 else 'a'
f = h5py.File(filename, mode=mode)
epoch_name = 'epoch{:06d}'.format(self.n_iterations())
try:
epoch_group = f.create_group(epoch_name)
except ValueError:
self.logger.exception("Cannot save params!")
f.close()
return
layers = get_all_layers(self.layers[-1])
for layer_i, layer in enumerate(layers):
params = layer.get_params()
if not params:
continue
layer_name = 'L{:02d}_{}'.format(layer_i, layer.__class__.__name__)
layer_group = epoch_group.create_group(layer_name)
for param_i, param in enumerate(params):
param_name = 'P{:02d}'.format(param_i)
if param.name:
param_name += "_" + param.name
| |
header = "\nScheduled Tasks for Workbook Acceleration"
print_table(rows, columns, header)
if len(workbook_id_without_tasks) > 0:
print("*The Workbook Acceleration views for these workbooks will be updated when they "
"are published, or when their extract is refreshed.")
def get_workbooks_from_paths(server, args):
all_projects = {project.id: project for project in TSC.Pager(server.projects)}
workbook_id_to_workbook = dict()
workbook_path_mapping = parse_workbook_path(args.path_list)
for workbook_name, workbook_paths in workbook_path_mapping.items():
req_option = TSC.RequestOptions()
req_option.filter.add(TSC.Filter(TSC.RequestOptions.Field.Name,
TSC.RequestOptions.Operator.Equals,
workbook_name))
workbooks = list(TSC.Pager(server.workbooks, req_option))
all_paths = set(workbook_paths[:])
for workbook in workbooks:
path = find_project_path(all_projects[workbook.project_id], all_projects, "")
if path in workbook_paths:
all_paths.remove(path)
workbook_id_to_workbook[workbook.id] = workbook, path
for path in all_paths:
print("Cannot find workbook path: {}, each line should only contain one workbook path"
.format(path + '/' + workbook_name))
return workbook_id_to_workbook
def get_workbook_from_path(server, workbook_path):
all_projects = {project.id: project for project in TSC.Pager(server.projects)}
workbook_id_to_workbook = dict()
workbook_path_list = workbook_path.rstrip().split('/')
workbook_project = '/'.join(workbook_path_list[:-1])
workbook_name = workbook_path_list[-1]
req_option = TSC.RequestOptions()
req_option.filter.add(TSC.Filter(TSC.RequestOptions.Field.Name,
TSC.RequestOptions.Operator.Equals,
workbook_name))
workbooks = list(TSC.Pager(server.workbooks, req_option))
for workbook in workbooks:
path = find_project_path(all_projects[workbook.project_id], all_projects, "")
if path == workbook_project:
workbook_id_to_workbook[workbook.id] = workbook, workbook_project
break
if len(workbook_id_to_workbook) == 0:
print("Unable to find {}".format(workbook_path))
return workbook_id_to_workbook
def show_materialized_views_tasks(server, args=None, workbook_id_to_workbook=None):
tasks = list(TSC.Pager(lambda options: server.tasks.get(task_type=TSC.TaskItem.Type.DataAcceleration)))
if workbook_id_to_workbook is None and args is not None:
workbook_path = find_workbook_path(args)
if args.path_list is not None:
workbook_id_to_workbook = get_workbooks_from_paths(server, args)
elif workbook_path is not None:
workbook_id_to_workbook = get_workbook_from_path(server, workbook_path)
print_materialized_views_tasks(server, tasks, workbook_id_to_workbook)
return True
def find_last_running_job_with_status(server, workbook_id, status):
request_options = TSC.RequestOptions()
request_options.filter.add(TSC.Filter(
TSC.RequestOptions.Field.Notes,
TSC.RequestOptions.Operator.Has,
"{}: {}".format(workbook_id, status)))
request_options.sort.add(TSC.Sort(
TSC.RequestOptions.Field.CompletedAt,
TSC.RequestOptions.Direction.Desc))
request_options.page_size = 1
request_options.page_number = 1
jobs = server.jobs.get(request_options)[0]
if jobs is None or len(jobs) == 0:
return None
else:
return jobs[0]
def find_last_running_job(server, workbook):
if workbook is None:
return None
status = workbook.data_acceleration_config['acceleration_status']
if status in ["inProgress", "waiting", "unknown"]:
return None
status_to_search_term = {
"accelerated": ["Materialized", "AllMaterializedToExternalCache"],
"failed": ["JobFailed"],
"notUseful": ["MaterializationNotUseful"]
}
if status not in status_to_search_term:
return None
for status_search_term in status_to_search_term[status]:
last_running_job = find_last_running_job_with_status(server, workbook.id, status_search_term)
if last_running_job is not None:
return last_running_job
return None
def remove_materialized_views_tasks(server, tasks, workbook_id_to_workbook, schedule_name):
if workbook_id_to_workbook is None or len(workbook_id_to_workbook) == 0:
return False
if tasks is None or len(tasks) == 0:
print("Unable to find any MaterializeViews tasks")
return False
columns = ['Project/Workbook', 'Removed From Schedule']
header = "Workbooks removed from schedule"
rows = list()
removed_workbook_ids = set()
for task in tasks:
if task.target.id in workbook_id_to_workbook and task.schedule_item.name == schedule_name:
try:
server.tasks.delete(task.id, task_type=TSC.TaskItem.Type.DataAcceleration)
workbook, path = workbook_id_to_workbook[task.target.id]
removed_workbook_ids.add(workbook.id)
rows.append(['{}/{}'.format(path, workbook.name), task.schedule_item.name])
except ServerResponseError as error:
print("{}: {}".format(error.summary, error.detail))
if len(rows) > 0:
print_table(rows, columns, header)
if len(rows) < len(workbook_id_to_workbook):
no_removed_rows = list()
for workbook, path in workbook_id_to_workbook.values():
if workbook.id not in removed_workbook_ids:
no_removed_rows.append(["{}/{}".format(path, workbook.name)])
print_table(no_removed_rows, ["Project/Workbook"], "\nWorkbooks not on schedule \"{}\"".format(schedule_name))
def find_schedule_name(args):
if args.create_schedule is not None:
return args.create_schedule
if args.delete_schedule is not None:
return args.delete_schedule
if args.add_to_schedule is not None:
return args.add_to_schedule[0]
if args.remove_from_schedule is not None:
return args.remove_from_schedule[0]
def remove_workbook_from_materialized_views(server, args):
workbook_path = find_workbook_path(args)
schedule_name = find_schedule_name(args)
schedule = find_schedule(server, schedule_name)
if schedule is None:
print("Unable to find the schedule {}".format(schedule_name))
show_materialized_views_tasks(server, args)
return False
tasks = list(TSC.Pager(lambda options: server.tasks.get(task_type=TSC.TaskItem.Type.DataAcceleration)))
workbook_id_to_workbook = None
if workbook_path is not None:
workbook_id_to_workbook = get_workbook_from_path(server, workbook_path)
elif args.path_list is not None:
workbook_id_to_workbook = get_workbooks_from_paths(server, args)
remove_materialized_views_tasks(server, tasks, workbook_id_to_workbook, schedule_name)
return True
def find_schedule(server, schedule_name):
if schedule_name is None:
return None
schedules = list(TSC.Pager(server.schedules.get))
for schedule in schedules:
if schedule_name == schedule.name:
return schedule
return None
def confirm(message, options):
"""
    Ask the user to enter one of the given options (case-insensitive).
    :return: the entered option, lower-cased.
    :rtype: str
"""
answer = ""
while answer not in options:
answer = raw_input(message).lower()
return answer
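# Example use of confirm() (illustrative only; prompt text is made up):
#     if confirm("Remove this workbook from the schedule? (y/n) ", ["y", "n"]) == "y":
#         ...proceed with the removal...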
def add_to_materialized_views_schedule(server, tasks, schedule, workbook_id_to_workbook):
if schedule is None or workbook_id_to_workbook is None:
return
workbook_id_to_schedules = dict()
if tasks is not None:
for task in tasks:
if task.target.type != 'workbook':
continue
workbook_id_to_schedules.setdefault(task.target.id, set()).add(task.schedule_item.name)
rows = list()
warnings = set()
for workbook, path in workbook_id_to_workbook.values():
warnings_error_message = "Unable to add workbook \"{}/{}\" to schedule due to".format(
path, workbook.name)
try:
server_response = server.schedules.add_to_schedule(schedule.id, workbook, task_type="dataAcceleration")
            # add_to_schedule returns a non-empty list when the server reported an error or warning;
            # if there was a warning, we still need to check whether the task was created
if len(server_response) == 0 or server_response[0].task_created:
workbook_id_to_schedules.setdefault(workbook.id, set()).add(schedule.name)
rows.append(["{}/{}".format(path, workbook.name),
"\n".join(sorted(workbook_id_to_schedules[workbook.id]))])
# Case 1: no warnings or error
if len(server_response) == 0:
continue
if server_response[0].task_created:
# Case 2: warnings exist, but the task was created
warnings.update(server_response[0].warnings)
elif server_response[0].warnings is not None:
                # Case 3: task was not created, warnings exist
for warning in server_response[0].warnings:
warnings.add("{} {}".format(warnings_error_message, warning))
elif server_response[0].error is not None:
                # Case 4: task was not created, an error occurred
warnings.add("{} {}".format(warnings_error_message, server_response[0].error))
except ServerResponseError as error:
print("{} {}".format(warnings_error_message, error.detail))
print_messages("Warning", sorted(warnings))
header = "Workbooks added to schedule"
columns = ['Project/Workbook', 'Schedules']
print_table(rows, columns, header)
def is_workbook_enable(workbook):
return workbook.data_acceleration_config["acceleration_enabled"]
def add_workbooks_to_schedule(server, args):
schedule_name = find_schedule_name(args)
schedule = find_schedule(server, schedule_name)
if schedule is None:
print('Unable to find the schedule "{}"'.format(schedule_name))
return False
if schedule.schedule_type != TSC.ScheduleItem.Type.DataAcceleration:
print('Schedule {} is an existing schedule but is an Extract, Flow, or Subscription schedule. '
'Use a Workbook Acceleration schedule.'.format(schedule_name))
return False
tasks = list(TSC.Pager(lambda options: server.tasks.get(task_type=TSC.TaskItem.Type.DataAcceleration)))
workbook_path = find_workbook_path(args)
workbook_id_to_workbook = None
if workbook_path is not None:
workbook_id_to_workbook = get_workbook_from_path(server, workbook_path)
if args.path_list is not None:
workbook_id_to_workbook = get_workbooks_from_paths(server, args)
add_to_materialized_views_schedule(server, tasks, schedule, workbook_id_to_workbook)
return True
def verify_time_arguments(args):
def schedule_type_none(schedule_type):
if schedule_type is not None:
print('Please select one of the schedule types: hourly-interval, daily-interval, '
'weekly-interval, monthly-interval')
return False
else:
return True
# verify start_time
if args.start_hour is None or not (0 <= args.start_hour <= 23):
print("Please provide the schedule start hour between 0 and 23.")
return False
schedule_type_selected = None
if args.daily_interval is not None:
if args.end_hour is not None or args.end_minute is not None:
print("--end-hour and --end-minutes will be ignored for --daily-interval")
schedule_type_selected = "daily-interval"
if args.weekly_interval is not None:
if schedule_type_none(schedule_type_selected):
schedule_type_selected = "weekly-interval"
else:
return False
if args.monthly_interval is not None:
if schedule_type_none(schedule_type_selected):
if not (1 <= int(args.monthly_interval) <= 31):
print('Please provide the day of month between 1 and 31')
return False
schedule_type_selected = "monthly-interval"
else:
return False
if args.hourly_interval is not None:
if schedule_type_none(schedule_type_selected):
if args.end_hour is None or not (0 <= args.end_hour <= 23):
print("Please provide the schedule end hour between 0 and 23")
return False
elif not (args.end_hour == 0 and args.end_minute == 0) and \
(args.end_hour < args.start_hour or
args.end_hour == args.start_hour
and args.end_minute < args.start_minute):
print("Invalid start time {:02d}:{:02d} and end time {:02d}:{:02d}".format(
args.start_hour, args.start_minute, args.end_hour, args.end_minute
))
else:
schedule_type_selected = 'hourly-schedule'
else:
return False
return schedule_type_selected is not None
def get_hour_interval(hour_interval):
if hour_interval in ['0.25', '0.5']:
return float(hour_interval)
else:
return int(hour_interval)
def delete_materialized_view_schedule(server, args):
if args.delete_schedule is None:
print("Unable to find the schedule name to delete")
return
schedules = TSC.Pager(server.schedules)
schedule_deleted = False
for schedule in schedules:
if schedule.name == args.delete_schedule:
try:
server.schedules.delete(schedule.id)
print("Schedule \"{}\" is deleted".format(schedule.name))
schedule_deleted = True
break
except Exception as ex:
print("Unable to delete schedule \"{}\" due to {}".format(schedule.name, ex))
if not schedule_deleted:
print("Unable to find \"{}\" to delete".format(args.delete_schedule))
def show_materialized_view_schedules(server):
schedules = TSC.Pager(server.schedules)
local_tz = tz.tzlocal()
rows = list()
for schedule in schedules:
if schedule.schedule_type != TSC.ScheduleItem.Type.DataAcceleration:
continue
rows.append([schedule.name,
schedule.next_run_at.astimezone(local_tz) if schedule.next_run_at is not None else None])
print_table(rows, ["Name", "Next Run At"],
"Data Acceleration Schedule")
def create_hourly_schedule(server, args):
hourly_interval = TSC.HourlyInterval(start_time=time(args.start_hour, args.start_minute),
end_time=time(args.end_hour, args.end_minute),
interval_value=get_hour_interval(args.hourly_interval))
schedule_name = args.create_schedule
hourly_schedule = TSC.ScheduleItem(schedule_name, 75, TSC.ScheduleItem.Type.DataAcceleration,
TSC.ScheduleItem.ExecutionOrder.Parallel, hourly_interval)
hourly_schedule = server.schedules.create(hourly_schedule)
if hourly_schedule is not None:
print("Hourly schedule \"{}\" created with an interval of {} hours.".format(
schedule_name, args.hourly_interval))
if hasattr(hourly_schedule, "warnings"):
print_messages("Warning", hourly_schedule.warnings)
else:
print("Failed to create schedule {}".format(schedule_name))
def create_daily_schedule(server, args):
daily_interval = TSC.DailyInterval(start_time=time(args.start_hour, args.start_minute))
schedule_name = args.create_schedule
daily_schedule = TSC.ScheduleItem(schedule_name, 75, TSC.ScheduleItem.Type.DataAcceleration,
TSC.ScheduleItem.ExecutionOrder.Parallel, daily_interval)
daily_schedule = server.schedules.create(daily_schedule)
if daily_schedule is not None:
print("Daily schedule \"{}\" created to run at {:02d}:{:02d}.".format(
schedule_name, int(args.start_hour), int(args.start_minute)))
if hasattr(daily_schedule, "warnings"):
print_messages("Warning", daily_schedule.warnings)
else:
print("Failed to create schedule {}".format(schedule_name))
def create_weekly_schedule(server, args):
weekly_interval = TSC.WeeklyInterval(time(args.start_hour, args.start_minute),
*args.weekly_interval)
schedule_name = args.create_schedule
weekly_schedule = TSC.ScheduleItem(schedule_name, 75, TSC.ScheduleItem.Type.DataAcceleration,
TSC.ScheduleItem.ExecutionOrder.Parallel, weekly_interval)
weekly_schedule = server.schedules.create(weekly_schedule)
if weekly_schedule is not None:
print("Weekly schedule \"{}\" created to run on {} at {:02d}:{:02d}.".format(
schedule_name, args.weekly_interval, int(args.start_hour), int(args.start_minute)))
if | |
<gh_stars>1-10
### Functions for magnetic problems with polygonal prisms
import numpy as np
from fatiando import utils
from fatiando.gravmag import polyprism
from fatiando.mesher import PolygonalPrism
from fatiando.constants import CM, T2NT
from copy import deepcopy
### Functions for the foward problem using fatiando
def area_polygon(x, y):
'''
Returns the area of a polygon using the shoelace
formula.
input
x: 1D array - Cartesian coordinates
y: 1D array - Cartesian coordinates
output
area: float - area of the polygon
'''
    assert x.size == y.size, 'x and y must have the same size'
    assert x.shape == y.shape, 'x and y must have the same shape'
    assert x.size >= 3, 'the polygon must have at least 3 vertices'
x = np.asanyarray(x)
y = np.asanyarray(y)
n = len(x)
shift_up = np.arange(-n+1, 1)
shift_down = np.arange(-1, n-1)
area = (x * (y.take(shift_up) - y.take(shift_down))).sum() / 2.0
return abs(area)
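# Quick sanity check (illustrative, not part of the original module): for the
# unit square with vertices (0,0), (1,0), (1,1), (0,1) the shoelace sum is
#     |x . (y_shifted_up - y_shifted_down)| / 2 = |0*(-1) + 1*1 + 1*1 + 0*(-1)| / 2 = 1.0
# so area_polygon(np.array([0., 1., 1., 0.]), np.array([0., 0., 1., 1.])) returns 1.0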
def volume_polygon(model):
'''
Returns the volume of a list of polygonal prisms.
input
model: list - list of fatiando.mesher.PolygonalPrism
output
volume: float - volume of the model
'''
volume = 0
for m in model:
volume += area_polygon(m.x,m.y)*(m.z2 - m.z1)
return volume
def pol2cart(l, M, L):
'''
This function transforms polar coordinates of the prisms
into Cartesian coordinates and returns a list of polygonal
prisms of the Fatiando a Terra.
input
l: list - each element is a list of [r, x0, y0, z1, z2, 'magnetization'],
       where r is an array with the radial distances of the vertices,
x0 and y0 are the origin Cartesian coordinates of each prism,
z1 and z2 are the top and bottom of each prism and
magnetization is physical property
M: int - number of vertices per prism
L: int - number of prisms
output
lk: list - list of objects of the class
fatiando.mesher.PolygonalPrism
'''
lk = []
r = np.zeros(M) # it contains radial distances of the vertices in polar coordinates
    verts = [] # it contains the vertices in Cartesian coordinates
assert len(l) == L, 'The size of m and the number of prisms must be equal'
for lv in l:
assert len(lv) == 6, 'Each element of l must have 6 elements'
assert len(lv[0]) == M, 'All prisms must have M vertices'
ang = 2*np.pi/M # angle between two vertices
for lv in l:
r = lv[0]
verts = []
for i in range(M):
verts.append([r[i]*np.cos(i*ang) + lv[1], r[i]*np.sin(i*ang) + lv[2]])
lk.append(PolygonalPrism(verts, lv[3], lv[4], lv[5]))
return lk
def param_vec(l, M, L):
'''
This function receives the model of prisms and returns the vector of parameters
input
l: list - each element is a list of [r, x0, y0, z1, z2, 'magnetization'],
       where r is an array with the radial distances of the vertices,
       x0 and y0 are the origin Cartesian coordinates of each prism,
z1 and z2 are the top and bottom of each prism and
magnetization is physical property
M: int - number of vertices per prism
L: int - number of prisms
output
pv: 1D array - parameters vector
'''
pv = np.zeros(0) # parameters vector
lv = [] # list for the loop of asserts
assert len(l) == L, 'The size of m and the number of prisms must be equal'
for lv in l:
assert len(lv) == 6, 'Each element of l must have 6 elements'
assert len(lv[0]) == M, 'All prisms must have M vertices'
        assert (lv[0][:M] > 0.).all(), 'All radial distances must be positive'
for i in range(L):
pv = np.hstack((pv, l[i][0], l[i][1:3]))
pv = np.hstack((pv, l[0][4] - l[0][3]))
return pv
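# Layout of the parameter vector built above (illustration, consistent with
# the P = L*(M + 2) + 1 check used in param2polyprism below):
#     pv = [r_1(1..M), x0_1, y0_1,
#           r_2(1..M), x0_2, y0_2,
#           ...,
#           r_L(1..M), x0_L, y0_L,
#           dz]          # single thickness shared by all prisms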
def param2polyprism(m, M, L, z0, props):
'''
    Returns a list of objects of the class
fatiando.mesher.PolygonalPrism
input
m: 1D array - parameter vector
M: int - number of vertices
L: int - number of prisms
z0: float - top of the model
props: dictionary - physical property
output
model: list - list of fatiando.mesher.PolygonalPrism
'''
P = L*(M + 2) + 1
assert m.size == P, 'The size of m must be equal to L*(M + 2) + 1'
#assert m[-1] > 0., 'The thickness dz must be a positive number'
    for i in range(0, P - 1, M + 2):
        assert (m[i:i+M] >= 0.).all(), 'The radial distances must be positive'
r = np.zeros(M) # vector for radial distances
model = [] # list of prisms
k = 0.
for i in range(0, P-1, M + 2):
r = m[i:M+i]
model.append([r, m[i+M], m[i+M+1], z0 + m[-1]*k, z0 + m[-1]*(k + 1.), props])
k = k + 1.
model = pol2cart(model, M, L)
return model
### Functions for the derivatives with finite differences
def derivative_tf_x0(xp, yp, zp, m, M, delta, inc, dec):
'''
This function calculates the derivative for total field anomaly
for x0 coordinate of a model of polygonal prisms using
finite difference.
input
xp, yp, zp: 1D array - observation points
    m: PolygonalPrism - a single fatiando.mesher.PolygonalPrism object
M: int - number of vertices per prism
delta: float - increment for differentiation
inc: float - inclination of the local-geomagnetic field
dec: float - declination of the local-geomagnetic field
output
df: 1D array - derivative of x0 coordinate
'''
assert xp.size == yp.size == zp.size, 'The number of points in x, y and z must be equal'
assert xp.shape == yp.shape == zp.shape, 'xp, yp and zp must have the same shape'
assert m.x.size == m.y.size == M, 'The number of vertices must be M'
assert delta > 0., 'delta must be a positive number'
mp = deepcopy([m]) # m.x + delta
mm = deepcopy([m]) # m.x - delta
mp[0].x += delta
mm[0].x -= delta
df = polyprism.tf(xp, yp, zp, mp, inc, dec)
df -= polyprism.tf(xp, yp, zp, mm, inc, dec)
df /= (2.*delta)
return df
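# The finite-difference scheme used above and in the functions below is the
# central difference (written here for x0; y0 and the radial distances are
# analogous):
#     d(tf)/d(x0) ~= [tf(x0 + delta) - tf(x0 - delta)] / (2 * delta)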
def derivative_tf_y0(xp, yp, zp, m, M, delta, inc, dec):
'''
This function calculates the derivative for total field anomaly
for y0 coordinate of a model of polygonal prisms using
finite difference.
input
xp, yp, zp: 1D array - observation points
    m: PolygonalPrism - a single fatiando.mesher.PolygonalPrism object
M: int - number of vertices per prism
delta: float - increment for differentiation
inc: float - inclination of the local-geomagnetic field
dec: float - declination of the local-geomagnetic field
output
    df: 1D array - derivative of y0 coordinate
'''
assert xp.size == yp.size == zp.size, 'The number of points in x, y and z must be equal'
assert xp.shape == yp.shape == zp.shape, 'xp, yp and zp must have the same shape'
assert m.x.size == m.y.size == M, 'The number of vertices must be M'
assert delta > 0., 'delta must be a positive number'
mp = deepcopy([m]) # m.y + delta
mm = deepcopy([m]) # m.y - delta
mp[0].y += delta
mm[0].y -= delta
df = polyprism.tf(xp, yp, zp, mp, inc, dec)
df -= polyprism.tf(xp, yp, zp, mm, inc, dec)
df /= (2.*delta)
return df
def derivative_tf_radial(xp, yp, zp, m, M, nv, delta, inc, dec):
'''
This function calculates the derivative for total field anomaly
for radial coordinate of a set of polygonal prisms using
finite difference.
input
xp, yp, zp: 1D array - observation points
    m: PolygonalPrism - a single fatiando.mesher.PolygonalPrism object
M: int - number of vertices per prism
nv: int - number of the vertice for the derivative
delta: float - increment for differentiation
inc: float - inclination of the local-geomagnetic field
dec: float - declination of the local-geomagnetic field
output
df: 1D array - derivative of radial distance
'''
assert xp.size == yp.size == zp.size, 'The number of points in x, y and z must be equal'
assert xp.shape == yp.shape == zp.shape, 'xp, yp and zp must have the same shape'
assert m.x.size == m.y.size == M, 'The number of vertices must be M'
assert nv < M, 'The vertice number must be smaller than the number of vertices (0 - M)'
assert delta > 0., 'delta must be a positive number'
m_fat = [] # list of objects of the | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# check-html-ids.py - Catch and fix id= errors. Makes life easier
# inserting new Navigation Bars without having to renumber dozens of
# existing sections below.
#
# Feb 21 2022 - Initial version.
#
# ==============================================================================
"""
Creates a backup of all files that might be modified. The existing
backups must be deleted before running the program.
"""
from __future__ import print_function # Must be first import
from __future__ import with_statement # Error handling for file opens
import os # Test if directory exists
import shutil # Make backup files to .bak extension
# Filenames are relative to program directory "/website/sede"
# answers.md, about.md, hrb.md and programs.md have no section navigation bars
EXTRA_SEARCH_FILES = ['../programs/hyperlink.md', '../index.md',
'../programs/mserve.md', '../programs/mt.md',
'../programs/stack.md', '../programs/tcm.md',
'../programs/tim-ta.md']
CONTENTS = "{% include toc.md %}"
TOC_id_no = None # Used to jump to "ToC" button, E.G. int(2)
last_id_no = None # Used to override "Skip" button E.G. int(25)
id_lines = [] # line index where ID number HTML is inserted
div_lines = [] # line index where div line HTML is inserted
lines_changed = 0       # How many ID and DIV lines were changed
classes_changed = 0 # How many classes were changed
def fatal_error(msg):
""" Print fatal error and exit program """
print('#' * 80)
print('#', ' ' * 31, "FATAL ERROR", ' ' * 32, '#')
print('#' * 80)
print('')
print(msg)
exit()
def read_file(fname):
""" Read markdown (.md file) and return as list of
lines: config[]
"""
fname_bak = fname + ".bak"
if os.path.exists(fname_bak):
fatal_error("'The backup file: '" + fname_bak + "' already exists!")
prt = " CREATE BACKUP --> Copy: '" + fname + \
"' To: '" + fname_bak + "'"
banner(prt, style=0)
shutil.copy(fname, fname_bak)
if not os.path.exists(fname):
fatal_error("'The file: '" + fname + "' was not found!")
with open(fname, 'r') as fn:
all_lines = fn.readlines()
config = [one_line.rstrip() for one_line in all_lines]
return config
def save_file(fname, lines):
""" Save markdown (.md file) using return as list of
lines: config[]
"""
if not os.path.exists(fname):
fatal_error("'The file: '" + fname + "' was not found!")
""" Write markdown file with updated ID HTML elements """
with open(fname, 'w') as fh:
# Write everything
for ln in lines:
fh.write(ln + "\n")
# noinspection PyUnboundLocalVariable
def banner(msg, style=1, length=78):
"""
Print message inside 80 character line draw box
Pass style of 1, 2 or 3. Pass 0 to get random style
Default length is 78 for a message box 80 characters wide.
Enhancements: Pass list of messages for multi-line support
"""
if style == 0:
import random
samples = random.sample(range(1, 4), 1)
style = samples[0]
print()
if style == 1:
nw = '┌'; ns = '─'; ne = '┐'; we = '│'; se = '┘'; sw = '└'
if style == 2:
nw = '┏'; ns = '━'; ne = '┓'; we = '┃'; se = '┛'; sw = '┗'
if style == 3:
nw = '╔'; ns = '═'; ne = '╗'; we = '║'; se = '╝'; sw = '╚'
print(nw + ns * length + ne)
print(we + msg.ljust(length) + we)
print(sw + ns * length + se)
def process_extra_files():
"""
Add search words from markdown files.
EXTRA_SEARCH_FILES = ['../about.md', '../answers.md', ...]
html_url contains 'https://pippim.github.io'
"""
file_count = len(EXTRA_SEARCH_FILES)
print('Processing', file_count, 'extra search files')
for i, extra in enumerate(EXTRA_SEARCH_FILES):
all_lines = read_file(extra)
check_lines(all_lines)
update_lines(all_lines)
if lines_changed > 0:
print('lines changed:', lines_changed)
if classes_changed > 0:
print('classes changed:', classes_changed)
# Write changes to disk
save_file(extra, all_lines)
else:
backup_name = extra + '.bak'
print(" No changes were made to file. Backup file: '" +
backup_name + "' removed.")
os.remove(backup_name)
def check_lines(lines):
"""
First pass to setup line indexes for <a id="hdrX" tags
Also removes deprecated class="hdr-btn"
"""
global lines_changed, classes_changed, TOC_id_no, last_id_no, id_lines, div_lines
# Initialize global variables used in update function
lines_changed = 0 # How many lines were changed due to class deprecations
classes_changed = 0 # How many deprecated classes were removed?
TOC_id_no = None # ID number for table of contents
last_id_no = None # Last ID number on file (usually the footer)
id_lines = [] # List of line index for each ID number
div_lines = [] # list of line index for each division (usually ID + 1)
# local variables
current_number = 0
number_of_groups = 0
in_code_block = False
first_id = True
for i, ln in enumerate(lines):
ln_strip = ln.lstrip()
if in_code_block: # Are we currently in code block?
if ln_strip.startswith('```'):
in_code_block = False # End code block
continue # NOTE indent level, always executed
else:
if ln_strip.startswith('```'):
in_code_block = True # Start code block
continue
if ln.startswith('<a id="hdr'):
next_line = lines[i + 1]
# Possible TOC line is always set even if already known
try:
possible_TOC_line = lines[i + 2]
if possible_TOC_line == "":
possible_TOC_line = lines[i + 3]
except IndexError:
possible_TOC_line = "" # End of file
number_of_groups += 1
else:
continue # HTML lines are skipped over
current_number += 1
number = ln.split('"')[1] # Grab hdrX from line
number = number.replace('hdr', '') # Grab X from hdrX
if current_number != int(number):
print('Current ID number should be:', current_number,
' | But number was:', number)
if ' class="hdr-btn"' in next_line:
count = next_line.count(' class="hdr-btn"')
if classes_changed == 0:
error(i, number, ln,
"Deprecated class 'hdr-btn' found: " + str(count)
+ " times.", next_line)
classes_changed += count
lines_changed += 1
if classes_changed > 0:
next_line = next_line.replace(' class="hdr-btn"', '')
lines[i + 1] = next_line # Update mutable line in list
next_line_groups = next_line.split('> <')
group_count = len(next_line_groups)
if TOC_id_no is None and possible_TOC_line == CONTENTS:
TOC_id_no = current_number
# The first button bar should have count of 3 (div, ToC and Skip)
if first_id and group_count != 3:
error(i, number, ln,
"First ID group count (div + buttons) should be 3 but is: " +
str(group_count), next_line_groups)
# If TOC, then it should have count of 4 (div, Top, ToS and Skip)
if first_id is False:
if TOC_id_no == current_number and group_count != 4:
error(i, number, ln,
"TOC line; group count (div + buttons) should be 4 but is: " +
str(group_count), next_line_groups, possible_TOC_line)
# First group on next line should be: '<div class="hdr-bar"'
if next_line_groups[0] != '<div class="hdr-bar"':
error(i, number, ln,
'Invalid Group 1[0]: <div class="hdr-bar" expected but found: ' +
next_line_groups[0], next_line_groups)
div_lines.append(i) # The div line will have to be on ID line for now
else:
div_lines.append(i+1) # The div line follows the ID line as it should
id_lines.append(i)
# Should always have a "Top" button except on the first ID
if first_id is False and next_line_groups[1] != 'a href="#">Top</a':
error(i, number, ln,
'Invalid Group 2[1]: a href="#">Top</a expected but found: ' +
next_line_groups[1], next_line_groups)
first_id = False
last_id_no = current_number
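# Sketch of the two-line pattern check_lines() expects in the markdown files
# (an assumption pieced together from the checks above; real files may differ):
#   <a id="hdr5"></a>
#   <div class="hdr-bar"> <a href="#">Top</a> <a href="#hdr4">ToS</a> <a href="#hdr2">ToC</a> <a href="#hdr6">Skip</a></div>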
def error(i, number, ln, line1, line2="", line3=""):
""" Print error messages """
print('')
print(line1)
if line2 != "":
if isinstance(line2, list):
# line2[0] = "" # Blank out '<div class="hdr-bar"> '
print(line2[1:])
else:
print(line2)
if line3 != "":
print(line3)
print('Lines Index:', i, '| ID Number:', number, '| Line:', ln)
def update_lines(lines):
"""
NOTE: TOC_id_no and last_id_no are globally defined.
check_lines() function may have set lines_changed already.
"""
global lines_changed
if last_id_no < 2:
print("\nThere are less than 2 ID's in the file. Aborting...")
exit()
div_str = '<div class="hdr-bar">'
end_div = '</div>'
top_str = ' <a href="#">Top</a>'
if TOC_id_no is None:
toc_str = ""
        toc_test = 9999999  # Impossibly large ID number
else:
toc_str = ' <a href="#hdr' + str(TOC_id_no) + '">ToC</a>'
toc_test = TOC_id_no
for i, id_ndx in enumerate(id_lines):
div_ndx = div_lines[i]
old_id_line = lines[id_ndx]
old_div_line = lines[div_ndx]
new_id = '<a id="hdr' + str(i + 1) + '"></a>'
tos_str = ' <a href="#hdr' + str(i) + '">ToS</a>'
skip_str = ' <a href="#hdr' + str(i + 2) + '">Skip</a>'
if i == 0: # First ID?
new_div = div_str + toc_str + skip_str + end_div
elif i == toc_test - 1: # TOC ID?
new_div = div_str + top_str + tos_str + | |
from collections import Counter
from warnings import warn
from contextlib import suppress
import numpy as np
import matplotlib.collections as mcoll
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib.path as mpath
from ..exceptions import PlotnineWarning
from ..doctools import document
from ..utils import to_rgba, make_line_segments
from ..utils import SIZE_FACTOR, match
from .geom import geom
@document
class geom_path(geom):
"""
Connected points
{usage}
Parameters
----------
{common_parameters}
lineend : str (default: butt)
        Line end style, one of *butt*, *round* or *projecting*.
This option is applied for solid linetypes.
linejoin : str (default: round)
Line join style, one of *round*, *miter* or *bevel*.
This option is applied for solid linetypes.
arrow : plotnine.geoms.geom_path.arrow (default: None)
Arrow specification. Default is no arrow.
See Also
--------
plotnine.geoms.arrow : for adding arrowhead(s) to paths.
"""
DEFAULT_AES = {'alpha': 1, 'color': 'black', 'linetype': 'solid',
'size': 0.5}
REQUIRED_AES = {'x', 'y'}
DEFAULT_PARAMS = {'stat': 'identity', 'position': 'identity',
'na_rm': False,
'lineend': 'butt', 'linejoin': 'round',
'arrow': None}
def handle_na(self, data):
def keep(x):
# first non-missing to last non-missing
first = match([False], x, nomatch=1, start=0)[0]
last = len(x) - match([False], x[::-1], nomatch=1, start=0)[0]
bool_idx = np.hstack([np.repeat(False, first),
np.repeat(True, last-first),
np.repeat(False, len(x)-last)])
return bool_idx
# Get indices where any row for the select aesthetics has
# NaNs at the beginning or the end. Those we drop
bool_idx = (data[['x', 'y', 'size', 'color', 'linetype']]
.isnull() # Missing
.apply(keep, axis=0)) # Beginning or the End
bool_idx = np.all(bool_idx, axis=1) # Across the aesthetics
# return data
n1 = len(data)
data = data[bool_idx]
data.reset_index(drop=True, inplace=True)
n2 = len(data)
if (n2 != n1 and not self.params['na_rm']):
msg = "geom_path: Removed {} rows containing missing values."
warn(msg.format(n1-n2), PlotnineWarning)
return data
def draw_panel(self, data, panel_params, coord, ax, **params):
if not any(data['group'].duplicated()):
warn("geom_path: Each group consist of only one "
"observation. Do you need to adjust the "
"group aesthetic?", PlotnineWarning)
# drop lines with less than two points
c = Counter(data['group'])
counts = np.array([c[v] for v in data['group']])
data = data[counts >= 2]
if len(data) < 2:
return
# dataframe mergesort is stable, we rely on that here
data = data.sort_values('group', kind='mergesort')
data.reset_index(drop=True, inplace=True)
# When the parameters of the path are not constant
# with in the group, then the lines that make the paths
# can be drawn as separate segments
cols = {'color', 'size', 'linetype', 'alpha', 'group'}
cols = cols & set(data.columns)
df = data.drop_duplicates(cols)
constant = len(df) == data['group'].nunique()
params['constant'] = constant
if not constant:
self.draw_group(data, panel_params, coord, ax, **params)
else:
for _, gdata in data.groupby('group'):
gdata.reset_index(inplace=True, drop=True)
self.draw_group(gdata, panel_params, coord, ax, **params)
@staticmethod
def draw_group(data, panel_params, coord, ax, **params):
data = coord.transform(data, panel_params, munch=True)
data['size'] *= SIZE_FACTOR
constant = params.pop('constant', data['group'].nunique() == 1)
if not constant:
_draw_segments(data, ax, **params)
else:
_draw_lines(data, ax, **params)
if 'arrow' in params and params['arrow']:
params['arrow'].draw(
data, panel_params, coord,
ax, constant=constant, **params)
@staticmethod
def draw_legend(data, da, lyr):
"""
Draw a horizontal line in the box
Parameters
----------
data : dataframe
da : DrawingArea
lyr : layer
Returns
-------
out : DrawingArea
"""
data['size'] *= SIZE_FACTOR
x = [0, da.width]
y = [0.5 * da.height] * 2
key = mlines.Line2D(x,
y,
alpha=data['alpha'],
linestyle=data['linetype'],
linewidth=data['size'],
color=data['color'],
solid_capstyle='butt',
antialiased=False)
da.add_artist(key)
return da
class arrow:
"""
Define arrow (actually an arrowhead)
This is used to define arrow heads for
:class:`.geom_path`.
Parameters
----------
angle : int | float
        angle in degrees between the tail and a
        single edge of the arrowhead.
    length : int | float
        length of the arrowhead edge in "inches"
ends : str in ``['last', 'first', 'both']``
At which end of the line to draw the
arrowhead
type : str in ``['open', 'closed']``
When it is closed, it is also filled
"""
def __init__(self, angle=30, length=0.2,
ends='last', type='open'):
self.angle = angle
self.length = length
self.ends = ends
self.type = type
def draw(self, data, panel_params, coord, ax, constant=True, **params):
"""
Draw arrows at the end(s) of the lines
Parameters
----------
data : dict
plot information as required by geom.draw
scales : dict
x scale, y scale
ax : axes
On which to draw
constant: bool
If the path attributes vary along the way. If false,
the arrows are per segment of the path
"""
first = self.ends in ('first', 'both')
last = self.ends in ('last', 'both')
data = data.sort_values('group', kind='mergesort')
data['color'] = to_rgba(data['color'], data['alpha'])
if self.type == 'open':
data['facecolor'] = 'none'
else:
data['facecolor'] = data['color']
if not constant:
# Get segments/points (x1, y1) -> (x2, y2)
# for which to calculate the arrow heads
idx1, idx2 = [], []
for _, df in data.groupby('group'):
idx1.extend(df.index[:-1])
idx2.extend(df.index[1:])
d = dict(
zorder=params['zorder'],
rasterized=params['raster'],
edgecolor=data.loc[idx1, 'color'],
facecolor=data.loc[idx1, 'facecolor'],
linewidth=data.loc[idx1, 'size'],
linestyle=data.loc[idx1, 'linetype']
)
x1 = data.loc[idx1, 'x'].values
y1 = data.loc[idx1, 'y'].values
x2 = data.loc[idx2, 'x'].values
y2 = data.loc[idx2, 'y'].values
if first:
paths = self.get_paths(x1, y1, x2, y2,
panel_params, coord, ax)
coll = mcoll.PathCollection(paths, **d)
ax.add_collection(coll)
if last:
x1, y1, x2, y2 = x2, y2, x1, y1
paths = self.get_paths(x1, y1, x2, y2,
panel_params, coord, ax)
coll = mcoll.PathCollection(paths, **d)
ax.add_collection(coll)
else:
d = dict(
zorder=params['zorder'],
rasterized=params['raster'],
edgecolor=data['color'].iloc[0],
facecolor=data['facecolor'].iloc[0],
linewidth=data['size'].iloc[0],
linestyle=data['linetype'].iloc[0],
joinstyle='round',
capstyle='butt'
)
if first:
x1, x2 = data['x'].iloc[0:2]
y1, y2 = data['y'].iloc[0:2]
x1, y1, x2, y2 = [np.array([i])
for i in (x1, y1, x2, y2)]
paths = self.get_paths(x1, y1, x2, y2,
panel_params, coord, ax)
patch = mpatches.PathPatch(paths[0], **d)
ax.add_artist(patch)
if last:
x1, x2 = data['x'].iloc[-2:]
y1, y2 = data['y'].iloc[-2:]
x1, y1, x2, y2 = x2, y2, x1, y1
x1, y1, x2, y2 = [np.array([i])
for i in (x1, y1, x2, y2)]
paths = self.get_paths(x1, y1, x2, y2,
panel_params, coord, ax)
patch = mpatches.PathPatch(paths[0], **d)
ax.add_artist(patch)
def get_paths(self, x1, y1, x2, y2, panel_params, coord, ax):
"""
Compute paths that create the arrow heads
Parameters
----------
x1, y1, x2, y2 : array_like
List of points that define the tails of the arrows.
The arrow heads will be at x1, y1. If you need them
at x2, y2 reverse the input.
Returns
-------
out : list of Path
Paths that create arrow heads
"""
Path = mpath.Path
# Create reusable lists of vertices and codes
# arrowhead path has 3 vertices (Nones),
# plus dummy vertex for the STOP code
verts = [None, None, None,
(0, 0)]
# codes list remains the same after initialization
codes = [Path.MOVETO, Path.LINETO, Path.LINETO,
Path.STOP]
# Slices into the vertices list
slc = slice(0, 3)
# We need the axes dimensions so that we can
# compute scaling factors
width, height = _axes_get_size_inches(ax)
ranges = coord.range(panel_params)
width_ = np.ptp(ranges.x)
height_ = np.ptp(ranges.y)
# scaling factors to prevent skewed arrowheads
lx = self.length * width_/width
ly = self.length * height_/height
# angle in radians
a = self.angle * np.pi / 180
# direction of arrow head
xdiff, ydiff = x2 - x1, y2 - y1
rotations = np.arctan2(ydiff/ly, xdiff/lx)
# Arrow head vertices
v1x = x1 + lx * np.cos(rotations + a)
v1y = y1 + ly * np.sin(rotations + a)
v2x = x1 + lx * np.cos(rotations - a)
v2y = y1 + ly * np.sin(rotations - a)
# create a path for each arrow head
paths = []
for t in zip(v1x, v1y, x1, y1, v2x, v2y):
verts[slc] = [t[:2], t[2:4], t[4:]]
paths.append(Path(verts, codes))
return paths
def _draw_segments(data, ax, **params):
"""
Draw independent line segments between all the
points
"""
color = to_rgba(data['color'], data['alpha'])
# All we do is line-up all the points in a group
# into segments, all in a single list.
# Along the way the other parameters are put in
# sequences accordingly
indices = [] # for attributes of starting point of each segment
segments = []
for _, df in data.groupby('group'):
idx = df.index
indices.extend(idx[:-1]) # One line from two points
x = data['x'].iloc[idx]
y = data['y'].iloc[idx]
segments.append(make_line_segments(x, y, ispath=True))
segments = np.vstack(segments)
if color is None:
edgecolor = color
else:
edgecolor = [color[i] for i in indices]
linewidth = data.loc[indices, 'size']
linestyle = data.loc[indices, 'linetype']
coll = mcoll.LineCollection(
segments,
edgecolor=edgecolor,
linewidth=linewidth,
linestyle=linestyle,
zorder=params['zorder'],
rasterized=params['raster']
)
ax.add_collection(coll)
def | |
<filename>restclients_core/dao.py
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
import random
import datetime
from restclients_core.util.mock import load_resource_from_path
from restclients_core.util.local_cache import (
set_cache_value, get_cache_value)
from restclients_core.models import MockHTTP, CacheHTTP
from restclients_core.exceptions import (
ImproperlyConfigured, DataFailureException)
from restclients_core.cache import NoCache
from restclients_core.util.performance import PerformanceDegradation
from importlib import import_module
from commonconf import settings
from urllib3 import connection_from_url
from urllib3.util import Timeout
from urllib3.util.retry import Retry
from urllib3.exceptions import HTTPError
from prometheus_client import Histogram, Counter
from logging import getLogger
from dateutil.parser import parse
from urllib.parse import urlparse
import time
import ssl
logger = getLogger(__name__)
# prepare for prometheus observations
prometheus_duration = Histogram('restclient_request_duration_seconds',
'Restclient request duration (seconds)',
['service'])
prometheus_status = Histogram('restclient_response_status_code',
'Restclient web service response status code',
['service'],
buckets=[100, 200, 300, 400, 500])
prometheus_timeout = Counter('restclient_request_timeout',
'Restclient web service request timeout count',
['service'])
prometheus_ssl_error = Counter('restclient_request_ssl_error',
'Restclient web service SSL error count',
['service'])
class DAO(object):
"""
Base class for per-service interfaces.
"""
_cache_instance = None
def __init__(self):
# format is ISO 8601
log_start_str = self.get_service_setting("TIMING_START", None)
log_end_str = self.get_service_setting("TIMING_END", None)
if log_start_str is not None and log_end_str is not None:
self.log_start = parse(log_start_str)
self.log_end = parse(log_end_str)
else:
self.log_start = None
self.log_end = None
self.log_timing = self.get_service_setting("TIMING_LOG_ENABLED", False)
self.logging_rate = float(self.get_service_setting("TIMING_LOG_RATE",
1.0))
def service_name(self):
"""
This method must be overridden to define your service's short name.
This name is used in multiple places. The Mock DAO uses it in path
        names for files, and the Django app for browsing services uses it as
part of the URL.
"""
raise Exception("service_name must be defined per DAO")
def _custom_headers(self, method, url, headers, body):
"""
This method can be overridden to add headers to a request. For
example, a Bearer header can be added if a service uses OAuth tokens.
"""
# to handle things like adding a bearer token
pass
def _custom_response_edit(self, method, url, headers, body, response):
"""
This method allows a service to edit a response.
If you want to do this, you probably really want to use
_edit_mock_response - this method will operate on Live resources.
"""
if self.get_implementation().is_mock():
delay = self.get_setting("MOCKDATA_DELAY", 0.0)
time.sleep(delay)
self._edit_mock_response(method, url, headers, body, response)
def _edit_mock_response(self, method, url, headers, body, response):
"""
Override this method to edit responses in mock resources. This can be
used to ensure datetime fields have useful values relative to now,
or to provide more dynamic behavior for PUT/POST/DELETE requests.
This method should edit the response object directly. No return value.
"""
pass
def get_default_service_setting(self, key):
"""
A hook for setting useful defaults. For example, if you have a host
your service almost always uses, you can have this method return that
value when passed 'HOST'.
"""
return None
def getURL(self, url, headers={}):
"""
Request a URL using the HTTP method GET
"""
return self._load_resource("GET", url, headers, None)
def postURL(self, url, headers={}, body=None):
"""
Request a URL using the HTTP method POST.
"""
return self._load_resource("POST", url, headers, body)
def putURL(self, url, headers, body=None):
"""
Request a URL using the HTTP method PUT.
"""
return self._load_resource("PUT", url, headers, body)
def patchURL(self, url, headers, body):
"""
Request a URL using the HTTP method PATCH.
"""
return self._load_resource("PATCH", url, headers, body)
def deleteURL(self, url, headers=None):
"""
Request a URL using the HTTP method DELETE.
"""
return self._load_resource("DELETE", url, headers, None)
def service_mock_paths(self):
"""
If your web service client ships with mock resources, override this
method to return a list of top level paths where they can be found.
e.g. If your resource is in
/users/my/my_client/resources/client/file/hello.json
this method should return ["/users/my/my_client/resources"]
"""
return []
def _load_resource(self, method, url, headers, body):
start_time = time.time()
service = self.service_name()
bad_response = PerformanceDegradation.get_response(service, url)
if bad_response:
return bad_response
custom_headers = self._custom_headers(method, url, headers, body)
if custom_headers:
headers.update(custom_headers)
is_cacheable = self._is_cacheable(method, url, headers, body)
cache = self.get_cache()
if is_cacheable:
cache_response = cache.getCache(service, url, headers)
if cache_response:
if "response" in cache_response:
self._log(service=service, url=url, method=method,
response=cache_response["response"],
cached=True, start_time=start_time)
return cache_response["response"]
if "headers" in cache_response:
headers = cache_response["headers"]
backend = self.get_implementation()
response = backend.load(method, url, headers, body)
self.prometheus_duration(time.time() - start_time)
self.prometheus_status(response)
self._custom_response_edit(method, url, headers, body, response)
if is_cacheable:
cache_post_response = cache.processResponse(service, url, response)
if cache_post_response is not None:
if "response" in cache_post_response:
self._log(service=service, url=url, method=method,
response=response, cached=True,
start_time=start_time)
return cache_post_response["response"]
self._log(service=service, url=url, method=method, response=response,
cached=False, start_time=start_time)
return response
def prometheus_duration(self, duration):
"""
Override this method if you have service-specific logic
around response times
"""
self.prometheus_duration_observation(duration)
def prometheus_status(self, response):
"""
Override this method to insert service-specific logic
before setting the response status code observation
e.g., If the service applies special meaning to, say, 404 response
that might not make sense to observe
"""
self.prometheus_status_observation(response.status)
def prometheus_duration_observation(self, duration):
prometheus_duration.labels(self.service_name()).observe(duration)
def prometheus_status_observation(self, status):
# status category buckets
prometheus_status.labels(self.service_name()).observe(
(int(status) // 100) * 100)
def get_cache(self):
if DAO._cache_instance is None:
implementation = self.get_setting("DAO_CACHE_CLASS", None)
DAO._cache_instance = self._getModule(implementation, NoCache)
return DAO._cache_instance
def clear_cached_response(self, url):
self.get_cache().deleteCache(self.service_name(), url)
def get_implementation(self):
implementation = self.get_service_setting("DAO_CLASS", None)
# Handle the easy built-ins
if "Live" == implementation:
return self._get_live_implementation()
if "Mock" == implementation:
return self._get_mock_implementation()
# Legacy settings support
live = "restclients.dao_implementation.{}.Live".format(
self.service_name())
mock = "restclients.dao_implementation.{}.File".format(
self.service_name())
if live == implementation:
return self._get_live_implementation()
if mock == implementation:
return self._get_mock_implementation()
if implementation:
return self._getModule(implementation, None,
[self.service_name(), self])
return self._get_mock_implementation()
def _is_cacheable(self, method, url, headers, body=None):
if method == "GET":
return True
return False
def _ok_status_codes(self):
return [200, 201, 202, 204]
def _error_status_codes(self):
return []
def _get_live_implementation(self):
return LiveDAO(self.service_name(), self)
def _get_mock_implementation(self):
return MockDAO(self.service_name(), self)
def get_service_setting(self, key, default=None):
if default is None:
default = self.get_default_service_setting(key)
service_key = "{}_{}".format(self.service_name().upper(), key)
if hasattr(settings, "RESTCLIENTS_{}".format(service_key)):
return self.get_setting(service_key, default)
else:
return self.get_setting(key, default)
def get_setting(self, key, default=None):
key = "RESTCLIENTS_{}".format(key)
return getattr(settings, key, default)
def _getModule(self, value, default_class, args=[]):
if not value:
return default_class()
module, attr = value.rsplit('.', 1)
try:
mod = import_module(module)
except ImportError as e:
raise ImproperlyConfigured(
"Error importing module {}: {}".format(module, e))
try:
config_module = getattr(mod, attr)
except AttributeError:
raise ImproperlyConfigured(
"Module {} missing {} class".format(module, attr))
return config_module(*args)
def _log(self, *args, **kwargs):
if not self.should_log():
return
from_cache = 'yes' if kwargs.get('cached') else 'no'
cache_class = self.get_cache().__class__.__qualname__ if (
kwargs.get('cached')) else 'None'
response = kwargs.get('response')
total_time = time.time() - kwargs.get('start_time')
msg = (("service:{} method:{} url:{} status:{} from_cache:{} " +
"cache_class:{} time:{}").format(
kwargs.get('service'), kwargs.get('method'),
kwargs.get('url'), response.status,
from_cache, cache_class, total_time))
logger.info(msg)
def should_log(self):
if self.log_start is not None and self.log_end is not None:
if not self.log_start < datetime.datetime.now() < self.log_end:
return False
if not self.log_timing:
return False
if random.random() >= self.logging_rate:
return False
return True
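# --- Illustrative sketch (not part of the original module) -------------------
# The docstrings above describe DAO as a base class that service clients
# subclass, overriding service_name() and, optionally, hooks such as
# get_default_service_setting(), service_mock_paths() and prometheus_status().
# The "book" service below is hypothetical: its name, host and paths are made
# up for illustration and do not refer to any real service or setting.
class _ExampleBookServiceDAO(DAO):
    def service_name(self):
        # Used in mock-resource paths and in RESTCLIENTS_BOOK_* setting names.
        return "book"
    def get_default_service_setting(self, key):
        # Fallback consulted by get_service_setting() when neither
        # RESTCLIENTS_BOOK_<key> nor RESTCLIENTS_<key> is configured.
        if key == "HOST":
            return "https://book.example.edu"
        return None
    def service_mock_paths(self):
        # Top-level directories searched for mock resource files.
        return ["/tmp/book_client/resources"]
    def prometheus_status(self, response):
        # Example of the override hook described above: skip the observation
        # for 404, assuming this hypothetical service uses it to mean "no data".
        if response.status != 404:
            self.prometheus_status_observation(response.status)
def _example_book_service_get(url="/api/v1/book/123"):
    # With RESTCLIENTS_BOOK_DAO_CLASS unset, get_implementation() falls back to
    # the Mock implementation, so the GET is served from mock resource files.
    # The returned object exposes at least .status (see _log above).
    dao = _ExampleBookServiceDAO()
    return dao.getURL(url, {"Accept": "application/json"})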
class DAOImplementation(object):
def __init__(self, service_name, dao):
self._service_name = service_name
self.dao = dao
def is_live(self):
return False
def is_mock(self):
return False
class LiveDAO(DAOImplementation):
"""
Loads response objects by fetching resources from an HTTP(s) server.
"""
pools = {}
def is_live(self):
return True
def load(self, method, url, headers, body):
pool = self.get_pool()
timeout = pool.timeout
try:
return pool.urlopen(
method, url, body=body, headers=headers,
timeout=timeout,
pool_timeout=timeout.connect_timeout)
# will block for 1 sec if no connection is available
# then raise EmptyPoolError
except ssl.SSLError as err:
self._prometheus_ssl_error()
raise
except HTTPError as err:
status = 0
self._prometheus_timeout()
raise DataFailureException(url, status, err)
def get_pool(self):
service = self.dao.service_name()
if service not in LiveDAO.pools:
pool = self.create_pool()
LiveDAO.pools[service] = pool
return LiveDAO.pools[service]
def create_pool(self):
"""
Return a ConnectionPool instance for the given host
"""
ca_certs = self.dao.get_setting("CA_BUNDLE",
"/etc/ssl/certs/ca-bundle.crt")
cert_file = self.dao.get_service_setting("CERT_FILE", None)
host = self.dao.get_service_setting("HOST")
key_file = self.dao.get_service_setting("KEY_FILE", None)
verify_https = self.dao.get_service_setting("VERIFY_HTTPS")
if verify_https is None:
verify_https = True
timeout = Timeout(connect=self._get_connect_timeout(),
read=self._get_timeout())
kwargs = {
"retries": Retry(total=1, connect=0, read=0, redirect=1),
"timeout": timeout,
"maxsize": self._get_max_pool_size(),
"block": True,
}
if key_file is not None and cert_file is not None:
kwargs["key_file"] = key_file
kwargs["cert_file"] = cert_file
if urlparse(host).scheme == "https":
kwargs["ssl_version"] = self.dao.get_service_setting(
"SSL_VERSION", ssl.PROTOCOL_TLS)
if verify_https:
kwargs["cert_reqs"] = "CERT_REQUIRED"
kwargs["ca_certs"] = ca_certs
else:
kwargs["cert_reqs"] = "CERT_NONE"
return connection_from_url(host, **kwargs)
def _get_connect_timeout(self):
"""
The maximum amount of time (in seconds) to wait for a connection
attempt to a server to succeed
"""
return float(self.dao.get_service_setting("CONNECT_TIMEOUT",
self.dao.get_setting("DEFAULT_CONNECT_TIMEOUT", 3)))
def _get_timeout(self):
"""
The maximum amount of time (in seconds) to wait between
consecutive READ operations for a response from the server.
"""
return float(self.dao.get_service_setting("TIMEOUT",
self.dao.get_setting("DEFAULT_TIMEOUT", 10)))
def _get_max_pool_size(self):
"""
The maximum connections per host.
| |
None,
73741: None,
73742: None,
73743: None,
73744: None,
73745: None,
73746: None,
73747: None,
73748: None,
73750: None,
73751: None,
73752: None,
73753: None,
73754: None,
73755: None,
73756: None,
73757: None,
73758: None,
73759: None,
73760: None,
73761: None,
73762: None,
73763: None,
73764: None,
73765: None,
73766: None,
73767: None,
73768: None,
73769: None,
73770: None,
73771: None,
73772: None,
73773: None,
73774: None,
73776: None,
73777: None,
73775: None,
73778: None,
73779: None,
73780: None,
73781: None,
73782: None,
73783: None,
73784: None,
73786: None,
73788: None,
73789: None,
73787: None,
73785: None,
73790: None,
73791: None,
73792: None,
73793: None,
73794: None,
73795: None,
73796: None,
73797: None,
73798: None,
73799: None,
73800: None,
73801: None,
73802: None,
73803: None,
73804: None,
73805: None,
73808: None,
73806: None,
73807: None,
73809: None,
73810: None,
73811: None,
73812: None,
73813: None,
73814: None,
73815: None,
73816: None,
73817: None,
73818: None,
73819: None,
73820: None,
73821: None,
73822: None,
73823: None,
73824: None,
73825: None,
73826: None,
73827: None,
73828: None,
73829: None,
73830: None,
73831: None,
73832: None,
73833: None,
73834: None,
73835: None,
73836: None,
73837: None,
73838: None,
73839: None,
73840: None,
73841: None,
73842: None,
73843: None,
73844: None,
73845: None,
73846: None,
73847: None,
73848: None,
73849: None,
73850: None,
73852: None,
73851: None,
73853: None,
73854: None,
73855: None,
73856: None,
73857: None,
73858: None,
73859: None,
73860: None,
73861: None,
73862: None,
73863: None,
73864: None,
73865: None,
73866: None,
73868: None,
73867: None,
73869: None,
73870: None,
73871: None,
73872: None,
73873: None,
73874: None,
73875: None,
73876: None,
73877: None,
73878: None,
73879: None,
73883: None,
73884: None,
73885: None,
73880: None,
73881: None,
73882: None,
73886: None,
73887: None,
73888: None,
73889: None,
73890: None,
73891: None,
73892: None,
73893: None,
73894: None,
73895: None,
73896: None,
73897: None,
73898: None,
73899: None,
73900: None,
73901: None,
73902: None,
73903: None,
73904: None,
73905: None,
73906: None,
73907: None,
73908: None,
73909: None,
73910: None,
73911: None,
73965: None,
73912: None,
73913: None,
73914: None,
73915: None,
73916: None,
73917: None,
73918: None,
73919: None,
73920: None,
73921: None,
73922: None,
73923: None,
73924: None,
73925: None,
73926: None,
73927: None,
73928: None,
73929: None,
73930: None,
73931: None,
73932: None,
73933: None,
73934: None,
73935: None,
73936: None,
73937: None,
73938: None,
73939: None,
73940: None,
73941: None,
73942: None,
73943: None,
73944: None,
73945: None,
73946: None,
73947: None,
73948: None,
73949: None,
73950: None,
73951: None,
73952: None,
73953: None,
73954: None,
73955: None,
73956: None,
73957: None,
73958: None,
73959: None,
73960: None,
73961: None,
73962: None,
73963: None,
73964: None,
73966: None,
73967: None,
73968: None,
73969: None,
73970: None,
73971: None,
73972: None,
73973: None,
73974: None,
73975: None,
73978: None,
73977: None,
73976: None,
73979: None,
73980: None,
73981: None,
73982: None,
73983: None,
73984: None,
73987: None,
73985: None,
73986: None,
73988: None,
73990: None,
73989: None,
73991: None,
73992: None,
73993: None,
73994: None,
73995: None,
73996: None,
73997: None,
73998: None,
73999: None,
74000: None,
74001: None,
74002: None,
74005: None,
74003: None,
74004: None,
74006: None,
74007: None,
74008: None,
74013: None,
74009: None,
74010: None,
74011: None,
74012: None,
74014: None,
74017: None,
74015: None,
74016: None,
74018: None,
74019: None,
74020: None,
74021: None,
74022: None,
74023: None,
74024: None,
74025: None,
74027: None,
74026: None,
74028: None,
74029: None,
74030: None,
74031: None,
74032: None,
74033: None,
74034: None,
74035: None,
74036: None,
74037: None,
74038: None,
74039: None,
74040: None,
74041: None,
74042: None,
74043: None,
74044: None,
74045: None,
74046: None,
74047: None,
74048: None,
74049: None,
74050: None,
74051: None,
74052: None,
74053: None,
74054: None,
74055: None,
74058: None,
74057: None,
74056: None,
74059: None,
74060: None,
74061: None,
74062: None,
74064: None,
74065: None,
74066: None,
74063: None,
74067: None,
74068: None,
74069: None,
74070: None,
74071: None,
74072: None,
74073: None,
74074: None,
74075: None,
74076: None,
74077: None,
74078: None,
74079: None,
74080: None,
74081: None,
74082: None,
74083: None,
74084: None,
74085: None,
74086: None,
74087: None,
74088: None,
74090: None,
74089: None,
74091: None,
74092: None,
74093: None,
74094: None,
74095: None,
74096: None,
74097: None,
74098: None,
74099: None,
74100: None,
74101: None,
74102: None,
74103: None,
74104: None,
74105: None,
74106: None,
74107: None,
74108: None,
74109: None,
74110: None,
74111: None,
74112: None,
74113: None,
74114: None,
74115: None,
74116: None,
74117: None,
74118: None,
74119: None,
74120: None,
74121: None,
74122: None,
74123: None,
74124: None,
74125: None,
74126: None,
74127: None,
74128: None,
74129: None,
74130: None,
74131: None,
74132: None,
74133: None,
74134: None,
74135: None,
74137: None,
74136: None,
74138: None,
74139: None,
74140: None,
74141: None,
74142: None,
74143: None,
74144: None,
74145: None,
74146: None,
74147: None,
74148: None,
74149: None,
74150: None,
74151: None,
74152: None,
74153: None,
74154: None,
74155: None,
74156: None,
74157: None,
74158: None,
74159: None,
74160: None,
74161: None,
74162: None,
74163: None,
74164: None,
74165: None,
74166: None,
74167: None,
74168: None,
74219: None,
74169: None,
74170: None,
74171: None,
74172: None,
74173: None,
74174: None,
74175: None,
74176: None,
74177: None,
74178: None,
74179: None,
74180: None,
74181: None,
74182: None,
74183: None,
74184: None,
74185: None,
74186: None,
74187: None,
74188: None,
74189: None,
74190: None,
74191: None,
74192: None,
74193: None,
74194: None,
74195: None,
74196: None,
74197: None,
74198: None,
74199: None,
74200: None,
74201: None,
74202: None,
74203: None,
74204: None,
74205: None,
74206: None,
74207: None,
74208: None,
74209: None,
74210: None,
74211: None,
74212: None,
74213: None,
74214: None,
74215: None,
74216: None,
74217: None,
74218: None,
74220: None,
74223: None,
74224: None,
74221: None,
74222: None,
74225: None,
74226: None,
74227: None,
74228: None,
74229: None,
74230: None,
74231: None,
74232: None,
74233: None,
74234: None,
74235: None,
74236: None,
74237: None,
74258: None,
74259: None,
74261: None,
74260: None,
74257: None,
74238: None,
74239: None,
74240: None,
74241: None,
74242: None,
74243: None,
74244: None,
74245: None,
74246: None,
74247: None,
74248: None,
74249: None,
74250: None,
74251: None,
74252: None,
74253: None,
74254: None,
74255: None,
74256: None,
74262: None,
74263: None,
74265: None,
74264: None,
74266: None,
74267: None,
74268: None,
74269: None,
74270: None,
74271: None,
74272: None,
74274: None,
74273: None,
74275: None,
74276: None,
74277: None,
74278: None,
74279: None,
74280: None,
74281: None,
74282: None,
74283: None,
74284: None,
74285: None,
74286: None,
74287: None,
74288: None,
74289: None,
74290: None,
74296: None,
74294: None,
74295: None,
74291: None,
74292: None,
74293: None,
74297: None,
74301: None,
74298: None,
74299: None,
74300: None,
74302: None,
74303: None,
74304: None,
74305: None,
74307: None,
74306: None,
74308: None,
74310: None,
74309: None,
74311: None,
74312: None,
74315: None,
74313: None,
74314: None,
74316: None,
74317: None,
74318: None,
74319: None,
74320: None,
74321: None,
74322: None,
74323: None,
74324: None,
74325: None,
74326: None,
74327: None,
74328: None,
74329: None,
74330: None,
74331: None,
74332: None,
74333: None,
74334: None,
74335: None,
74336: None,
74337: None,
74338: None,
74339: None,
74347: None,
74348: None,
74340: None,
74341: None,
74342: None,
74343: None,
74344: None,
74346: None,
74345: None,
74349: None,
74350: None,
74351: None,
74352: None,
74353: None,
74354: None,
74355: None,
74356: None,
74357: None,
74358: None,
74359: None,
74360: None,
74361: None,
74362: None,
74363: None,
74364: None,
74365: None,
74366: None,
74367: None,
74377: None,
74368: None,
74369: None,
74370: None,
74371: None,
74372: None,
74373: None,
74374: None,
74375: None,
74376: None,
74378: None,
74382: None,
74379: None,
74380: None,
74381: None,
74383: None,
74384: None,
74385: None,
74386: None,
74387: None,
74389: None,
74408: None,
74388: None,
74407: None,
74390: None,
74391: None,
74392: None,
74393: None,
74394: None,
74395: None,
74396: None,
74397: None,
74398: None,
74399: None,
74400: None,
74401: None,
74402: None,
74403: None,
74404: None,
74405: None,
74406: None,
74409: None,
74410: None,
74411: None,
74412: None,
74413: None,
74414: None,
74415: None,
74416: None,
74417: None,
74418: None,
74419: None,
74420: None,
74421: None,
74422: None,
74423: None,
74424: None,
74425: None,
74426: None,
74427: None,
74428: None,
74429: None,
74430: None,
74431: None,
74432: None,
74433: None,
74434: None,
74435: None,
74436: None,
74437: None,
74438: None,
74439: None,
74440: None,
74441: None,
74442: None,
74443: None,
74444: None,
74445: None,
74446: None,
74447: None,
74448: None,
74449: None,
74450: None,
74451: None,
74453: None,
74452: None,
74454: None,
74455: None,
74456: None,
74457: None,
74458: None,
74459: None,
74460: None,
74461: None,
74462: None,
74463: None,
74464: None,
74465: None,
74466: None,
74467: None,
74468: None,
74469: None,
74470: None,
74471: None,
74472: None,
74473: None,
74474: None,
74475: None,
74476: None,
74479: None,
74477: None,
74478: None,
74480: None,
74481: None,
74482: None,
74483: None,
74484: None,
74485: None,
74486: None,
74487: None,
74488: None,
74489: None,
74490: None,
74491: None,
74492: None,
74493: None,
74494: None,
74495: None,
74496: None,
74497: None,
74499: None,
74500: None,
74498: None,
74501: None,
74502: None,
74503: None,
74504: None,
74505: None,
74506: None,
74507: None,
74508: None,
74510: None,
74511: None,
74512: None,
74509: None,
74513: None,
74514: None,
74515: None,
74521: None,
74516: None,
74522: None,
74523: None,
74517: None,
74518: None,
74519: None,
74520: None,
74524: None,
74525: None,
74526: None,
74527: None,
74528: None,
74529: None,
74530: None,
74531: None,
74532: None,
74533: None,
74534: None,
74535: None,
74536: None,
74537: None,
74538: None,
74539: None,
74540: None,
74541: None,
74542: None,
74543: None,
74544: None,
74545: None,
74546: None,
74547: None,
74548: None,
74549: None,
74550: None,
74551: None,
74552: None,
74553: None,
74554: None,
74555: None,
74556: None,
74557: None,
74558: None,
74559: None,
74560: None,
74561: None,
74562: None,
74563: None,
74564: None,
74565: None,
74566: None,
74567: None,
74568: None,
74569: None,
74570: None,
74571: None,
74572: None,
74573: None,
74574: None,
74575: None,
74576: None,
74577: None,
74578: None,
74579: None,
74580: None,
74581: None,
74583: None,
74584: None,
74582: None,
74585: None,
74586: None,
74587: None,
74588: None,
74589: None,
74591: None,
74590: None,
74592: None,
74593: None,
74594: None,
74595: None,
74596: None,
74597: None,
74598: None,
74599: None,
74600: None,
74601: None,
74602: None,
74603: None,
74604: None,
74605: None,
74606: None,
9982: None,
67584: None,
67585: None,
67586: None,
67589: None,
67592: None,
67594: None,
67595: None,
67596: None,
67597: None,
67598: None,
67599: None,
67600: None,
67601: None,
67602: None,
67603: None,
67604: None,
67605: None,
67606: None,
67607: None,
67608: None,
67609: None,
67610: None,
67611: None,
67612: None,
67613: None,
67587: None,
67614: None,
67615: None,
67616: None,
67617: None,
67618: None,
67619: None,
67620: None,
67621: None,
67622: None,
67623: None,
67624: None,
67625: None,
67626: None,
67627: None,
67628: None,
67629: None,
67630: None,
67631: None,
67632: None,
67633: None,
67588: None,
67634: None,
67635: None,
67636: None,
67637: None,
67639: None,
67640: None,
67644: None,
67647: None,
1310: None,
42602: None,
42586: None,
42572: None,
42630: None,
42584: None,
42568: None,
42604: None,
42624: None,
42562: None,
42626: None,
42632: None,
1298: None,
1312: None,
1314: None,
1270: None,
1274: None,
1276: None,
1278: None,
42644: None,
42566: None,
42582: None,
42588: None,
42578: None,
1300: None,
42600: None,
42570: None,
42574: None,
1316: None,
1306: None,
42564: None,
42580: None,
1296: None,
1302: None,
42646: None,
42594: None,
42596: None,
42598: None,
42642: None,
42634: None,
42640: None,
42638: None,
42636: None,
1308: None,
1304: None,
42576: None,
42590: None,
42560: None,
42628: None,
42622: None,
42606: None,
7467: None,
42623: None,
1311: None,
42603: None,
42587: None,
42573: None,
42631: None,
42585: None,
42569: None,
42605: None,
42625: None,
42563: None,
42627: None,
42633: None,
1299: None,
1313: None,
1315: None,
1271: None,
1275: None,
1277: None,
1279: None,
42645: None,
42567: None,
42583: None,
42589: None,
42579: None,
1301: None,
42601: None,
42571: None,
42575: None,
1231: None,
1317: None,
1307: None,
42565: None,
42581: None,
1297: None,
1303: None,
42647: None,
42595: None,
42597: None,
42599: None,
42643: None,
42635: None,
42641: None,
42639: None,
42637: None,
1309: None,
1305: None,
42577: None,
42591: None,
42561: None,
42629: None,
9192: None,
66599: None,
66598: None,
66639: None,
66638: None,
43258: None,
43257: None,
43259: None,
2431: None,
2418: None,
2430: None,
2427: None,
2429: None,
2426: None,
2428: None,
2308: None,
2425: None,
43255: None,
43254: None,
43253: None,
43251: None,
43252: None,
2417: None,
2304: None,
43256: None,
43250: None,
2389: None,
2382: None,
11033: None,
11030: None,
11031: None,
11032: None,
127241: None,
127238: None,
127237: None,
127242: None,
127234: None,
127240: None,
127239: None,
127236: None,
127235: None,
127233: None,
127232: None,
119557: None,
119555: None,
119556: None,
9868: None,
9871: None,
119553: None,
119554: None,
9870: None,
9869: None,
9933: None,
9902: None,
127024: None,
127025: None,
127026: None,
127027: None,
127028: None,
127029: None,
127030: None,
127031: None,
127032: None,
127033: None,
127034: None,
127035: None,
127036: None,
127037: None,
127038: None,
127039: None,
127040: None,
127041: None,
127042: None,
127043: None,
127044: None,
127045: None,
127046: None,
127047: None,
127048: None,
127049: None,
127050: None,
127051: None,
127052: None,
127053: None,
127054: None,
127055: None,
127056: None,
127057: None,
127058: None,
127059: None,
127060: None,
127061: None,
127062: None,
127063: None,
127064: None,
127065: None,
127066: None,
127067: None,
127068: None,
127069: None,
127070: None,
127071: None,
127072: None,
127073: None,
127074: None,
127075: None,
127076: None,
127077: None,
127078: None,
127079: None,
127080: None,
127081: None,
127082: None,
127083: None,
127084: None,
127085: None,
127086: None,
127087: None,
127088: None,
127089: None,
127090: None,
127091: None,
127092: None,
127093: None,
127094: None,
127095: None,
127096: None,
127097: None,
127098: None,
127099: None,
127100: None,
127101: None,
127102: None,
127103: None,
127104: None,
127105: None,
127106: None,
127107: None,
127108: None,
127109: None,
127110: None,
127111: None,
127112: None,
127113: None,
127114: None,
127115: None,
127116: None,
127117: None,
127118: None,
127119: None,
127120: None,
127121: None,
127122: None,
127123: None,
8284: None,
11795: None,
11798: None,
11034: None,
11784: None,
11799: None,
8508: None,
9890: None,
9891: None,
11796: None,
11015: None,
9946: None,
9178: None,
11790: None,
77824: None,
77825: None,
77826: None,
77827: None,
77828: None,
77829: None,
77830: None,
77831: None,
77832: None,
77833: None,
77834: None,
77835: None,
77836: None,
77837: None,
77838: None,
77839: None,
77840: None,
77841: None,
77842: None,
77843: None,
77844: None,
77845: None,
77846: None,
77847: None,
77848: None,
77849: None,
77850: None,
77851: None,
77852: None,
77853: None,
77854: None,
77855: None,
77856: None,
77857: None,
77858: None,
77859: None,
77860: None,
77861: None,
77862: None,
77863: None,
77864: None,
77865: None,
77866: None,
77867: None,
77868: None,
77869: None,
77870: None,
77871: None,
77872: None,
77873: None,
77874: None,
77875: None,
77876: None,
77877: None,
77878: None,
77879: None,
77880: None,
77881: None,
77882: None,
77883: None,
77884: None,
77885: None,
77886: None,
77887: None,
77888: None,
77889: None,
77890: None,
77891: None,
77892: None,
77893: None,
77894: None,
77895: None,
77896: None,
77897: None,
77898: None,
77899: None,
77900: None,
77901: None,
77902: None,
77903: None,
78861: None,
78862: None,
78863: None,
78864: None,
78865: None,
78866: None,
78867: None,
78868: | |
<filename>test/data/ReqMgr/reqmgr2.py
#!/usr/bin/env python
"""
Request Manager service (ReqMgr) test and client script.
The script has no dependency on WMCore libraries.
Command line interface: --help
There are mandatory command line arguments (e.g. URL of the Request Manager)
Production ConfigCache: https://cmsweb.cern.ch/couchdb/reqmgr_config_cache/
Note: tests that check data directly in CouchDB live in the ReqMgr1 test script:
WMCore/test/data/ReqMgr/reqmgr.py
"""
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
import json
import logging
import os
import sys
import urllib.parse
from argparse import ArgumentParser
from http.client import HTTPSConnection, HTTPConnection
class RESTClient(object):
"""
HTTP or HTTPS client, chosen based on the provided URL scheme (http:// or https://)
"""
def __init__(self, url, cert=None, key=None, logger=None):
self.logger = logger
self.logger.info("RESTClient URL: %s", url)
if url.startswith("https://"):
self.logger.debug("Using HTTPS protocol, getting user identity files ...")
proxy_file = "/tmp/x509up_u%s" % os.getuid()
if not os.path.exists(proxy_file):
proxy_file = "UNDEFINED"
cert_file = cert or os.getenv("X509_USER_CERT",
os.getenv("X509_USER_PROXY", proxy_file))
key_file = key or os.getenv("X509_USER_KEY",
os.getenv("X509_USER_PROXY", proxy_file))
self.logger.info("Identity files:\n\tcert file: '%s'\n\tkey file: '%s' ",
cert_file, key_file)
url = url.replace("https://", '')
self.logger.debug("Creating connection HTTPS ...")
self.conn = HTTPSConnection(url, key_file=key_file, cert_file=cert_file)
if url.startswith("http://"):
self.logger.info("Using HTTP protocol, creating HTTP connection ...")
url = url.replace("http://", '')
self.conn = HTTPConnection(url)
def http_request(self, verb, uri, data=None, headers=None):
self.logger.debug("Request: %s %s %s ..." % (verb, uri, data))
self.conn.request(verb, uri, body=data, headers=headers or self.headers)
resp = self.conn.getresponse()
data = resp.read()
self.logger.debug("Status: %s", resp.status)
self.logger.debug("Reason: %s", resp.reason)
if resp.status != 200:
if "x-error-detail" in resp.msg:
self.logger.warning("Message: %s", resp.msg["x-error-detail"])
return resp.status, data
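# Illustrative sketch (not part of the original script): RESTClient above picks
# plain HTTP or HTTPS purely from the URL prefix; for HTTPS the identity files
# fall back to X509_USER_CERT/X509_USER_KEY, X509_USER_PROXY or /tmp/x509up_u<UID>.
# The URLs and file paths below are placeholders; actually running this needs a
# reachable service plus real credentials.
def _example_rest_client():
    logger = logging.getLogger()
    # Plain HTTP: no identity files are involved.
    plain = RESTClient("http://localhost:8080", logger=logger)
    status, data = plain.http_request("GET", "/about",
                                      headers={"Accept": "application/json"})
    # HTTPS: cert/key may also be passed explicitly instead of env. variables.
    RESTClient("https://reqmgr.example.org",
               cert="/tmp/usercert.pem", key="/tmp/userkey.pem", logger=logger)
    return status, data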
class ReqMgrClient(RESTClient):
"""
Client REST interface to Request Manager service (ReqMgr).
Actions: all_tests
For reference:
jsonArgs = json.dumps(requestArgs["createRequest"])
status, data = self.http_request("PUT", "/reqmgr/reqMgr/request", data=jsonArgs)
data = json.loads(data)
requestName = data.values()[0]["request"]
params = {"requestName": requestName,
"status": "assignment-approved"}
encodedParams = urllib.urlencode(params)
logging.info("Approving request '%s' ..." % requestName)
status, data = self.http_request("PUT", "/reqmgr/reqMgr/request",
data=encodedParams, headers=self.textHeaders)
"""
def __init__(self, url, config, logger=None):
self.logger = logging.getLogger() if not logger else logger
self.logger.info("ReqMgr url: %s", url)
# ReqMgr based on WMCore.REST API requires accept types defined
self.headersUrl = {"Content-type": "application/x-www-form-urlencoded",
"Accept": "application/json"}
self.headersBody = {"Content-type": "application/json",
"Accept": "application/json"}
self.headers = self.headersUrl
self.urn_prefix = "/reqmgr2/data"
RESTClient.__init__(self, url, cert=config.cert, key=config.key, logger=logger)
def _caller_checker(self, urn, verb, input_data=None, exp_data=None):
urn = self.urn_prefix + urn
self.logger.info("Call %s %s %s", urn, verb, input_data)
status, data = self.http_request(verb, urn, data=input_data)
if status != 200:
self.logger.error("HTTP request failed with status: %s, data: %s", status, data)
return
data = json.loads(data)["result"]
if exp_data:
assert status == 200, "Call status is: %s" % status
assert data[0] == exp_data, "'%s' != '%s' mismatch." % (data[0], exp_data)
else:
assert status == 200, "Call status is: %s" % status
self.logger.info("status: %s\n%s", status, data)
return data
def delete_requests(self, config):
urn = self.urn_prefix + "/request"
for request_name in config.request_names:
self.logger.info("Deleting '%s' request ...", request_name)
args = urllib.parse.urlencode({"request_name": request_name})
status, data = self.http_request("DELETE", urn, data=args)
if status != 200:
self.logger.error("Failed to delete request with status: %s, data: %s", status, data)
sys.exit(1)
self.logger.info("Done.")
def create_request(self, config):
"""
config.request_args - arguments for both creation and assignment
"""
self.logger.info("Injecting request args:\n%s ...", config.request_args["createRequest"])
json_args = json.dumps(config.request_args["createRequest"])
urn = self.urn_prefix + "/request"
status, data = self.http_request("POST", urn, data=json_args,
headers=self.headersBody)
if status > 216:
self.logger.error("Failed to create request with status: %s, data: %s", status, data)
sys.exit(1)
data = json.loads(data)
self.logger.info(data)
request_name = data["result"][0]["request"]
self.approve_request(request_name)
self.logger.info("Create request '%s' succeeded.", request_name)
config.request_names = request_name
return request_name
def approve_request(self, request_name):
"""
Set the status of the requestName request to assignment-approved.
Once ReqMgr provides proper API for status settings, esp. for assignment,
a single method setStates shall handle all request status changes.
"""
self.logger.info("Approving request '%s' ...", request_name)
json_args = json.dumps({"RequestStatus": "assignment-approved"})
urn = self.urn_prefix + "/request/%s" % request_name
status, data = self.http_request("PUT", urn, data=json_args,
headers=self.headersBody)
if status != 200:
self.logger.error("Failed to approve request with status: %s, data: %s", status, data)
sys.exit(1)
self.logger.info("Approve succeeded.")
def assign_request(self, config):
"""
config.request_args - arguments for both creation and assignment
"""
assign_args = config.request_args["assignRequest"]
assign_args["RequestStatus"] = "assigned"
json_args = json.dumps(assign_args)
if isinstance(config.request_names, str):
config.request_names = [config.request_names]
for request_name in config.request_names:
self.logger.info("Assigning %s with request args: %s ...",
request_name, config.request_args["assignRequest"])
urn = self.urn_prefix + "/request/%s" % request_name
status, data = self.http_request("PUT", urn, data=json_args,
headers=self.headersBody)
if status > 216:
self.logger.error("Failed to assign request with status: %s, data: %s", status, data)
sys.exit(1)
data = json.loads(data)
self.logger.info(data)
self.logger.info("Assign succeeded.")
def query_requests(self, config, to_query=None):
"""
If to_query and config.request_names are not specified, then
all requests in the system are queried.
to_query - particular request name to query.
config.request_names - list of requests to query.
Returns a list of requests in either case.
"""
if to_query:
requests_to_query = [to_query]
else:
requests_to_query = config.request_names
requests_data = []
if requests_to_query:
for request_name in requests_to_query:
self.logger.info("Querying '%s' request ...", request_name)
urn = self.urn_prefix + "/request?name=%s" % request_name
status, data = self.http_request("GET", urn)
if status != 200:
self.logger.error("Failed to get request with status: %s, data: %s", status, data)
sys.exit(1)
request = json.loads(data)["result"][0]
for k, v in sorted(request.items()):
self.logger.info("\t%s: %s", k, v)
requests_data.append(request)
# returns data on requests in the same order as in the config.request_names
return requests_data
def all_tests(self, config):
self._caller_checker("/hello", "GET", exp_data="Hello world")
self._caller_checker("/hello?name=John", "GET", exp_data="Hello John")
self._caller_checker("/about", "GET")
self._caller_checker("/info", "GET")
group = "mygroup"
args = urllib.parse.urlencode({"group_name": group})
self._caller_checker("/group", "PUT", input_data=args)
data = self._caller_checker("/group", "GET")
assert group in data, "%s should be in %s" % (group, data)
self._caller_checker("/group", "DELETE", input_data=args)
data = self._caller_checker("/group", "GET")
assert group not in data, "%s should be deleted from %s" % (group, data)
team = "myteam"
args = urllib.parse.urlencode({"team_name": team})
self._caller_checker("/team", "PUT", input_data=args)
data = self._caller_checker("/team", "GET")
assert team in data, "%s should be in %s" % (team, data)
self._caller_checker("/team", "DELETE", input_data=args)
data = self._caller_checker("/team", "GET")
assert team not in data, "%s should be deleted from %s" % (team, data)
data = self._caller_checker("/software", "GET")
data = self._caller_checker("/status", "GET")
assert len(data) > 0, "%s should be non-empty list." % data
# test some request status
status = ["assigned", "assignment-approved", "failed", "new"]
for s in status: assert s in data, "%s is not in %s" % (s, data)
data2 = self._caller_checker("/status?transition=false", "GET")
assert data == data2, "%s != %s" % (data, data2)
# returns also all allowed transitions
data = self._caller_checker("/status?transition=true", "GET")
for status_def in data:
status = list(status_def.keys())[0]
trans = status_def[status]
assert status in data2, "%s is not in %s" % (status, data2)
assert isinstance(trans, list), "transition %s should be list" % trans
data = self._caller_checker("/type", "GET")
assert len(data) > 0, "%s should be non-empty list." % data
# request tests
new_request_name = self.create_request(config)
data = self._caller_checker("/request?name=%s" % new_request_name, "GET")
request = data[0][new_request_name]
assert request["RequestName"] == new_request_name
assert request["RequestStatus"] == "new"
self.logger.info("\nall_tests succeeded.")
def __del__(self):
self.conn.close()
del self.conn
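# Illustrative sketch (not part of the original script): a minimal driver for the
# create -> approve -> assign -> query flow implemented by ReqMgrClient above.
# The URL, identity files and request arguments are placeholders; a real run
# would take them from the command line and the JSON --config_file handled below.
def _example_reqmgr_flow():
    from types import SimpleNamespace
    config = SimpleNamespace(
        cert="/tmp/usercert.pem",  # hypothetical identity files
        key="/tmp/userkey.pem",
        request_names=None,
        request_args={
            "createRequest": {"RequestType": "ReReco"},  # placeholder arguments
            "assignRequest": {"Team": "myteam"},
        },
    )
    client = ReqMgrClient("https://reqmgr.example.org", config, logger=logging.getLogger())
    request_name = client.create_request(config)  # also approves the request
    client.assign_request(config)
    return client.query_requests(config, to_query=request_name)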
# ---------------------------------------------------------------------------
def process_cli_args():
def err_exit(msg, parser):
print('\n')
parser.print_help()
print("\n\n%s" % msg)
sys.exit(1)
parser = ArgumentParser(usage='%(prog)s [options]', add_help=False)
actions = define_cli_options(parser)
# opts - parsed command line options (argparse Namespace)
opts = parser.parse_args()
# check command line arguments validity
if not opts.reqmgrurl:
err_exit("Missing mandatory --reqmgrurl.", parser)
if opts.create_request and not opts.config_file:
err_exit("When --create_request, --config_file is necessary.", parser)
if opts.create_request and opts.request_names:
err_exit("--request_names can't be provided with --create_request", parser)
if opts.all_tests and not opts.config_file:
err_exit("When --all_tests, --config_file is necessary", parser)
if opts.json and not (opts.create_request or opts.assign_request or opts.all_tests):
err_exit("--json only with --create_request, --assign_request or --all_tests", parser)
for action in [name for name in actions if getattr(opts, name)]:
if opts.all_tests and action and action != "all_tests":
err_exit("Arguments --all_tests and --%s mutually exclusive." % action, parser)
if opts.request_names:
# make it a list here
opts.request_names = opts.request_names.split(',')
return opts, actions
def define_cli_options(parser):
actions = []
# "-h" ------------------------------------------------------------------
help = "Display this help"
parser.add_argument("-h", "--help", help=help, action='help')
# "-c" ------------------------------------------------------------------
help = ("User cert file (or cert proxy file). "
"If not defined, tries X509_USER_CERT then X509_USER_PROXY env. "
"variables. And lastly /tmp/x509up_uUID.")
parser.add_argument("-c", "--cert", help=help)
# "-k" ------------------------------------------------------------------
help = ("User key file (or cert proxy file). "
"If not defined, tries X509_USER_KEY then X509_USER_PROXY env. "
"variables. And lastly /tmp/x509up_uUID.")
parser.add_argument("-k", "--key", help=help)
# -u --------------------------------------------------------------------
help = ("Request Manager service address (if not options is supplied, "
"returns a list of the requests in ReqMgr) "
"e.g.: https://maxareqmgr01.cern.ch")
parser.add_argument("-u", "--reqmgrurl", help=help)
# -f | |
# Copyright (c) 2020-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Tuple
import threading
import traceback
import socket
from collections import deque
from multiprocessing import reduction
from nvidia.dali._utils.external_source_impl import SourceKind, _is_generator_function
from nvidia.dali._multiproc.shared_batch import SharedBatchWriter, SharedBatchMeta, BufShmChunk, \
assert_valid_data_type, read_shm_message, write_shm_message
from nvidia.dali._multiproc.messages import CompletedTask, WorkerArgs, ShmMessageDesc, ScheduledTask
from nvidia.dali._multiproc.shared_queue import Dispatcher
class _WorkerProcessingResult:
"""Internal worker message containing computed minibatch or error message sent from the main thread
to the dispatcher thread. The dispatcher thread serializes the batch or the error and
forwards the result as `CompletedTask` to the main process"""
def __init__(self, scheduled, shm_chunk, data_batch=None, exception=None,
traceback_str=None):
self.context_i = scheduled.context_i
self.scheduled_i = scheduled.scheduled_i
self.minibatch_i = scheduled.task.minibatch_i
self.shm_chunk = shm_chunk
self.data_batch = data_batch
self.exception = exception
self.traceback_str = traceback_str
@classmethod
def done(cls, scheduled, shm_chunk, data_batch):
return cls(scheduled, shm_chunk, data_batch)
@classmethod
def failed(cls, scheduled, shm_chunk, exception, traceback_str=None):
return cls(scheduled, shm_chunk, exception=exception, traceback_str=traceback_str)
def is_failed(self):
return self.exception is not None
class SharedBatchDispatcher(Dispatcher):
"""SharedBatchesDispatcher serializes batches, puts them into provided
shared memory chunks along with completed task description and puts information
about ready chunks into the `queue`. It processes tasks in a separate thread to
overlap serialization of minibatches with the computation of the next minibatches (useful when
a callback spends a lot of time waiting on IO) and to avoid having multiple worker processes
wait on inter-process ShmQueue access"""
def __init__(self, worker_id, result_queue, recv_queues):
# if writing results fails, close the receiving queues to unblock
# the main thread that may be waiting on new tasks to process
def on_thread_exit():
for queue in recv_queues:
queue.close()
super().__init__(result_queue, on_thread_exit)
self.worker_id = worker_id
def _serialize_failed_task(self, processed_task : _WorkerProcessingResult):
"""
Puts a CompletedTask instance (describing an error encountered while producing a batch) into the provided
shared memory chunk (`processed_task.shm_chunk`).
Returns a `ShmMessageDesc` instance that describes the shared memory chunk and the placement (offset=0, size)
of the serialized CompletedTask instance in the chunk.
"""
shm_chunk = processed_task.shm_chunk
completed_task = CompletedTask.failed(self.worker_id, processed_task)
return write_shm_message(
self.worker_id, shm_chunk, completed_task, 0, resize=True)
def _serialize_done_task(self, processed_task : _WorkerProcessingResult):
"""
Puts the produced batch into the provided shared memory chunk (`processed_task.shm_chunk`).
Layout of the data in the chunk:
[1. samples from the batch | 2. batch meta-data | 3. completed task].
1. Binary encoded samples from the batch (underlying data of numpy arrays),
aimed to be used as initialization buffers for arrays with no additional copy or deserialization.
2. Pickled list of meta-data of each sample, such as the sample's binary data offset in the chunk,
a shape and a type of the array.
3. Pickled CompletedTask instance (that contains offset and size of the serialized list from the second point).
Returns a `ShmMessageDesc` instance that describes the shared memory chunk and the placement (offset, size) of the
serialized CompletedTask instance in the chunk.
"""
shm_chunk = processed_task.shm_chunk
sbw = SharedBatchWriter(shm_chunk, processed_task.data_batch)
batch_meta = SharedBatchMeta.from_writer(sbw)
completed_task = CompletedTask.done(self.worker_id, processed_task, batch_meta)
return write_shm_message(
self.worker_id, shm_chunk, completed_task, sbw.total_size, resize=True)
def serialize_msgs(self, processed_tasks: List[_WorkerProcessingResult]):
shm_msgs = []
for processed_task in processed_tasks:
if processed_task.is_failed(): # one of the tasks failed
shm_msgs.append(self._serialize_failed_task(processed_task))
else:
shm_msgs.append(self._serialize_done_task(processed_task))
return shm_msgs
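# Illustrative sketch (not DALI's actual API): the _serialize_done_task docstring
# above describes the chunk layout [raw sample data | pickled per-sample meta |
# pickled completed-task record]. The self-contained toy below reproduces that
# idea with the standard library only; the field names and the record format are
# invented here purely for illustration.
def _toy_shared_batch_roundtrip():
    import pickle
    from multiprocessing import shared_memory
    samples = [b"sample-0-bytes", b"sample-1-bytes-longer"]
    # 1. concatenate raw sample bytes, remembering each sample's (offset, size)
    data = b"".join(samples)
    meta, offset = [], 0
    for s in samples:
        meta.append({"offset": offset, "size": len(s)})
        offset += len(s)
    meta_blob = pickle.dumps(meta)  # 2. per-sample meta-data
    task_blob = pickle.dumps({"meta_offset": len(data),  # 3. trailing record that
                              "meta_size": len(meta_blob)})  # points at the meta list
    meta_off = len(data)
    task_off = meta_off + len(meta_blob)
    total = task_off + len(task_blob)
    shm = shared_memory.SharedMemory(create=True, size=total)
    try:
        shm.buf[:meta_off] = data
        shm.buf[meta_off:task_off] = meta_blob
        shm.buf[task_off:total] = task_blob
        # A reader learns (task_off, size) out of band -- in DALI that is the role
        # of the ShmMessageDesc sent over the queue -- then unpickles the trailing
        # record, the meta list, and finally slices the raw sample bytes.
        task = pickle.loads(bytes(shm.buf[task_off:total]))
        meta2 = pickle.loads(bytes(shm.buf[task["meta_offset"]:task["meta_offset"] + task["meta_size"]]))
        recovered = [bytes(shm.buf[m["offset"]:m["offset"] + m["size"]]) for m in meta2]
        assert recovered == samples
    finally:
        shm.close()
        shm.unlink()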
class SimpleQueueTaskReceiver:
"""
Simple wrapper around a shm queue: pops the first element from the queue
and returns it
"""
def __init__(self, queue):
self.queue = queue
def get_task(self):
recv = self.queue.get()
if recv is None:
return
[task] = recv
return task
def get_recv_queues(self):
return [self.queue]
def close(self):
self.queue.close()
class MixedTaskReceiver:
"""
Mixes eager and idle worker threads each taking tasks from a different inter-process queue and
putting the tasks into a single (worker's internal) `task_queue`. Eager worker thread takes tasks from
the dedicated queue, i.e. tasks that can be processed only by the given worker process.
Idle worker thread takes tasks from the general queue, i.e. tasks that can be processed by
any worker process from the pool.
The eager worker reads tasks whenever any are available and moves them into the worker's internal queue,
whereas the idle worker serves as a fallback that reads a single item only if the internal queue is empty
and the main thread is not processing any task (is idle).
"""
class EagerReceiverWorker:
"""
Worker thread waiting for any tasks available in the inter-process queue `dedicated_task_queue`.
If anything is available, it takes all the items and puts them into the worker's internal task queue.
"""
def __init__(self, receiver_state, dedicated_task_queue):
self.receiver_state = receiver_state
self.dedicated_task_queue = dedicated_task_queue
self.thread = threading.Thread(target=self._receiver_loop, daemon=True)
self.thread.start()
def _receiver_loop(self):
try:
while True:
recv = self.dedicated_task_queue.get(num_samples=None)
if recv is None:
break
self.receiver_state.insert_task(recv)
finally:
self.receiver_state.insert_task(None)
def close(self):
self.dedicated_task_queue.close()
self.thread.join()
class IdleReceiverWorker:
"""
Worker thread that, when notified, takes a single task from the inter-process queue and
puts it into the worker's internal task queue. It aims to take the task only if the main thread
reports it has no tasks to process - it rechecks that condition if it had to wait on an empty
inter-process queue.
"""
def __init__(self, receiver_state, general_task_queue):
self.receiver_state = receiver_state
self.general_task_queue = general_task_queue
self.thread = threading.Thread(target=self._receiver_loop, daemon=True)
self.thread.start()
def _receiver_loop(self):
try:
while True:
if not self.receiver_state.wait_for_idle():
break
# Worker has no dedicated work to do (is idle), so take one task from general queue.
# If general queue is empty, the call will block and then recheck the condition
recv = self.general_task_queue.get(predicate=self.receiver_state.is_idle_and_uninterrupted)
if recv is None:
break
if len(recv): # if `is_idle_and_uninterrupted` returned False, recv is an empty list
self.receiver_state.insert_task(recv)
finally:
self.receiver_state.insert_task(None)
def close(self):
self.receiver_state.interrupt_idle()
self.general_task_queue.close()
self.thread.join()
class MixedReceiverState:
def __init__(self):
self.lock = threading.Lock()
self.tasks_cv = threading.Condition(lock=self.lock)
self.idle_cv = threading.Condition(lock=self.lock)
self.is_idle = False
self.is_interrupted = False
self.task_queue = deque()
def _is_idle_state(self):
return self.is_idle and len(self.task_queue) == 0
def is_idle_and_uninterrupted(self):
with self.lock:
return not self.is_interrupted and self._is_idle_state()
def wait_for_idle(self):
with self.lock:
while not self.is_interrupted and not self._is_idle_state():
self.idle_cv.wait()
return not self.is_interrupted
def interrupt_idle(self):
with self.lock:
self.is_interrupted = True
self.idle_cv.notify()
def insert_task(self, recv):
with self.lock:
if recv is None:
self.task_queue.appendleft(recv)
else:
self.task_queue.extend(recv)
self.tasks_cv.notify()
def get_task(self):
with self.lock:
waited = False
while len(self.task_queue) == 0:
# there's only one consumer of task_queue, so no stealing of tasks between waits can happen
if not waited:
waited = True
self.is_idle = True
self.idle_cv.notify()
self.tasks_cv.wait()
self.is_idle = False
task = self.task_queue.popleft()
return task
def __init__(self, dedicated_task_queue, general_task_queue):
self.dedicated_task_queue = dedicated_task_queue
self.general_task_queue = general_task_queue
self.state = self.MixedReceiverState()
self.receivers = []
try:
self.receivers.append(self.EagerReceiverWorker(self.state, self.dedicated_task_queue))
self.receivers.append(self.IdleReceiverWorker(self.state, self.general_task_queue))
except:
self.close()
raise
def get_recv_queues(self):
return [self.general_task_queue, self.dedicated_task_queue]
def get_task(self):
return self.state.get_task()
def close(self):
for receiver in self.receivers:
receiver.close()
self.receivers.clear()
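# Illustrative sketch (not DALI's implementation): the essence of the mixed
# receiver above is "drain the dedicated queue eagerly, and fall back to the
# shared general queue only when there is nothing dedicated left to do". The toy
# below shows that priority rule with plain queue.Queue objects and a single
# consumer; the real class instead coordinates two receiver threads over
# inter-process shared-memory queues, which is omitted here.
def _toy_mixed_receiver(dedicated, general, stop_token=None):
    import queue
    results = []
    while True:
        try:
            task = dedicated.get_nowait()  # eager path: dedicated work first
        except queue.Empty:
            task = general.get()           # idle path: block on the general queue
        if task is stop_token:
            break
        results.append(task)
    return results
# Example (assuming `import queue`): with d holding "d1", "d2" and g holding
# "g1" followed by None as the stop token, _toy_mixed_receiver(d, g) returns
# ["d1", "d2", "g1"] -- dedicated tasks are always consumed first.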
class IterableSource:
"""Wraps iterator/generator passed to External Source to enforce ES `cycle` policy specified by the user.
It is a counterpart of _CycleIter/_CycleGenIter wrappers from non parallel mode.
However due to prefetching in parallel mode `cycle`=raise will raise StopIteration in consecutive calls
until the new epoch starts (i.e. which happens with pipline.reset call)"""
def __init__(self, source_desc):
self.source_desc = source_desc
self._reset_iter(0)
def __call__(self, scheduled : ScheduledTask):
if self.raised_stop_iter:
# if iterator runs in "raise" mode and a new epoch started (i.e. source context was reset)
if self.source_desc.cycle == "raise" and self.epoch_start < scheduled.epoch_start:
self._reset_iter(scheduled.epoch_start)
else:
raise StopIteration
return self._get_next()
def _reset_iter(self, epoch_start):
self.iter = IterableSource.get_iter(self.source_desc)
self.raised_stop_iter = False
self.epoch_start = epoch_start
def _get_next(self):
try:
return next(self.iter)
except StopIteration:
self.raised_stop_iter = True
if self.source_desc.cycle != "quiet" and self.source_desc.cycle is not True:
raise
# in quiet mode immediately reset the source and return the first iteration
self.iter = IterableSource.get_iter(self.source_desc)
next_iter = next(self.iter)
# Set the `raised_stop_iter` flag to False after the __next__ call, so that, if it raises StopIteration
# immediately after the reset, the wrapper can consistently raise StopIteration from then on.
# The `epoch_start` is not updated - keeping track of it is not necessary in the | |
raise Exception('sessionUuid of action[RemoveRemoteCidrsFromIPsecConnectionAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class RemoveSNSDingTalkAtPersonAction(inventory.APIRemoveSNSDingTalkAtPersonMsg):
def __init__(self):
super(RemoveSNSDingTalkAtPersonAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[RemoveSNSDingTalkAtPersonAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class RemoveSchedulerJobFromSchedulerTriggerAction(inventory.APIRemoveSchedulerJobFromSchedulerTriggerMsg):
def __init__(self):
super(RemoveSchedulerJobFromSchedulerTriggerAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[RemoveSchedulerJobFromSchedulerTriggerAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class RemoveUserFromGroupAction(inventory.APIRemoveUserFromGroupMsg):
def __init__(self):
super(RemoveUserFromGroupAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[RemoveUserFromGroupAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class RemoveVmFromAffinityGroupAction(inventory.APIRemoveVmFromAffinityGroupMsg):
def __init__(self):
super(RemoveVmFromAffinityGroupAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[RemoveVmFromAffinityGroupAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class RemoveVmNicFromLoadBalancerAction(inventory.APIRemoveVmNicFromLoadBalancerMsg):
def __init__(self):
super(RemoveVmNicFromLoadBalancerAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[RemoveVmNicFromLoadBalancerAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class RequestBaremetalConsoleAccessAction(inventory.APIRequestBaremetalConsoleAccessMsg):
def __init__(self):
super(RequestBaremetalConsoleAccessAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[RequestBaremetalConsoleAccessAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class RequestConsoleAccessAction(inventory.APIRequestConsoleAccessMsg):
def __init__(self):
super(RequestConsoleAccessAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[RequestConsoleAccessAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class ResizeDataVolumeAction(inventory.APIResizeDataVolumeMsg):
def __init__(self):
super(ResizeDataVolumeAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[ResizeDataVolumeAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class ResizeRootVolumeAction(inventory.APIResizeRootVolumeMsg):
def __init__(self):
super(ResizeRootVolumeAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[ResizeRootVolumeAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class ResumeVmInstanceAction(inventory.APIResumeVmInstanceMsg):
def __init__(self):
super(ResumeVmInstanceAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[ResumeVmInstanceAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class RevertVolumeFromSnapshotAction(inventory.APIRevertVolumeFromSnapshotMsg):
def __init__(self):
super(RevertVolumeFromSnapshotAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[RevertVolumeFromSnapshotAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class RevokeResourceSharingAction(inventory.APIRevokeResourceSharingMsg):
def __init__(self):
super(RevokeResourceSharingAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[RevokeResourceSharingAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class ScanBackupStorageAction(inventory.APIScanBackupStorageMsg):
def __init__(self):
super(ScanBackupStorageAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[ScanBackupStorageAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class SearchGenerateSqlTriggerAction(inventory.APISearchGenerateSqlTriggerMsg):
def __init__(self):
super(SearchGenerateSqlTriggerAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[SearchGenerateSqlTriggerAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class SessionMessageAction(inventory.APISessionMessage):
def __init__(self):
super(SessionMessageAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class SetImageQgaAction(inventory.APISetImageQgaMsg):
def __init__(self):
super(SetImageQgaAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[SetImageQgaAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class SetL3NetworkMtuAction(inventory.APISetL3NetworkMtuMsg):
def __init__(self):
super(SetL3NetworkMtuAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[SetL3NetworkMtuAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class SetL3NetworkRouterInterfaceIpAction(inventory.APISetL3NetworkRouterInterfaceIpMsg):
def __init__(self):
super(SetL3NetworkRouterInterfaceIpAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[SetL3NetworkRouterInterfaceIpAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class SetNicQosAction(inventory.APISetNicQosMsg):
def __init__(self):
super(SetNicQosAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[SetNicQosAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class SetVipQosAction(inventory.APISetVipQosMsg):
def __init__(self):
super(SetVipQosAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[SetVipQosAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class SetVmBootOrderAction(inventory.APISetVmBootOrderMsg):
def __init__(self):
super(SetVmBootOrderAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[SetVmBootOrderAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class SetVmConsolePasswordAction(inventory.APISetVmConsolePasswordMsg):
def __init__(self):
super(SetVmConsolePasswordAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[SetVmConsolePasswordAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class SetVmHostnameAction(inventory.APISetVmHostnameMsg):
def __init__(self):
super(SetVmHostnameAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[SetVmHostnameAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class SetVmInstanceHaLevelAction(inventory.APISetVmInstanceHaLevelMsg):
def __init__(self):
super(SetVmInstanceHaLevelAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[SetVmInstanceHaLevelAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class SetVmMonitorNumberAction(inventory.APISetVmMonitorNumberMsg):
def __init__(self):
super(SetVmMonitorNumberAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[SetVmMonitorNumberAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class SetVmQgaAction(inventory.APISetVmQgaMsg):
def __init__(self):
super(SetVmQgaAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[SetVmQgaAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class SetVmRDPAction(inventory.APISetVmRDPMsg):
def __init__(self):
super(SetVmRDPAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[SetVmRDPAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class SetVmSshKeyAction(inventory.APISetVmSshKeyMsg):
def __init__(self):
super(SetVmSshKeyAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[SetVmSshKeyAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class SetVmStaticIpAction(inventory.APISetVmStaticIpMsg):
def __init__(self):
super(SetVmStaticIpAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[SetVmStaticIpAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class SetVmUsbRedirectAction(inventory.APISetVmUsbRedirectMsg):
def __init__(self):
super(SetVmUsbRedirectAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[SetVmUsbRedirectAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class SetVolumeQosAction(inventory.APISetVolumeQosMsg):
def __init__(self):
super(SetVolumeQosAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[SetVolumeQosAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class SetVpcVRouterDistributedRoutingEnabledAction(inventory.APISetVpcVRouterDistributedRoutingEnabledMsg):
def __init__(self):
super(SetVpcVRouterDistributedRoutingEnabledAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[SetVpcVRouterDistributedRoutingEnabledAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class ShareResourceAction(inventory.APIShareResourceMsg):
def __init__(self):
super(ShareResourceAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[ShareResourceAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class StartBaremetalPxeServerAction(inventory.APIStartBaremetalPxeServerMsg):
def __init__(self):
super(StartBaremetalPxeServerAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[StartBaremetalPxeServerAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class StartEcsInstanceAction(inventory.APIStartEcsInstanceMsg):
def __init__(self):
super(StartEcsInstanceAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[StartEcsInstanceAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class StartVmInstanceAction(inventory.APIStartVmInstanceMsg):
def __init__(self):
super(StartVmInstanceAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[StartVmInstanceAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class StopBaremetalPxeServerAction(inventory.APIStopBaremetalPxeServerMsg):
def __init__(self):
super(StopBaremetalPxeServerAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[StopBaremetalPxeServerAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class StopEcsInstanceAction(inventory.APIStopEcsInstanceMsg):
def __init__(self):
super(StopEcsInstanceAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[StopEcsInstanceAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class StopVmInstanceAction(inventory.APIStopVmInstanceMsg):
def __init__(self):
super(StopVmInstanceAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[StopVmInstanceAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
self.out = evt
return self.out
class SubmitLongJobAction(inventory.APISubmitLongJobMsg):
def __init__(self):
super(SubmitLongJobAction, self).__init__()
self.sessionUuid = None
self.out = None
def run(self):
if not self.sessionUuid:
raise Exception('sessionUuid of action[SubmitLongJobAction] cannot be None')
evt = api.async_call(self, self.sessionUuid)
for host in hosts_dictionary.keys(): # we ping ourselves as well
hosts_fping = hosts_fping + host + " "
for srchost in hosts_dictionary.keys():
print
print("Starting ping run from " + srchost + " to all nodes")
fileurl = os.path.join(logdir, srchost + "_" + "all")
command = "ssh -o StrictHostKeyChecking=no " + srchost + \
" fping -C " + fping_count_str + " -q -A " + hosts_fping
with open(fileurl, 'wb', 0) as logfping:
runfping = subprocess.Popen(shlex.split(
command), stderr=subprocess.STDOUT, stdout=logfping)
runfping.wait()
logfping.close()
print("Ping run from " + srchost + " to all nodes completed")
def mean_list(list):
if len(list) == 0:
sys.exit(RED + "QUIT: " + NOCOLOR +
"cannot calculate mean of list: " + repr(list) + "\n")
# We replace a timeout "-" with a 1 sec (1000 ms) latency
list = [lat.replace('-', '1000.00') for lat in list]
list = [float(lat) for lat in list] # we convert them to float
mean = sum(list) / len(list)
return mean
def max_list(list):
if len(list) == 0:
sys.exit(RED + "QUIT: " + NOCOLOR +
"cannot calculate max of list: " + repr(list) + "\n")
# We replace a timeout "-" with a 1 sec (1000 ms) latency
list = [lat.replace('-', '1000.00') for lat in list]
list = [float(lat) for lat in list]
max_lat = max(list)
return max_lat
def min_list(list):
if len(list) == 0:
sys.exit(RED + "QUIT: " + NOCOLOR +
"cannot calculate min of list: " + repr(list) + "\n")
# We replace a timeout "-" with a 1 sec (1000 ms) latency
list = [lat.replace('-', '1000.00') for lat in list]
list = [float(lat) for lat in list]
min_lat = min(list)
return min_lat
def stddev_list(list, mean):
if len(list) == 0:
sys.exit(
RED +
"QUIT: " +
NOCOLOR +
"cannot calculate standard deviation of list: " +
repr(list) +
"\n")
# We replace a timeout "-" with a 1 sec (1000 ms) latency
list = [lat.replace('-', '1000.00') for lat in list]
list = [float(lat) for lat in list]
stddev_lat = sqrt(float(
reduce(lambda x, y: x + y, map(
lambda x: (x - mean) ** 2, list))) / len(list))
stddev_lat = Decimal(stddev_lat)
stddev_lat = round(stddev_lat, 2)
return stddev_lat
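# Minimal usage sketch for the helpers above (illustrative only, never called
# by the tool): '-' timeouts count as 1000.00 msec, exactly as in the helpers.
def _demo_latency_helpers():
    sample = ['0.50', '0.70', '-', '0.60']          # tokens as parsed from an fping line
    mean = mean_list(sample)                        # (0.5 + 0.7 + 1000.0 + 0.6) / 4 = 250.45
    print("mean   : " + str(mean))
    print("max    : " + str(max_list(sample)))      # 1000.0 (the timeout)
    print("min    : " + str(min_list(sample)))      # 0.5
    print("stddev : " + str(stddev_list(sample, mean)))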
def load_multiple_fping(logdir, hosts_dictionary):
all_fping_dictionary = {}
all_fping_dictionary_max = {}
all_fping_dictionary_min = {}
all_fping_dictionary_stddev = {}
mean_all = []
max_all = []
min_all = []
# Loads log file and returns dictionary
for srchost in hosts_dictionary.keys():
print
print("Loading ping results of " + srchost + " to all nodes")
fileurl = os.path.join(logdir, srchost + "_all")
logfping = open(fileurl, 'r', 0)
for rawfping in logfping:
hostIP = rawfping.split(':')[0]
hostIP = hostIP.rstrip(' ')
if srchost == hostIP: # we ignore ourselves
continue
latencies = rawfping.split(':')[1]
latencies = latencies.lstrip(' ') # Clean up first space
latencies = latencies.rstrip('\n') # Clean up new line character
latencies_list = latencies.split(' ')
# our mean calculation expects strings; this needs changing when optimizing
mean_all.append(str(mean_list(latencies_list)))
max_all.append(str(max_list(latencies_list)))  # numeric max; avoids lexicographic string comparison
min_all.append(str(min_list(latencies_list)))  # numeric min; '-' timeouts counted as 1000 ms
# we use Decimal to round the results
mean = Decimal(mean_list(mean_all))
mean = round(mean, 2) # we round to 2 decimals
all_fping_dictionary[srchost] = mean
all_fping_dictionary_max[srchost] = max_list(max_all)
all_fping_dictionary_min[srchost] = min_list(min_all)
all_fping_dictionary_stddev[srchost] = stddev_list(mean_all, mean)
print("Load ping results from " + srchost + " to all nodes completed")
return (all_fping_dictionary, all_fping_dictionary_max,
all_fping_dictionary_min, all_fping_dictionary_stddev)
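# Each line produced by "fping -C <n> -q -A <targets>" looks roughly like
#     10.0.0.12 : 0.61 0.81 - 0.73
# i.e. "<address> : <one latency per ping, '-' on timeout>", which is what the
# loaders split on ':' and whitespace.  Tiny illustrative parse (not used):
def _demo_parse_fping_line():
    rawfping = "10.0.0.12 : 0.61 0.81 - 0.73"
    hostIP = rawfping.split(':')[0].rstrip(' ')
    latencies_list = rawfping.split(':')[1].strip().split(' ')
    print(hostIP + " -> " + str(latencies_list))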
def load_single_fping(logdir, hosts_dictionary):
single_fping_dictionary = {}
single_fping_dictionary_max = {}
single_fping_dictionary_min = {}
single_fping_dictionary_stddev = {}
# Loads log file and returns dictionary
for srchost in hosts_dictionary.keys():
print
print("Loading ping results of " + srchost + " to each node")
for dsthost in hosts_dictionary.keys():
if srchost is not dsthost:
print("\tLoading from " + srchost + " to " + dsthost)
fileurl = os.path.join(logdir, srchost + "_" + dsthost)
try:
with open(fileurl, 'r', 0) as logfping:
rawfping = logfping.readline() # Only 1 line
hostIP = rawfping.split(':')[0]
hostIP = hostIP.rstrip(' ')
latencies = rawfping.split(':')[1]
latencies = latencies.lstrip(
' ') # Clean up first space
# Clean up new line character
latencies = latencies.rstrip('\n')
latencies_list = latencies.split(' ')
print("\tLoaded from " + srchost +
" to " + dsthost + " completed")
except Exception:
sys.exit(RED + "QUIT: " + NOCOLOR +
"Cannot parse LOG file: " + fileurl)
# Following calls need to be optimized
# we use Decimal to round the results
mean = Decimal(mean_list(latencies_list))
mean = round(mean, 2) # we round to 2 decimals
single_fping_dictionary[hostIP] = mean
single_fping_dictionary_max[hostIP] = max_list(latencies_list)
single_fping_dictionary_min[hostIP] = min_list(latencies_list)
single_fping_dictionary_stddev[hostIP] = stddev_list(
latencies_list, mean)
print("Load ping results from " + srchost + " to each node completed")
return (single_fping_dictionary, single_fping_dictionary_max,
single_fping_dictionary_min, single_fping_dictionary_stddev)
def fping_KPI(
fping_dictionary,
fping_dictionary_max,
fping_dictionary_min,
fping_dictionary_stddev,
test_string,
max_avg_latency,
max_max_latency,
max_stddev_latency):
errors = 0
print("Results for test " + test_string + "")
max_avg_latency_str = str(round(max_avg_latency, 2))
max_max_latency_str = str(round(max_max_latency, 2))
max_stddev_latency_str = str(round(max_stddev_latency, 2))
for host in fping_dictionary.keys():
if fping_dictionary[host] >= max_avg_latency:
errors = errors + 1 # yes yes +=
print(RED +
"ERROR: " +
NOCOLOR +
"on host " +
host +
" the " +
test_string +
" average latency is " +
str(fping_dictionary[host]) +
" msec. Which is higher than the KPI of " +
max_avg_latency_str +
" msec")
else:
print(GREEN +
"OK: " +
NOCOLOR +
"on host " +
host +
" the " +
test_string +
" average latency is " +
str(fping_dictionary[host]) +
" msec. Which is lower than the KPI of " +
max_avg_latency_str +
" msec")
if fping_dictionary_max[host] >= max_max_latency:
errors = errors + 1
print(RED +
"ERROR: " +
NOCOLOR +
"on host " +
host +
" the " +
test_string +
" maximum latency is " +
str(fping_dictionary_max[host]) +
" msec. Which is higher than the KPI of " +
max_max_latency_str +
" msec")
else:
print(GREEN +
"OK: " +
NOCOLOR +
"on host " +
host +
" the " +
test_string +
" maximum latency is " +
str(fping_dictionary_max[host]) +
" msec. Which is lower than the KPI of " +
max_max_latency_str +
" msec")
if fping_dictionary_min[host] >= max_avg_latency:
errors = errors + 1
print(RED +
"ERROR: " +
NOCOLOR +
"on host " +
host +
" the " +
test_string +
" minimum latency is " +
str(fping_dictionary_min[host]) +
" msec. Which is higher than the KPI of " +
max_avg_latency_str +
" msec")
else:
print(GREEN +
"OK: " +
NOCOLOR +
"on host " +
host +
" the " +
test_string +
" minimum latency is " +
str(fping_dictionary_min[host]) +
" msec. Which is lower than the KPI of " +
max_avg_latency_str +
" msec")
if fping_dictionary_stddev[host] >= max_stddev_latency:
errors = errors + 1
print(RED +
"ERROR: " +
NOCOLOR +
"on host " +
host +
" the " +
test_string +
" standard deviation of latency is " +
str(fping_dictionary_stddev[host]) +
" msec. Which is higher than the KPI of " +
max_stddev_latency_str +
" msec")
else:
print(GREEN +
"OK: " +
NOCOLOR +
"on host " +
host +
" the " +
test_string +
" standard deviation of latency is " +
str(fping_dictionary_stddev[host]) +
" msec. Which is lower than the KPI of " +
max_stddev_latency_str +
" msec")
print
return errors  # number of failed KPI checks; not an exact count of bad nodes in all cases
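# Illustrative call of fping_KPI() with made-up example thresholds (not the
# tool's defaults); it only needs the four per-host dictionaries returned by
# the loaders above.
def _demo_fping_kpi():
    avg = {'node1': 0.4}
    maximum = {'node1': 1.2}
    minimum = {'node1': 0.2}
    stddev = {'node1': 0.1}
    errors = fping_KPI(avg, maximum, minimum, stddev,
                       "1:1 ICMP latency", 1.0, 2.0, 0.33)
    print("KPI errors: " + str(errors))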
def test_ssh(hosts_dictionary):
for host in hosts_dictionary.keys():
try:
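# -oStrictHostKeyChecking=no skips the interactive host-key prompt and
# -oBatchMode=yes makes ssh fail instead of asking for a password, so this
# connectivity check cannot hang waiting for input.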
ssh_return_code = subprocess.call(['ssh',
'-oStrictHostKeyChecking=no',
'-oBatchMode=yes',
host,
'uname'],
stdout=DEVNULL,
stderr=DEVNULL)
if ssh_return_code == 0:
print(GREEN + "OK: " + NOCOLOR +
"SSH with node " + host + " works")
else:
sys.exit(
RED +
"QUIT: " +
NOCOLOR +
"cannot run ssh to " +
host +
". Please fix this problem before running this tool again")
except Exception:
sys.exit(
RED +
"QUIT: " +
NOCOLOR +
"cannot run ssh to " +
host +
". Please fix this problem before running this tool again")
print
def print_end_summary(s_avg_fp_err, a_avg_fp_err, lat_kpi_ok, fping_kpi_ok):
# End summary and say goodbye
passed = True
print
print("The summary of this run:")
print
if s_avg_fp_err > 0:
print(RED + "\tThe 1:1 fping latency test failed " +
str(s_avg_fp_err) + " time[s]" + NOCOLOR)
passed = False
else:
print(
GREEN
if flag == 1 and ('pron-det' in pos_tags.columns):
used_features[41] = True
features_pair.append(pos_tags['pron-det'][pair])
else:
features_pair.append(0)
if flag == 1 and ('pron-indp' in pos_tags.columns):
used_features[42] = True
features_pair.append(pos_tags['pron-indp'][pair])
else:
features_pair.append(0)
if flag == 1 and ('adv' in pos_tags.columns):
used_features[43] = True
features_pair.append(pos_tags['adv'][pair])
else:
features_pair.append(0)
if flag == 1 and ('num' in pos_tags.columns):
used_features[44] = True
features_pair.append(pos_tags['num'][pair])
else:
features_pair.append(0)
if flag == 1 and ('prp' in pos_tags.columns):
used_features[45] = True
features_pair.append(pos_tags['prp'][pair])
else:
features_pair.append(0)
if flag == 0 and ('intj' in pos_tags.columns):
used_features[46] = True
features_pair.append(pos_tags['intj'][pair])
else:
features_pair.append(0)
if flag == 0 and ('conj-s' in pos_tags.columns):
used_features[47] = True
features_pair.append(pos_tags['conj-s'][pair])
else:
features_pair.append(0)
if flag == 1 and ('conj-c' in pos_tags.columns):
used_features[48] = True
features_pair.append(pos_tags['conj-c'][pair])
else:
features_pair.append(0)
if flag == 1 and ('punc' in pos_tags.columns):
used_features[49] = True
features_pair.append(pos_tags['punc'][pair])
else:
features_pair.append(0)
if flag == 0 and ('all_ners' in ners.columns):
used_features[50] = True
features_pair.append(ners['all_ners'][pair])
else:
features_pair.append(0)
if flag == 0 and ('B-ABSTRACCAO' in ners.columns):
used_features[51] = True
features_pair.append(ners['B-ABSTRACCAO'][pair])
else:
features_pair.append(0)
if flag == 0 and ('B-ACONTECIMENTO' in ners.columns):
used_features[52] = True
features_pair.append(ners['B-ACONTECIMENTO'][pair])
else:
features_pair.append(0)
if flag == 0 and ('B-COISA' in ners.columns):
used_features[53] = True
features_pair.append(ners['B-COISA'][pair])
else:
features_pair.append(0)
if flag == 0 and ('B-LOCAL' in ners.columns):
used_features[54] = True
features_pair.append(ners['B-LOCAL'][pair])
else:
features_pair.append(0)
if flag == 0 and ('B-OBRA' in ners.columns):
used_features[55] = True
features_pair.append(ners['B-OBRA'][pair])
else:
features_pair.append(0)
if flag == 0 and ('B-ORGANIZACAO' in ners.columns):
used_features[56] = True
features_pair.append(ners['B-ORGANIZACAO'][pair])
else:
features_pair.append(0)
if flag == 0 and ('B-OUTRO' in ners.columns):
used_features[57] = True
features_pair.append(ners['B-OUTRO'][pair])
else:
features_pair.append(0)
if flag == 0 and ('B-PESSOA' in ners.columns):
used_features[58] = True
features_pair.append(ners['B-PESSOA'][pair])
else:
features_pair.append(0)
if flag == 0 and ('B-TEMPO' in ners.columns):
used_features[59] = True
features_pair.append(ners['B-TEMPO'][pair])
else:
features_pair.append(0)
if flag == 0 and ('B-VALOR' in ners.columns):
used_features[60] = True
features_pair.append(ners['B-VALOR'][pair])
else:
features_pair.append(0)
if flag == 1:
used_features[61] = True
features_pair.append(dependencies['dependency_parsing_jc'][pair])
if flag == 1:
used_features[62] = True
features_pair.append(wn_cosine_1_2['NG'][pair])
if flag == 1:
used_features[63] = True
features_pair.append(cn_cosine_1_2_3['NG'][pair])
if flag == 0:
used_features[64] = True
features_pair.append(numberbatch[pair])
if flag == 0:
used_features[65] = True
features_pair.append(numberbatch_tfidf[pair])
tuple_features_pair = tuple(features_pair)
features.append(tuple_features_pair)
if f_selection is not None:
return np.array(features)
else:
return np.array(features), used_features
def debug_data(data, filename):
""" Function used to debug the corpus state during preprocessing """
if isinstance(data, pd.DataFrame):
data_to_print = data.values.tolist()
else:
data_to_print = data
with open(filename, 'w') as f:
for item in data_to_print:
f.write("%s\n" % item)
def load_embeddings_models():
""" Function used to load the word-embedding models """
# ---LOADING WORD2VEC MODEL---
model_load_path = os.path.join(ROOT_PATH, 'models', 'word2vec', 'NILC', 'nilc_cbow_s300_300k.txt')
# model_load_path = os.path.join('models', 'word2vec', 'NILC', 'nilc_skip_s300.txt')
start_time = time.time()
print("Started loading the word2vec model")
word2vec_model = KeyedVectors.load_word2vec_format(model_load_path)
# word2vec_model = None
print("Model loaded")
print("--- %s seconds ---" %(time.time() - start_time))
print('\a')
# ---LOADING FASTTEXT MODEL---
model_path = os.path.join(ROOT_PATH, 'models', 'fastText', 'cc.pt.300_300k.vec')
start_time = time.time()
print("Started loading the fasttext model")
fasttext_model = KeyedVectors.load_word2vec_format(model_path)
# fasttext_model = None
print("Model loaded")
print("--- %s seconds ---" %(time.time() - start_time))
print('\a')
# ---LOADING PT-LKB MODEL---
model_load_path = os.path.join(ROOT_PATH, 'models', 'ontoPT', 'PT-LKB_embeddings_64', 'ptlkb_64_30_200_p_str.emb')
# model_load_path = os.path.join('models', 'ontoPT', 'PT-LKB_embeddings_128', 'ptlkb_128_80_10_p_str.emb')
start_time = time.time()
print("Started loading the PT-LKB-64 model")
ptlkb64_model = KeyedVectors.load_word2vec_format(model_load_path)
# ptlkb64_model = None
print("Model loaded")
print("--- %s seconds ---" %(time.time() - start_time))
print('\a')
# ---LOADING GLOVE-300 MODEL---
model_load_path = os.path.join(ROOT_PATH, 'models', 'glove', 'glove_s300_300k.txt')
# model_load_path = os.path.join('models', 'glove', 'glove_s100.txt')
start_time = time.time()
print("Started loading the GLOVE 300 dimensions model")
glove300_model = KeyedVectors.load_word2vec_format(model_load_path)
# glove300_model = None
print("Model loaded")
print("--- %s seconds ---" %(time.time() - start_time))
print('\a')
# ---LOADING NUMBERBATCH MODEL---
model_load_path = os.path.join(ROOT_PATH, 'models', 'numberbatch', 'numberbatch-17.02_pt_tratado.txt')
start_time = time.time()
print("Started loading the NUMBERBATCH dimensions model")
numberbatch_model = KeyedVectors.load_word2vec_format(model_load_path)
# numberbatch_model = None
print("Model loaded")
print("--- %s seconds ---" %(time.time() - start_time))
print('\a')
return word2vec_model, fasttext_model, ptlkb64_model, glove300_model, numberbatch_model
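# Minimal sketch of how any of the KeyedVectors models returned above can be
# queried; the Portuguese example words are only illustrative and may be
# missing from a truncated 300k-word vocabulary.
def _demo_query_embeddings(model):
    try:
        print("similarity(rei, rainha) = %s" % model.similarity('rei', 'rainha'))
        print(model.most_similar(positive=['rei'], topn=3))
    except KeyError:
        print("example words are not in this model's vocabulary")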
def best_percentile_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor):
""" Function used to select the best percentile selector """
percentile_score = 0
percentiles = [25, 35, 45, 50, 55, 65, 75]
# percentiles = [45]
percentile_selector = None
percentile_train_features_selected = None
percentile_test_features_selected = None
for percentile in percentiles:
print(percentile)
temp_percentile_selector = SelectPercentile(score_func=f_regression, percentile=percentile)
temp_percentile_selector.fit(train_features, train_similarity_target)
temp_percentile_train_features_selected = temp_percentile_selector.transform(train_features)
temp_percentile_test_features_selected = temp_percentile_selector.transform(test_features)
regressor.fit(temp_percentile_train_features_selected, train_similarity_target)
temp_score = regressor.score(temp_percentile_test_features_selected, test_similarity_target)
print("The score on the selected features (Percentile Selector): %.3f" % temp_score)
if temp_score > percentile_score:
percentile_score = temp_score
percentile_selector = temp_percentile_selector
percentile_train_features_selected = temp_percentile_train_features_selected
percentile_test_features_selected = temp_percentile_test_features_selected
percentile_mask = percentile_selector.get_support()
print("This is the percentile mask: ")
print(percentile_mask)
return percentile_selector, percentile_score, percentile_train_features_selected, percentile_test_features_selected, percentile_mask
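# Standalone sketch of the SelectPercentile step used above, on synthetic data
# so it can be run in isolation (the regression problem itself is made up).
def _demo_select_percentile():
    from sklearn.datasets import make_regression
    from sklearn.feature_selection import SelectPercentile, f_regression
    X, y = make_regression(n_samples=100, n_features=20, n_informative=5, random_state=0)
    selector = SelectPercentile(score_func=f_regression, percentile=25)
    X_selected = selector.fit_transform(X, y)
    print(X_selected.shape)        # (100, 5): 25% of the 20 features are kept
    print(selector.get_support())  # boolean mask over the 20 original features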
def best_model_based_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor):
""" Function used to select the best model based selector """
model_based_score = 0
scaling_factors = ["0.25*mean", "0.5*mean", "median", "1.25*mean", "1.5*mean"]
# scaling_factors = ["0.5*mean", "median"]
model_based_selector = None
model_based_train_features_selected = None
model_based_test_features_selected = None
for factor in scaling_factors:
print(factor)
temp_model_based_selector = SelectFromModel(RandomForestRegressor(n_estimators=100), threshold=factor)
temp_model_based_selector.fit(train_features, train_similarity_target)
temp_model_based_train_features_selected = temp_model_based_selector.transform(train_features)
temp_model_based_test_features_selected = temp_model_based_selector.transform(test_features)
regressor.fit(temp_model_based_train_features_selected, train_similarity_target)
temp_score = regressor.score(temp_model_based_test_features_selected, test_similarity_target)
print("The score on the selected features (Model Based Selector): %.3f" % temp_score)
if temp_score > model_based_score:
model_based_score = temp_score
model_based_selector = temp_model_based_selector
model_based_train_features_selected = temp_model_based_train_features_selected
model_based_test_features_selected = temp_model_based_test_features_selected
model_based_mask = model_based_selector.get_support()
print("This is the model based mask: ")
print(model_based_mask)
return model_based_selector, model_based_score, model_based_train_features_selected, model_based_test_features_selected, model_based_mask
def best_iterative_based_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor):
""" Function used to select the best iterative based selector """
iterative_based_score = 0
# since all pairs have the same number of features, position 0 is arbitrarily used to compute how many features are in play
min_number_features = int(0.15*len(train_features[0]))
max_number_features = int(0.85*len(train_features[0]))
# min_number_features = 19
# max_number_features = 20
iterative_based_selector = None
iterative_based_train_features_selected = None
iterative_based_test_features_selected = None
for i in range(min_number_features, max_number_features):
print(i)
temp_iterative_based_selector = RFE(RandomForestRegressor(n_estimators=100), n_features_to_select=i)
temp_iterative_based_selector.fit(train_features, train_similarity_target)
temp_iterative_based_train_features_selected = temp_iterative_based_selector.transform(train_features)
temp_iterative_based_test_features_selected = temp_iterative_based_selector.transform(test_features)
regressor.fit(temp_iterative_based_train_features_selected, train_similarity_target)
temp_score = regressor.score(temp_iterative_based_test_features_selected, test_similarity_target)
print("The score on the selected features (Iterative Based Selector): %.3f" % temp_score)
if temp_score > iterative_based_score:
iterative_based_score = temp_score
iterative_based_selector = temp_iterative_based_selector
iterative_based_train_features_selected = temp_iterative_based_train_features_selected
iterative_based_test_features_selected = temp_iterative_based_test_features_selected
iterative_based_mask = iterative_based_selector.get_support()
print("This is the iterative based mask: ")
print(iterative_based_mask)
return iterative_based_selector, iterative_based_score, iterative_based_train_features_selected, iterative_based_test_features_selected, iterative_based_mask
def rfe_cross_validation(train_features, train_similarity_target, test_features):
estimator = GradientBoostingRegressor(n_estimators=100)
rfecv = RFECV(estimator, step=1, cv=10)
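# RFECV drops one feature per iteration (step=1) and uses 10-fold
# cross-validation to pick how many features to keep, so unlike the selectors
# above no feature count or percentile has to be chosen by hand.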
rfecv.fit(train_features, train_similarity_target)
selected_train_features = rfecv.transform(train_features)
selected_test_features = rfecv.transform(test_features)
rfecv_mask = rfecv.get_support()
print(rfecv_mask)
return selected_train_features, selected_test_features
def feature_selection(train_features, test_features, train_similarity_target, test_similarity_target, regressor, used_features):
""" Function used to perform feature selection """
# percentile selector
percentile_selector, percentile_score, percentile_train_features_selected, percentile_test_features_selected, percentile_mask = best_percentile_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor)
# model based selector
model_based_selector, model_based_score, model_based_train_features_selected, model_based_test_features_selected, model_based_mask = best_model_based_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor)
# iterative based selector
iterative_based_selector, iterative_based_score, iterative_based_train_features_selected, iterative_based_test_features_selected, iterative_based_mask = best_iterative_based_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor)
all_scores = []
regressor.fit(train_features, train_similarity_target)
print("The score on all features: %.3f" % regressor.score(test_features, test_similarity_target))
all_scores.append(regressor.score(test_features, test_similarity_target))
# show results for the percentile selector
all_scores.append(percentile_score)
# show results for the model based selector
all_scores.append(model_based_score)
# show results for the iterative based selector
all_scores.append(iterative_based_score)
max_value_position = all_scores.index(max(all_scores))
if max_value_position == 0:
print("Returning all features!\n")
return train_features, test_features
elif max_value_position == 1:
percentile_mask = build_mask(percentile_mask, used_features)
mask_save_path = os.path.join('feature_selection_masks', 'percentile_mask.txt')
debug_data(percentile_mask, mask_save_path)
print("Returning features selected with the percentile selector!\n")
return percentile_selector, percentile_train_features_selected, percentile_test_features_selected
elif max_value_position == 2:
model_based_mask = build_mask(model_based_mask, used_features)
mask_save_path = os.path.join('feature_selection_masks', 'model_based_mask.txt')
debug_data(model_based_mask, mask_save_path)
print("Returning features selected with the model based selector!\n")
return model_based_selector, model_based_train_features_selected, model_based_test_features_selected
else:
iterative_based_mask = build_mask(iterative_based_mask, used_features)
mask_save_path = os.path.join('feature_selection_masks', 'iterative_based_mask.txt')
debug_data(iterative_based_mask, mask_save_path)
print("Returning features selected with the iterative based selector!\n")
return iterative_based_selector, iterative_based_train_features_selected, iterative_based_test_features_selected
def build_mask(mask, unused_features_positions):
""" Function used to complete the mask with unused features not available for feature selection """
final_mask = mask.tolist()
for i in range(len(unused_features_positions)):
if not unused_features_positions[i]:
final_mask.insert(i, False)
return final_mask
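# build_mask() re-aligns a selector mask (defined only over the features that
# were actually extracted) with the full feature layout: wherever
# used_features[i] is False, a False is re-inserted at position i.
# Tiny made-up example:
def _demo_build_mask():
    import numpy as np
    selector_mask = np.array([True, False, True])    # over the 3 extracted features
    used_features = [True, False, True, True]        # slot 1 was never extracted
    print(build_mask(selector_mask, used_features))  # [True, False, False, True]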
def aux_best_percentile_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor, used_features):
""" Function used to select the best percentile selector """
percentile_score = 0
percentiles = [25, 35, 45, 50, 55, 65, 75]
# percentiles = [45]
percentile_selector = None
percentile_train_features_selected = None
percentile_test_features_selected = None
for percentile in percentiles:
print(percentile)
temp_percentile_selector = SelectPercentile(score_func=f_regression, percentile=percentile)
temp_percentile_selector.fit(train_features, train_similarity_target)
temp_percentile_train_features_selected = temp_percentile_selector.transform(train_features)
temp_percentile_test_features_selected = temp_percentile_selector.transform(test_features)
regressor.fit(temp_percentile_train_features_selected, train_similarity_target)
temp_score = regressor.score(temp_percentile_test_features_selected, test_similarity_target)
print("The score on the selected features (Percentile Selector): %.3f" % temp_score)
if temp_score > percentile_score:
percentile_score = temp_score
percentile_selector = temp_percentile_selector
percentile_train_features_selected = temp_percentile_train_features_selected
percentile_test_features_selected = temp_percentile_test_features_selected
percentile_mask = percentile_selector.get_support()
print("This is the percentile mask: ")
print(percentile_mask)
percentile_mask = build_mask(percentile_mask, used_features)
mask_save_path = os.path.join('feature_selection_masks', 'assin2_percentile_based_mask.txt')
debug_data(percentile_mask, mask_save_path)
return percentile_train_features_selected, percentile_test_features_selected, percentile_selector
def run_feature_extraction(word2vec_model=None, fasttext_model=None, ptlkb64_model=None, glove300_model=None, numberbatch_model=None):
""" Function used to compute the models """
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('test', help='XML file with test data')
parser.add_argument('output', help='Output tagged XML file')
args = parser.parse_args()
"""
system_mode = 0 -> uses the variant questions with the system
system_mode = 1 -> uses the PTPT and PTBR train ASSIN collection datasets with the system
system_mode = 2 -> uses the PTPT and PTBR train and test ASSIN collection datasets with the system
system_mode = 3 -> uses the Whoosh collection with the system
system_mode = 4 -> uses ASSIN 1 and ASSIN 2 training collection datasets with the system
system_mode = 5 -> uses ASSIN 1 training and testing collection and ASSIN 2 training collection datasets with the system
run_pipeline = 0 -> uses the pre-computed files with the components needed to extract some features
run_pipeline = 1 -> uses NLPyPort pipeline which avoids having to pre-compute certain components to extract features
"""
system_mode = 5
run_pipeline = 1
# Flag to indicate if the extracted features should be written to a file (1) or not (0)
features_to_file_flag = 0
# extract labels
train_pairs = []
train_pairs.extend(read_xml(ROOT_PATH + "/datasets/assin/assin1/assin-ptpt-train.xml", need_labels=True))
train_pairs.extend(read_xml(ROOT_PATH + "/datasets/assin/assin1/assin-ptbr-train.xml", need_labels=True))
if system_mode == 2 or system_mode == 5:
train_pairs.extend(read_xml(ROOT_PATH + "/datasets/assin/assin1/assin-ptpt-test.xml", need_labels=True))
train_pairs.extend(read_xml(ROOT_PATH + "/datasets/assin/assin1/assin-ptbr-test.xml", need_labels=True))
if system_mode == 4 or system_mode == 5:
train_pairs.extend(read_xml(ROOT_PATH + "/datasets/assin/assin2/assin2-train-only.xml", need_labels=True))
train_similarity_target = np.array([pair.similarity for pair in train_pairs])
# extract training features
train_corpus = read_corpus(train_pairs)
# debug_data(train_corpus, "finetune.train.raw")
# print("Wrote training corpus")
# preprocessing(text, tokenization=0, rm_stopwords=0, numbers_to_text=0, to_tfidf=0)
preprocessed_train_corpus = preprocessing(train_corpus, 0, 0, 0, 0)
train_features, used_train_features = extract_features(run_pipeline, train_corpus, preprocessed_train_corpus, word2vec_mdl=word2vec_model, fasttext_mdl=fasttext_model, ptlkb64_mdl=ptlkb64_model, glove300_mdl=glove300_model, numberbatch_mdl=numberbatch_model)
# write train features to a .csv file
if features_to_file_flag == 1:
write_features_to_csv(train_pairs, train_features, "assin1-train-test-assin2-train-ftrain.csv")
#############################################################
test_pairs_dev = read_xml('datasets/assin/assin2/assin2-dev.xml', need_labels=False)
test_corpus_dev = read_corpus(test_pairs_dev)
# preprocessing(text, tokenization=0, rm_stopwords=0, numbers_to_text=0, to_tfidf=0)
preprocessed_test_corpus_dev = preprocessing(test_corpus_dev, 0, 0, 0, 0)
test_features_dev, used_test_features_dev = extract_features(run_pipeline, test_corpus_dev, preprocessed_test_corpus_dev, word2vec_mdl=word2vec_model, fasttext_mdl=fasttext_model, ptlkb64_mdl=ptlkb64_model, glove300_mdl=glove300_model, numberbatch_mdl=numberbatch_model)
test_pairs_selection = read_xml('datasets/assin/assin2/assin2-dev.xml', need_labels=True)
test_similarity_target = np.array([pair.similarity for pair in test_pairs_selection])
#############################################################
# extract test features
# test_pairs = read_xml(args.test, need_labels=False)
# uncomment next line and comment previous one to compute ASSIN 2 submission results
test_pairs = read_xml_no_attributes(args.test)
test_corpus = read_corpus(test_pairs)
# preprocessing(text, tokenization=0, rm_stopwords=0, numbers_to_text=0, to_tfidf=0)
preprocessed_test_corpus = preprocessing(test_corpus, 0, 0, 0, 0)
test_features, used_test_features = extract_features(run_pipeline, test_corpus, preprocessed_test_corpus, word2vec_mdl=word2vec_model, fasttext_mdl=fasttext_model, ptlkb64_mdl=ptlkb64_model, glove300_mdl=glove300_model, numberbatch_mdl=numberbatch_model)
majordir, minordir, vert_dir = ' ', ' ', ' '
# copy the tuples into lists; magnitudes may be modified
major = [major[0], major[1]]
minor = [minor[0], minor[1]]
vertical = [vertical[0], vertical[1]]
if major[0] > 0 :
majordir = major[1]
majoreach = floor(major[0] / 8)
major[0] -= majoreach * 8
if minor[0] > 0 :
minordir = minor[1]
minoreach = floor(minor[0] / 8)
minor[0] -= minoreach * 8
if vertical[0] > 0 :
vert_dir = vertical[1]
vert_each = floor(vertical[0] / 8)
vertical[0] -= vert_each * 8
header = [
'|'.join([' ', majordir, minordir, vert_dir, '']),
'|'.join([' ', str(majoreach), str(minoreach), str(vert_each), ''])
]
grid = []
v_maj = int(major[0])
v_min = int(minor[0])
v_vert = int(vertical[0])
for i in range(8) :
(maj, min) = remainders.HORIZONTAL[v_maj][v_min][i]
h_sum = v_maj + v_min # - floor(v_min / 3)
while h_sum > 7 :
h_sum -= 8
(_, vert) = remainders.VERTICAL[h_sum][v_vert][i]
grid.append('|'.join([str(i+1), maj, min, vert, '']))
return '\n'.join(header + grid)
def cartesian(self) :
"""Get cartesian (X,Y,Z) components of the Vector.
Returns a dictionary with keys 'x', 'y' and 'z' (as built below), with
values of type `decimal.Decimal` for precision.
Positive 'x' is in direction 'B/C', 'y' in 'A', and 'z' in '+'.
"""
global _SIN60
global _HALF_DECIMAL
uvp = self._uvp_vectors
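# The return below is a direct read-off of the 60-degree hex basis: U
# contributes sin(60)*U to x and half of U to the (negated) y term, V lies
# entirely along negative y, and '+' maps straight onto z.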
return { 'x': uvp['U'] * _SIN60,
'y': -(uvp['V'] + uvp['U'] * _HALF_DECIMAL),
'z': uvp['+']
}
#end class Vector
class AVID() :
"""Representation of a 'window' on the AVID skyball"""
# options of all possible horizontal directions
_WINDOWS_H = [
"A",
"A/B",
"B",
"B/C",
"C",
"C/D",
"D",
"D/E",
"E",
"E/F",
"F",
"F/A",
]
# each major list is for the absolute value of height offset
# each minor list the tuple representation of direction/height offsets
# orbiting an arbitrary direction at the offset distance equal to list index
_OFFSETS = {
0: [ # amber ring
[ #0
( 0, 0) # A A/B
],
[ #1
( 0, 1), # A+ A/B+
(-1, 1), # F/A+ A+
(-1, 0), # F/A A
(-1,-1), # F/A- A-
( 0,-1), # A- A/B-
( 1,-1), # A/B- B-
( 1, 0), # A/B B
( 1, 1), # A/B+ B+
],
[ #2
( 0, 2), # A++ A/B++
(-1, 2), # F/A++ A++
(-2, 2), # F++ F/A++
(-2, 1), # F+ F/A+
(-2, 0), # F F/A
(-2,-1), # F- F/A-
(-2,-2), # F-- F/A--
(-1,-2), # F/A-- A--
( 0,-2), # A-- A/B--
( 1,-2), # A/B-- B--
( 2,-2), # B-- B/C--
( 2,-1), # B- B/C-
( 2, 0), # B B/C
( 2, 1), # B+ B/C+
( 2, 2), # B++ B/C++
( 1, 2), # A/B++ B++
],
[ #3
( 0, 3), # +++ +++
(-3, 2), # E/F++ F++
(-3, 1), # E/F+ F+
(-3, 0), # E/F F
(-3,-1), # E/F- F-
(-3,-2), # E/F-- F--
( 0,-3), # --- ---
( 3,-2), # B/C-- C--
( 3,-1), # B/C- C-
( 3, 0), # B/C C
( 3, 1), # B/C+ C+
( 3, 2), # B/C++ C++
],
# 4,5,6 are found by taking the opposite window and index 6-i
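# e.g. offsets at distance 4 around 'A' would presumably be read from the
# distance-2 (6 - 4) list of the opposite window 'D' -- an interpretation of
# the note above.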
],
1: [ # blue ring (+1)
[ #0
( 0, 0) # A+ A/B+
],
[ #1
( 0, 1), # A++ A/B++
(-1, 1), # F/A++ A++
(-1, 0), # F/A+ A+
(-1,-1), # F/A A
( 0,-1), # A A/B
( 1,-1), # A/B B
( 1, 0), # A/B+ B+
( 1, 1), # A/B++ B++
],
[ #2
( 0, 2), # +++ +++
(-2, 1), # F++ F/A++
(-2, 0), # F+ F/A+
(-2,-1), # F F/A
(-2,-2), # F- F/A-
(-1,-2), # F/A- A-
( 0,-2), # A- A/B-
( 1,-2), # A/B- B-
( 2,-2), # B- B/C-
( 2,-1), # B B/C
( 2, 0), # B+ B/C+
( 2, 1), # B++ B/C++
],
[ #3
( 6, 1), # D++ D/E+++
(-5, 1), # D/E++ E++
(-4, 1), # E++ E/F++
(-3, 1), # E/F++ F++
(-3, 0), # E/F+ F+
(-3,-1), # E/F F
(-3,-2), # E/F- F-
(-3,-3), # E/F-- F--
(-2,-3), # F-- F/A--
(-1,-3), # F/A-- A--
( 0,-3), # A-- A/B--
( 1,-3), # A/B-- B--
( 2,-3), # B-- B/C--
( 3,-3), # B/C-- C--
( 3,-2), # B/C- C-
( 3,-1), # B/C C
( 3, 0), # B/C+ C+
( 3, 1), # B/C++ C++
( 4, 0), # C++ C/D++
( 5, 0), # C/D++ D++
]
],
-1: [ # blue ring (-1)
[ #0
( 0, 0) # A- A/B-
],
[ #1
( 0, 1), # A A/B
(-1, 1), # F/A A
(-1, 0), # F/A- A-
(-1,-1), # F/A-- A--
( 0,-1), # A-- A/B--
( 1,-1), # A/B-- B--
( 1, 0), # A/B- B-
( 1, 1), # A/B B
],
[ #2
( 0, 2), # A+ A/B+
(-1, 2), # F/A+ A+
(-2, 2), # F+ F/A+
(-2, 1), # F F/A
(-2, 0), # F- F/A-
(-2,-1), # F-- F/A--
( 0,-2), # --- ---
( 2,-1), # B-- A/B--
( 2, 0), # B- B/C-
( 2, 1), # B B/C
( 2, 2), # B+ B/C+
( 1, 2), # A/B+ B+
],
],
2: [ # green ring (+2)
[ #0
( 0, 0), # A++ A/B++
],
[ #1
( 0, 1), # +++ +++
(-1, 0), # F/A++ A++
(-2, 0), # F++ F/A++
(-1,-1), # F/A+ A+
( 0,-1), # A+ F/A+
( 1,-1), # A/B+ B+
( 2, 0), # B+ B/C+
( 1, 0), # A/B++ B++
],
[ #2
( 6, 0), # D++ D/E++
(-5, 0), # D/E++ E++
(-4, 0), # E++ E/F++
(-3, 0), # E/F++ F++
(-2,-1), # F+ F/A+
(-2,-2), # F F/A
(-1,-2), # F/A A
( 0,-2), # A A/B
( 1,-2), # A/B B
( 2,-2), # B B/C
( 2,-1), # B+ B/C+
( 3, 0), # B/C++ C++
( 4, 0), # C++ C/D++
( 5, 0), # C/D++ D++
],
[ #3
( 6,-1), # D+ D/E+
(-5,-1), # D/E+ E+
(-4,-1), # E+ E/F+
(-3,-1), # E/F+ F+
(-3,-2), # E/F F
(-3,-3), # E/F- F-
(-2,-3), # F- F/A-
(-1,-3), # F/A- A-
( 0,-3), # A- A/B-
( 1,-3), # A/B- B-
( 2,-3), # B- B/C-
( 3,-3), # B/C- C-
( 3,-2), # B/C C
( 3,-1), # B/C+ C+
( 4,-1), # C+ C/D+
( 5,-1), # C/D+ D+
]
],
-2: [ # green ring (-2)
[ #0
( 0, 0), # A-- A/B--
],
[ #1
( 0, 1), # A- A/B-
(-1, 1), # F/A- A-
(-2, 0), # F-- F/A--
(-1, 0), # F/A-- A--
( 0,-1), # --- ---
( 1, 0), # A/B-- B--
( 2, 0), # B-- B/C--
( 1, 1), # A/B- B-
],
[ #2
( 0, 2), # A A/B
(-1, 2), # F/A A
(-2, 2), # F F/A
(-2, 1), # F- F/A-
(-3, 0), # E/F-- F--
(-4, 0), # E-- E/F--
(-5, 0), # D/E-- E--
( 6, 0), # D-- D/E--
( 5, 0), # C/D-- D--
( 4, 0), # C-- C/D--
( 3, 0), # B/C-- C--
( 2, 1), # B- B/C-
( 2, 2), # B B/C
( 1, 2), # A/B B
],
],
3: [ # fuchsia (+/-3) -- absolute window tuples
[ # 0
( 0, 3),
],
[ # 1
( 0, 2),
( 1, 2),
( 2, 2),
( 3, 2),
( 4, 2),
( 5, 2),
( 6, 2),
( 7, 2),
( 8, 2),
( 9, 2),
(10, 2),
(11, 2),
],
[ # 2
( 0, 1),
( 1, 1),
( 2, 1),
<filename>Desktop Application/Advanced/Python/Sketch With Sam/Sketch With Sam.py
from tkinter import *
from tkinter import messagebox,colorchooser,filedialog
from PIL import ImageTk,Image,ImageGrab
import time
class Sketch:
def __init__(self, root):
# Take window, window title and canvas control
self.window = root
self.window.title("Sketch With Sam" + "-----" + "New Window")
self.make_canvas = Canvas(self.window, width=1080, height=667, bg="white", relief=RIDGE, bd=8)
self.make_canvas.place(x=0, y=0)
# All widget/state attributes start as None
self.my_menu = None
self.file_menu = None
self.edit_menu = None
self.color_menu = None
self.option_menu = None
self.help_menu = None
self.coord = None
self.status = None
self.controller_set = None
self.line_img = None
self.make_line = None
self.das_img = None
self.pencil_img = None
self.circle_img = None
self.selection_img = None
self.rectangle_img = None
self.eraser_img = None
self.text_img = None
self.delete_seg = None
self.parallelogram_img = None
self.traingle_img = None
self.pentagon_img = None
self.hexagon_img = None
self.arrow_img = None
self.right_angled_traingle_img = None
self.rounded_rec_img = None
self.arrow_left_img = None
self.color_frame = None
self.choosing_color = None
self.F = None
self.O = None
self.color = None
self.permanent_color = None
self.segment_1 = None
self.eraser = None
self.segment_2 = None
self.top = None
self.text_collection = None
self.make_width_frame = None
self.shape_outline_width_label = None
self.eraser_width_label = None
self.eraser_controller = None
self.color_box = None
self.color_box_img = None
self.notation_box = None
# Coordinate controller value initialization
self.old_x = None
self.old_y = None
self.new_x = None
self.new_y = None
# All buttons initialized
self.pencil = Button(self.window)
self.circle = Button(self.window)
self.selection_btn = Button(self.window)
self.rec = Button(self.window)
self.straight_line = Button(self.window)
self.bent_line = Button(self.window)
self.dashed_text_btn = Button(self.window)
self.parallelogram_btn = Button(self.window)
self.traingle_btn = Button(self.window)
self.pentagon_btn = Button(self.window)
self.hexagon_btn = Button(self.window)
self.arr_btn = Button(self.window)
self.text_btn = Button(self.window)
self.right_angled_traingle_btn = Button(self.window)
self.arr_left_btn = Button(self.window)
self.rounded_rec_btn = Button(self.window)
# All lists initialized
self.img_container = []
self.cut_copy_img = []
self.undo_container = []
self.temp = []
self.color_container = []
self.menu_img_container = []
self.about_img = []
# Scale initialization used by the Text tool
self.font_size = Scale(self.top)
self.font_size.set(20)
#Input variable initialization
self.fill_information = IntVar()
self.outline_information = IntVar()
self.input_take = StringVar()
self.fill_information.set(0)
self.outline_information.set(0)
self.input_take.set(" ")
#By default set color
self.fill_color = "#FFFFFF"
self.fill_color_line = "black"
self.outline_color_line = "black"
self.text_fg = "black"
self.color_container_box = "black"
#Some value initialization
self.color_circle_width_maintainer = 15
self.img_counter = -1
self.width_controller_scale = 0
self.counter = -1
self.width_maintainer = 2
self.erase_width_maintainer = 5
self.active_coloring = 2
#Some default function call
self.control(1)  # make sure the canvas is ready to draw with the pencil by default
self.controller()
self.make_menu()
self.make_status_bar()
self.width_controller()
self.color_set()
self.make_canvas.bind("<Control-MouseWheel>",self.zoom_controller)
self.make_canvas.bind('<Shift-MouseWheel>',self.color_box_width_controller)
self.make_canvas.bind('<Motion>',self.movement_cursor)
# Dispatch tool selection: bind canvas events according to the chosen notation
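# Notation values dispatched below (as wired to the buttons and menus in this
# file): 1 pencil, 2 circle, 3 rectangle, 4 line (Shift drags a straight line),
# 5 eraser, 6 text, 7 triangle, 8 parallelogram, 9 pentagon, 10 hexagon,
# 11 up/down arrow, 12 dashed line, 13 selection, 14 color box,
# 15 left/right arrow, 16 rounded rectangle, 17 right-angled triangle,
# 18 clear, 19 new window, 20 exit, 21 background color, 22 movement help.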
def control(self,notation):
if self.temp:
self.make_canvas.delete(self.temp.pop())
if self.notation_box:
if self.notation_box['state'] == DISABLED:
self.notation_box['state'] = NORMAL
self.make_canvas.config(cursor="TCROSS")
self.make_canvas.unbind("<B1-Motion>")
self.make_canvas.unbind("<ButtonRelease-1>")
self.make_canvas.unbind("<Button-1>")
if notation == 1:
self.make_canvas.bind("<B1-Motion>", self.draw_with_pencil)
elif notation == 2:
self.make_canvas.bind("<B1-Motion>", self.circle_ranging)
elif notation == 3:
self.make_canvas.bind("<B1-Motion>", self.rectangle_ranging)
elif notation == 4:
self.make_canvas.bind("<B1-Motion>", self.bent_line_ranging)
self.make_canvas.bind("<Shift-B1-Motion>", self.straight_line_ranging)
elif notation == 5:
self.make_canvas.config(cursor="dotbox")
self.make_canvas.bind("<B1-Motion>", self.erasing_setup)
elif notation == 6:
self.text_creation_input_take()
elif notation == 7:
self.make_canvas.bind('<B1-Motion>', self.traingle_ranging)
elif notation == 8:
self.make_canvas.bind('<B1-Motion>', self.parallelogram_ranging)
elif notation == 9:
self.make_canvas.bind('<B1-Motion>', self.pentagon_ranging)
elif notation == 10:
self.make_canvas.bind('<B1-Motion>', self.hexagon_ranging)
elif notation == 11:
self.make_canvas.bind('<B1-Motion>', self.arrow_up_down_ranging)
elif notation == 12:
self.make_canvas.bind('<B1-Motion>', self.dashed_line_ranging)
elif notation == 13:
self.make_canvas.bind('<B1-Motion>', self.select_region)
self.window.bind('<Delete>', self.delete_selected_region)
elif notation == 14:
self.make_canvas.config(cursor="circle")
self.color_container_box = colorchooser.askcolor()[1]
self.make_canvas.bind('<B1-Motion>', self.color_boxer)
elif notation == 15:
self.make_canvas.bind('<B1-Motion>', self.arrow_left_right_ranging)
elif notation == 16:
self.make_canvas.bind('<B1-Motion>',self.rounded_rectangle_ranging)
elif notation == 17:
self.make_canvas.bind('<B1-Motion>',self.right_angled_traingle_ranging)
elif notation == 18 or notation == 19:
if notation == 18:
take = messagebox.askyesno("Clear Conformation","Are you sure to Clear?")
else:
take = messagebox.askyesno("New Window Conformation", "Are you really want to open new Window?")
self.window.title("Sketch With Sam" + "-----" + "New Window")
if take is True:
self.make_canvas.delete("all")
self.clear()
elif notation == 20:
take = messagebox.askyesno("Exit Conformation", "Are you sure to Exit?")
if take is True:
self.window.destroy()
elif notation == 21:
take = colorchooser.askcolor()[1]
if take:
self.make_canvas['bg'] = take
self.make_canvas.update()
elif notation == 22:
messagebox.showinfo("Movement Direction","At first click on the shape or line number from indexing box\n\n1. Right Arrow----> Right Movement\n\n2. Left Arrow----> Left Movement\n\n3. Up Arrow---->Up Movement\n\n4. Down Arrow--->Down Movement\n\n5. Space Button--->Stop Movement")
#Controller box setup
def controller(self):
self.controller_set = LabelFrame(self.window,text="Controller",bg="orange",fg="blue",width=250,height=684,relief=RAISED,bd=10, font=("Arial", 10, "bold"))
self.controller_set.place(x=1100,y=0)
self.notation_box = Listbox(self.controller_set, width=5, height=11, font=("Arial", 10, "bold"), fg="yellow",
bg="brown", relief=SUNKEN, bd=5)
self.notation_box.place(x=180, y=305)
self.segment_1 = Label(self.controller_set,text="Shapes and lines",bg="#FF6347",fg="brown",font=("Arial", 12, "bold"),relief=GROOVE,bd=1,padx=10,pady=1)
self.segment_1.place(x=40,y=3)
self.line_img = ImageTk.PhotoImage(Image.open("Pictures/line.jpg").resize((20, 20), Image.ANTIALIAS))
self.straight_line = Button(self.controller_set, image=self.line_img, fg="red", bg="white",
font=("Arial", 10, "bold"), relief=RAISED, bd=3, command=lambda: self.control(4))
self.straight_line.place(x=10, y=40)
self.das_img = ImageTk.PhotoImage(Image.open("Pictures/dashed_line.png").resize((20, 20), Image.ANTIALIAS))
self.dashed_text_btn = Button(self.controller_set, image=self.das_img, fg="red", bg="white",
font=("Arial", 10, "bold"), relief=RAISED, bd=3, command=lambda: self.control(12))
self.dashed_text_btn.place(x=70, y=40)
self.rectangle_img = ImageTk.PhotoImage(Image.open("Pictures/rectangle.jpg").resize((20, 20), Image.ANTIALIAS))
self.rec = Button(self.controller_set, image=self.rectangle_img, fg="red", bg="white", font=("Arial", 10, "bold"),relief=RAISED, bd=3, command=lambda: self.control(3))
self.rec.place(x=130,y=40)
self.parallelogram_img = ImageTk.PhotoImage(Image.open("Pictures/parallelogram.png").resize((20, 20), Image.ANTIALIAS))
self.parallelogram_btn = Button(self.controller_set, image=self.parallelogram_img, fg="red", bg="white",
font=("Arial", 10, "bold"), relief=RAISED, bd=3,
command=lambda: self.control(8))
self.parallelogram_btn.place(x=190, y=40)
self.traingle_img = ImageTk.PhotoImage(Image.open("Pictures/traingle.jpg").resize((20, 20), Image.ANTIALIAS))
self.traingle_btn = Button(self.controller_set, image=self.traingle_img, fg="red", bg="white",
font=("Arial", 10, "bold"), relief=RAISED, bd=3, command=lambda: self.control(7))
self.traingle_btn.place(x=10, y=100)
self.pentagon_img = ImageTk.PhotoImage(Image.open("Pictures/pentagon.png").resize((20, 20), Image.ANTIALIAS))
self.pentagon_btn = Button(self.controller_set, image=self.pentagon_img, fg="red", bg="white",
font=("Arial", 10, "bold"), relief=RAISED, bd=3, command=lambda: self.control(9))
self.pentagon_btn.place(x=70, y=100)
self.hexagon_img = ImageTk.PhotoImage(Image.open("Pictures/hexagon.png").resize((20, 20), Image.ANTIALIAS))
self.hexagon_btn = Button(self.controller_set, image=self.hexagon_img, fg="red", bg="white",
font=("Arial", 10, "bold"), relief=RAISED, bd=3, command=lambda: self.control(10))
self.hexagon_btn.place(x=130, y=100)
self.arrow_img = ImageTk.PhotoImage(Image.open("Pictures/arrow.png").resize((20, 20), Image.ANTIALIAS))
self.arr_btn = Button(self.controller_set, image=self.arrow_img, fg="red", bg="white",
font=("Arial", 10, "bold"), relief=RAISED, bd=3, command=lambda: self.control(11))
self.arr_btn.place(x=190, y=100)
self.circle_img = ImageTk.PhotoImage(Image.open("Pictures/circle.png").resize((20, 20), Image.ANTIALIAS))
self.circle = Button(self.controller_set, image=self.circle_img, bg="white", fg="red",
font=("Arial", 10, "bold"), relief=RAISED, bd=3, command=lambda: self.control(2))
self.circle.place(x=10, y=160)
self.right_angled_traingle_img = ImageTk.PhotoImage(Image.open("Pictures/right_angled_traingle.png").resize((20, 20), Image.ANTIALIAS))
self.right_angled_traingle_btn = Button(self.controller_set, image=self.right_angled_traingle_img, fg="red", bg="white",
font=("Arial", 10, "bold"), relief=RAISED, bd=3, command=lambda: self.control(17))
self.right_angled_traingle_btn.place(x=70, y=160)
self.rounded_rec_img = ImageTk.PhotoImage(Image.open("Pictures/rounded_rectangle.png").resize((20, 20), Image.ANTIALIAS))
self.rounded_rec_btn = Button(self.controller_set, image=self.rounded_rec_img, fg="red", bg="white",
font=("Arial", 10, "bold"), relief=RAISED, bd=3, command=lambda: self.control(16))
self.rounded_rec_btn.place(x=130, y=160)
self.arrow_left_img = ImageTk.PhotoImage(Image.open("Pictures/left_arrow.png").resize((20, 20), Image.ANTIALIAS))
self.arr_left_btn = Button(self.controller_set, image=self.arrow_left_img, fg="red", bg="white",
font=("Arial", 10, "bold"), relief=RAISED, bd=3, command=lambda: self.control(15))
self.arr_left_btn.place(x=190, y=160)
self.segment_2 = Label(self.controller_set, text="Tools Collection", bg="#FF6347", fg="brown",
font=("Arial", 12, "bold"), relief=GROOVE, bd=1, padx=10, pady=1)
self.segment_2.place(x=40, y=210)
self.pencil_img = ImageTk.PhotoImage(Image.open("Pictures/pencil.png").resize((17, 17), Image.ANTIALIAS))
self.pencil = Button(self.controller_set, image=self.pencil_img, bg="white", fg="red",
font=("Arial", 10, "bold"), relief=RAISED, bd=3, command=lambda: self.control(1))
self.pencil.place(x=10, y=250)
self.eraser_img = ImageTk.PhotoImage(Image.open("Pictures/eraser.png").resize((17, 17), Image.ANTIALIAS))
self.eraser = Button(self.controller_set, image=self.eraser_img, fg="red", bg="white", font=("Arial", 10, "bold"),relief=RAISED, bd=3, command=lambda: self.control(5))
self.eraser.place(x=60,y=250)
self.text_img = ImageTk.PhotoImage(Image.open("Pictures/text_pic.png").resize((17, 17), Image.ANTIALIAS))
self.text_btn = Button(self.controller_set, image=self.text_img, fg="red", bg="white",
font=("Arial", 10, "bold"), relief=RAISED, bd=3, command=lambda: self.control(6))
self.text_btn.place(x=110, y=250)
self.selection_img = ImageTk.PhotoImage(Image.open("Pictures/selection_box.png").resize((17, 17), Image.ANTIALIAS))
self.selection_btn = Button(self.controller_set, image=self.selection_img, fg="red", bg="white",
font=("Arial", 10, "bold"), relief=RAISED, bd=3, command=lambda: self.control(13))
self.selection_btn.place(x=160, y=250)
self.color_box_img = ImageTk.PhotoImage(Image.open("Pictures/color_conatiner.png").resize((17, 17), Image.ANTIALIAS))
self.color_box = Button(self.controller_set, image=self.color_box_img, fg="red", bg="white",
font=("Arial", 10, "bold"), relief=RAISED, bd=3,command=lambda: self.control(14))
self.color_box.place(x=200, y=250)
#Movement keyboard setup
self.window.bind('<space>', self.movement)
self.window.bind('<Left>', self.movement)
self.window.bind('<Right>', self.movement)
self.window.bind('<Up>', self.movement)
self.window.bind('<Down>', self.movement)
#Menu setup
def make_menu(self):
self.my_menu = Menu(self.window)
self.window.config(menu=self.my_menu)
menu_img = ["new_img.jpg", "open_img.jpg", "save_img.png", "exit_img.png", "undo_img.jpg", "clear_img.png", "cut_img.png", "copy_img.jpg", "paste_img.jpg","screenshot_img.jpg", "bgcolor_img.jpg", "fill_outline_img.png", "zoom_in_img.png", "zoom_out_img.png", "colorpen_img.png", "movement_img.png","about_img.jpg"]
for i in range(17):
self.menu_img_container.append(i)
self.menu_img_container[i] = ImageTk.PhotoImage(
Image.open("Pictures/" + menu_img[i]).resize((30, 30), Image.ANTIALIAS))
self.file_menu = Menu(self.my_menu,tearoff=False)
self.my_menu.add_cascade(label="File",menu=self.file_menu)
self.file_menu.add_command(label="New",accelerator="(Ctrl+N)",command=lambda: self.control(19),image=self.menu_img_container[0],compound=LEFT,background="green",foreground="yellow",font=("Arial",10,"bold"),activebackground="yellow",activeforeground="green")
self.file_menu.add_command(label="Open",accelerator="(Ctrl+O)",command=lambda: self.open_file(False),image=self.menu_img_container[1],compound=LEFT,background="green",foreground="yellow",font=("Arial",10,"bold"),activebackground="yellow",activeforeground="green")
self.file_menu.add_command(label="Save",accelerator="(Ctrl+S)",command=lambda: self.save_file(False),state=DISABLED,image=self.menu_img_container[2],compound=LEFT,background="green",foreground="yellow",font=("Arial",10,"bold"),activebackground="yellow",activeforeground="green")
self.file_menu.add_command(label="Exit",command=lambda: self.control(20),image=self.menu_img_container[3],compound=LEFT,background="green",foreground="yellow",font=("Arial",10,"bold"),activebackground="yellow",activeforeground="green")
self.window.bind('<Control-Key-n>', lambda e:self.control(19))
self.window.bind('<Control-Key-o>', self.open_file)
self.window.bind('<Control-Key-s>', self.save_file)
self.edit_menu = Menu(self.my_menu, tearoff=False)
self.my_menu.add_cascade(label="Edit", menu=self.edit_menu)
self.edit_menu.add_command(label="Undo", command=lambda: self.undo(False),accelerator="(Ctrl+Z)",state=DISABLED,image=self.menu_img_container[4],compound=LEFT,background="green",foreground="yellow",font=("Arial",10,"bold"),activebackground="yellow",activeforeground="green")
self.edit_menu.add_command(label="Clear", command=lambda: self.control(18),state=DISABLED,image=self.menu_img_container[5],compound=LEFT,background="green",foreground="yellow",font=("Arial",10,"bold"),activebackground="yellow",activeforeground="green")
self.edit_menu.add_command(label="Cut", command=lambda: self.cut(False), accelerator="(Ctrl+X)",state=DISABLED,image=self.menu_img_container[6],compound=LEFT,background="green",foreground="yellow",font=("Arial",10,"bold"),activebackground="yellow",activeforeground="green")
self.edit_menu.add_command(label="Copy", command=lambda: self.copy(0),accelerator="(Ctrl+C)",state=DISABLED,image=self.menu_img_container[7],compound=LEFT,background="green",foreground="yellow",font=("Arial",10,"bold"),activebackground="yellow",activeforeground="green")
self.edit_menu.add_command(label="Paste", command=lambda: self.paste(False),accelerator="(Ctrl+V)",state=DISABLED,image=self.menu_img_container[8],compound=LEFT,background="green",foreground="yellow",font=("Arial",10,"bold"),activebackground="yellow",activeforeground="green")
self.edit_menu.add_command(label="Screen Shot", command=lambda: self.screen_shot_taking(False),accelerator="(Ctrl+Alt+C)",state=DISABLED,image=self.menu_img_container[9],compound=LEFT,background="green",foreground="yellow",font=("Arial",10,"bold"),activebackground="yellow",activeforeground="green")
self.window.bind("<Control-Key-z>",self.undo)
self.window.bind("<Control-Key-x>", self.cut)
self.window.bind("<Control-Key-c>",self.copy)
self.window.bind("<Control-Key-v>", self.paste)
self.window.bind("<Control-Alt-Key-c>",self.screen_shot_taking)
self.color_menu = Menu(self.my_menu, tearoff=False)
self.my_menu.add_cascade(label="Color", menu=self.color_menu)
self.color_menu.add_command(label="Change Background Color",command=lambda: self.control(21),image=self.menu_img_container[10],compound=LEFT,background="green",foreground="yellow",font=("Arial",10,"bold"),activebackground="yellow",activeforeground="green")
self.color_menu.add_command(label="Change Permanent Fill and Outline Color", command=self.set_permanent_choose_color,image=self.menu_img_container[11],compound=LEFT,background="green",foreground="yellow",font=("Arial",10,"bold"),activebackground="yellow",activeforeground="green")
self.option_menu = Menu(self.my_menu, tearoff=False)
self.my_menu.add_cascade(label="Option", menu=self.option_menu)
self.option_menu.add_command(label="Zoom in",accelerator="(Ctrl+Scroll up)",command=lambda: self.zoom_controller(1),image=self.menu_img_container[12],compound=LEFT,background="green",foreground="yellow",font=("Arial",10,"bold"),activebackground="yellow",activeforeground="green")
self.option_menu.add_command(label="Zoom out", accelerator="(Ctrl+Scroll down)",command=lambda: self.zoom_controller(0),image=self.menu_img_container[13],compound=LEFT,background="green",foreground="yellow",font=("Arial",10,"bold"),activebackground="yellow",activeforeground="green")
self.option_menu.add_separator(background="green")
self.option_menu.add_command(label="Color Pen Width Increase", accelerator="(Shift+Scroll up)", command=lambda: self.color_box_width_controller(1),image=self.menu_img_container[14],compound=LEFT,background="green",foreground="yellow",font=("Arial",10,"bold"),activebackground="yellow",activeforeground="green")
self.option_menu.add_command(label="Color Pen Width Decrease", accelerator="(Shift+Scroll down)", command=lambda: self.color_box_width_controller(0),image=self.menu_img_container[14],compound=LEFT,background="green",foreground="yellow",font=("Arial",10,"bold"),activebackground="yellow",activeforeground="green")
self.option_menu.add_separator(background="green")
self.option_menu.add_command(label="Movement", state=DISABLED, command=lambda: self.control(22),image=self.menu_img_container[15],compound=LEFT,background="green",foreground="yellow",font=("Arial",10,"bold"),activebackground="yellow",activeforeground="green")
self.help_menu = Menu(self.my_menu, tearoff=False)
self.my_menu.add_cascade(label="Help", menu=self.help_menu)
self.help_menu.add_command(label="About",command=self.about,image=self.menu_img_container[16],compound=LEFT,background="green",foreground="yellow",font=("Arial",10,"bold"),activebackground="yellow",activeforeground="green")
self.help_menu.add_command(label="Tips", command=self.tips, image=self.menu_img_container[16], compound=LEFT, background="green", foreground="yellow", font=("Arial", 10, "bold"), activebackground="yellow", activeforeground="green")
def movement_cursor(self, e):#For cursor position by movement
self.coord.config(text=str(e.x) + "," + str(e.y) + "px")
def make_status_bar(self):#Make status bar
self.status = Label(self.window, text="Sketch With Passion", fg="#292929", bg="#707070", font=("Arial", 12, "bold"))
self.status.place(x=1150, y=685)
self.coord = Label(self.window, text="", fg="#292929", bg="#707070", font=("Arial", 9, "bold"))
self.coord.place(x=20, y=687)
def open_file(self,e):#For open a file
self.status['text'] = "Open a File"
if self.notation_box['state'] == DISABLED:
self.notation_box['state'] = NORMAL
self.make_canvas.unbind("<B1-Motion>")
self.make_canvas.unbind("<ButtonRelease-1>")
self.make_canvas.unbind("<Button-1>")
image_mine = filedialog.askopenfilename(initialdir="\Desktop", title="Select an image",filetypes=(("JPEG Images", "*.jpg"), ("All images", "*.*")))
if image_mine:
self.img_container.append(ImageTk.PhotoImage(Image.open(image_mine)))
self.img_counter+=1
take = self.make_canvas.create_image(100, 200, image=self.img_container[self.img_counter])
self.undo_container.append(take)
self.notation_box.insert(END,len(self.undo_container)-1)
self.reset()
self.control(1)
def save_file(self,e):#for save a file
self.status['text'] = "Save current file"
self.status.place(x=1150, y=685)
file = filedialog.asksaveasfilename(initialdir="Saved_file",filetypes=[("PNG File","*.png")])
if file:
| |
single input layer.")
parent = inputs[0]
    out_tensor = torch.softmax(parent, dim=-1)  # apply softmax over the last (class) dimension
if set_tensors:
self.out_tensor = out_tensor
return out_tensor
class Sigmoid(Layer):
""" Compute the sigmoid of input: f(x) = sigmoid(x)
Only one input is allowed, output will have the same shape as input
"""
def __init__(self, in_layers=None, **kwargs):
super(Sigmoid, self).__init__(in_layers, **kwargs)
try:
self._shape = tuple(self.in_layers[0].shape)
except:
pass
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
inputs = self._get_input_tensors(in_layers)
if len(inputs) != 1:
raise ValueError("Sigmoid must have a single input layer.")
parent = inputs[0]
out_tensor = torch.sigmoid(parent)
if set_tensors:
self.out_tensor = out_tensor
return out_tensor
class ReLU(Layer):
""" Compute the relu activation of input: f(x) = relu(x)
Only one input is allowed, output will have the same shape as input
"""
def __init__(self, in_layers=None, **kwargs):
super(ReLU, self).__init__(in_layers, **kwargs)
try:
self._shape = tuple(self.in_layers[0].shape)
except:
pass
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
inputs = self._get_input_tensors(in_layers)
if len(inputs) != 1:
raise ValueError("ReLU must have a single input layer.")
parent = inputs[0]
    out_tensor = torch.relu(parent)  # elementwise ReLU applied to the input tensor
if set_tensors:
self.out_tensor = out_tensor
return out_tensor
class Concat(Layer):
def __init__(self, in_layers=None, dim=1, **kwargs):
self.dim = dim
super(Concat, self).__init__(in_layers, **kwargs)
try:
s = list(self.in_layers[0].shape)
for parent in self.in_layers[1:]:
if s[dim] is None or parent.shape[dim] is None:
s[dim] = None
else:
s[dim] += parent.shape[dim]
self._shape = tuple(s)
except:
pass
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
inputs = self._get_input_tensors(in_layers)
if len(inputs) == 1:
self.out_tensor = inputs[0]
return self.out_tensor
out_tensor = torch.cat(inputs, dim=self.dim)
if set_tensors:
self.out_tensor = out_tensor
return out_tensor
class Stack(Layer):
def __init__(self, in_layers=None, dim=1, **kwargs):
self.dim = dim
super(Stack, self).__init__(in_layers, **kwargs)
try:
s = list(self.in_layers[0].shape)
s.insert(dim, len(self.in_layers))
self._shape = tuple(s)
except:
pass
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
inputs = self._get_input_tensors(in_layers)
out_tensor = torch.stack(inputs, dim=self.dim)
if set_tensors:
self.out_tensor = out_tensor
return out_tensor
class Constant(Layer):
"""Output a constant value."""
def __init__(self, val, dtype=torch.float32, **kwargs):
"""Construct a constant layer.
Parameters
----------
    val: array
the value the layer should output
dtype: torch.DType
the data type of the output value.
"""
if not isinstance(val, np.ndarray):
val = np.array(val)
self.val = val
self.dtype = dtype
self._shape = tuple(val.shape)
super(Constant, self).__init__(**kwargs)
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    out_tensor = torch.tensor(self.val, dtype=self.dtype)  # materialize the stored constant value
if set_tensors:
self.out_tensor = out_tensor
return out_tensor
class Variable(Layer):
"""Output a trainable value."""
def __init__(self, initial_value, dtype=torch.float32, **kwargs):
"""Construct a variable layer.
Parameters
----------
initial_value: array
the initial value the layer should output
dtype: torch.DType
the data type of the output value.
"""
    if not isinstance(initial_value, np.ndarray):
      initial_value = np.array(initial_value)
    self.initial_value = initial_value
self.dtype = dtype
self._shape = tuple(initial_value.shape)
super(Variable, self).__init__(**kwargs)
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
if torch.cuda.is_available():
if not self._built:
        self.variables = [torch.tensor(self.initial_value, dtype=self.dtype, requires_grad=True)]  # trainable value
self._built = True
out_tensor = self.variables[0]
else:
      out_tensor = torch.tensor(self.initial_value, dtype=self.dtype)
if set_tensors:
self._record_variable_scope(self.name)
self.out_tensor = out_tensor
return out_tensor
class StopGradient(Layer):
"""Block the flow of gradients.
This layer copies its input directly to its output, but reports that all
gradients of its output are zero. This means, for example, that optimizers
will not try to optimize anything "upstream" of this layer.
For example, suppose you have pre-trained a stack of layers to perform a
calculation. You want to use the result of that calculation as the input to
another layer, but because they are already pre-trained, you do not want the
optimizer to modify them. You can wrap the output in a StopGradient layer,
then use that as the input to the next layer."""
def __init__(self, in_layers=None, **kwargs):
super(StopGradient, self).__init__(in_layers, **kwargs)
try:
self._shape = tuple(self.in_layers[0].shape)
except:
pass
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
inputs = self._get_input_tensors(in_layers)
if len(inputs) > 1:
raise ValueError("Only one layer supported.")
out_tensor = torch.Tensor.detach(inputs[0])
if set_tensors:
self.out_tensor = out_tensor
return out_tensor
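# A minimal standalone sketch (not part of the original layer graph) of what
# StopGradient relies on: torch.Tensor.detach returns a tensor that shares data
# with its input but is cut off from the autograd graph, so no gradient flows
# back through it. The tensor names below are illustrative only.
def _example_stop_gradient():
  import torch
  x = torch.ones(3, requires_grad=True)
  y = (2 * x).detach()          # same values as 2 * x, but no grad history
  loss = (y * x).sum()
  loss.backward()
  # x.grad only reflects the direct use of x, not the detached path through y
  return x.grad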
def _max_dimension(x, y):
if x is None:
return y
if y is None:
return x
return max(x, y)
class Add(Layer):
"""Compute the (optionally weighted) sum of the input layers."""
def __init__(self, in_layers=None, weights=None, **kwargs):
"""Create an Add layer.
Parameters
----------
weights: array
an array of length equal to the number of input layers, giving the weight
to multiply each input by. If None, all weights are set to 1.
"""
super(Add, self).__init__(in_layers, **kwargs)
self.weights = weights
try:
shape1 = list(self.in_layers[0].shape)
shape2 = list(self.in_layers[1].shape)
if len(shape1) < len(shape2):
shape2, shape1 = shape1, shape2
offset = len(shape1) - len(shape2)
for i in range(len(shape2)):
shape1[i + offset] = _max_dimension(shape1[i + offset], shape2[i])
self._shape = tuple(shape1)
except:
pass
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
inputs = self._get_input_tensors(in_layers)
weights = self.weights
if weights is None:
weights = [1] * len(inputs)
out_tensor = inputs[0]
if weights[0] != 1:
out_tensor *= weights[0]
for layer, weight in zip(inputs[1:], weights[1:]):
if weight == 1:
out_tensor += layer
else:
out_tensor += weight * layer
if set_tensors:
self.out_tensor = out_tensor
return out_tensor
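# Illustrative sketch (hypothetical tensors, not part of the original module) of
# the arithmetic Add.create_tensor performs for weights=[0.5, 2.0]: the layer
# reduces to a plain weighted elementwise sum of its input tensors.
def _example_weighted_add():
  import torch
  a = torch.ones(2, 3)
  b = torch.full((2, 3), 3.0)
  # equivalent to Add(weights=[0.5, 2.0]) applied to inputs [a, b]
  return 0.5 * a + 2.0 * b      # every element equals 6.5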
class Multiply(Layer):
"""Compute the product of the input layers."""
def __init__(self, in_layers=None, **kwargs):
super(Multiply, self).__init__(in_layers, **kwargs)
try:
shape1 = list(self.in_layers[0].shape)
shape2 = list(self.in_layers[1].shape)
if len(shape1) < len(shape2):
shape2, shape1 = shape1, shape2
offset = len(shape1) - len(shape2)
for i in range(len(shape2)):
shape1[i + offset] = _max_dimension(shape1[i + offset], shape2[i])
self._shape = tuple(shape1)
except:
pass
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
inputs = self._get_input_tensors(in_layers)
out_tensor = inputs[0]
for layer in inputs[1:]:
out_tensor *= layer
if set_tensors:
self.out_tensor = out_tensor
return out_tensor
class Divide(Layer):
"""Compute the ratio of the input layers."""
def __init__(self, in_layers=None, **kwargs):
super(Divide, self).__init__(in_layers, **kwargs)
try:
shape1 = list(self.in_layers[0].shape)
shape2 = list(self.in_layers[1].shape)
if len(shape1) < len(shape2):
shape2, shape1 = shape1, shape2
offset = len(shape1) - len(shape2)
for i in range(len(shape2)):
shape1[i + offset] = _max_dimension(shape1[i + offset], shape2[i])
self._shape = tuple(shape1)
except:
pass
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
inputs = self._get_input_tensors(in_layers)
out_tensor = inputs[0] / inputs[1]
if set_tensors:
self.out_tensor = out_tensor
return out_tensor
class Log(Layer):
"""Compute the natural log of the input."""
def __init__(self, in_layers=None, **kwargs):
super(Log, self).__init__(in_layers, **kwargs)
try:
self._shape = self.in_layers[0].shape
except:
pass
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
inputs = self._get_input_tensors(in_layers)
if len(inputs) != 1:
raise ValueError('Log must have a single parent')
out_tensor = torch.log(inputs[0])
if set_tensors:
self.out_tensor = out_tensor
return out_tensor
class Exp(Layer):
"""Compute the exponential of the input."""
def __init__(self, in_layers=None, **kwargs):
super(Exp, self).__init__(in_layers, **kwargs)
try:
self._shape = self.in_layers[0].shape
except:
pass
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
inputs = self._get_input_tensors(in_layers)
if len(inputs) != 1:
raise ValueError('Exp must have a single parent')
out_tensor = torch.exp(inputs[0])
if set_tensors:
self.out_tensor = out_tensor
return out_tensor
class InteratomicL2Distances(Layer):
"""Compute (squared) L2 Distances between atoms given neighbors."""
def __init__(self, N_atoms, M_nbrs, ndim, **kwargs):
self.N_atoms = N_atoms
self.M_nbrs = M_nbrs
self.ndim = ndim
super(InteratomicL2Distances, self).__init__(**kwargs)
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
inputs = self._get_input_tensors(in_layers)
if len(inputs) != 2:
raise ValueError("InteratomicDistances requires coords,nbr_list")
coords, nbr_list = (inputs[0], inputs[1])
N_atoms, M_nbrs, ndim = self.N_atoms, self.M_nbrs, self.ndim
# Shape (N_atoms, M_nbrs, ndim)
    nbr_coords = coords[nbr_list]  # advanced indexing over the first axis
    # Shape (N_atoms, M_nbrs, ndim)
    tiled_coords = torch.reshape(coords, (N_atoms, 1, ndim)).repeat(1, M_nbrs, 1)
# Shape (N_atoms, M_nbrs)
dists = torch.sum((tiled_coords - nbr_coords)**2, dim=2)
out_tensor = dists
if set_tensors:
self.out_tensor = out_tensor
return out_tensor
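# A small self-contained sketch (illustrative shapes only) of the indexing used
# above: advanced indexing coords[nbr_list] selects, for every atom, the
# coordinates of its M_nbrs neighbours, giving one squared distance per neighbour.
def _example_interatomic_distances():
  import torch
  N_atoms, M_nbrs, ndim = 4, 2, 3
  coords = torch.randn(N_atoms, ndim)
  nbr_list = torch.randint(0, N_atoms, (N_atoms, M_nbrs))
  nbr_coords = coords[nbr_list]                        # (N_atoms, M_nbrs, ndim)
  tiled = coords.reshape(N_atoms, 1, ndim).repeat(1, M_nbrs, 1)
  return torch.sum((tiled - nbr_coords) ** 2, dim=2)   # (N_atoms, M_nbrs)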
class SparseSoftMaxCrossEntropy(Layer):
"""Computes Sparse softmax cross entropy between logits and labels.
  labels: Tensor of shape [d_0, d_1, ..., d_{r-1}] (where r is the rank of logits) and must be of dtype int32 or int64.
  logits: Unscaled log probabilities of shape [d_0, ..., d_{r-1}, num_classes] and of dtype float32 or float64.
Note: the rank of the logits should be 1 greater than that of labels.
The output will be a tensor of same shape as labels and of same type as logits with the loss.
"""
def __init__(self, in_layers=None, **kwargs):
super(SparseSoftMaxCrossEntropy, self).__init__(in_layers, **kwargs)
try:
self._shape = self.in_layers[1].shape[:-1]
except:
pass
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
inputs = self._get_input_tensors(in_layers, False)
if len(inputs) != 2:
raise ValueError()
labels, logits = inputs[0], inputs[1]
    # per-sample cross entropy; assumes logits of shape [batch, num_classes]
    out_tensor = nn.functional.cross_entropy(logits, labels.long(), reduction='none')
if set_tensors:
self.out_tensor = out_tensor
return out_tensor
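# Hedged usage sketch (assumes 2-D logits of shape [batch, num_classes]) of the
# functional call used above: cross_entropy with reduction='none' returns one
# loss value per sample, matching the shape of the integer label tensor.
def _example_sparse_softmax_cross_entropy():
  import torch
  import torch.nn.functional as F
  logits = torch.randn(5, 3)                  # [batch, num_classes]
  labels = torch.randint(0, 3, (5,))          # integer class ids, shape [batch]
  return F.cross_entropy(logits, labels, reduction='none')   # shape [5]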
class SoftMaxCrossEntropy(Layer):
def __init__(self, in_layers=None, **kwargs):
super(SoftMaxCrossEntropy, self).__init__(in_layers, **kwargs)
try:
self._shape = self.in_layers[1].shape[:-1]
except:
pass
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
inputs = self._get_input_tensors(in_layers, True)
if len(inputs) != 2:
raise ValueError()
labels, logits = inputs[0], inputs[1]
    # soft-label cross entropy over the last (class) dimension
    out_tensor = torch.sum(-labels * nn.functional.log_softmax(logits, dim=-1), dim=-1)
if set_tensors:
self.out_tensor = out_tensor
return out_tensor
class SigmoidCrossEntropy(Layer):
""" Compute the sigmoid cross entropy of inputs: [labels, logits]
`labels` hold the binary labels(with no axis of n_classes),
`logits` hold the log probabilities for positive class(label=1),
`labels` and `logits` should have | |
# Copyright (c) 2010-2016 <NAME>
# Copyright (c) 2010-2016 <NAME>
# Copyright (c) 2010-2016 Stony Brook University
# Copyright (c) 2010-2016 The Research Foundation of SUNY
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import sys
import copy
import time
import os.path
import logging
import warnings
import threading
from datetime import datetime
from collections import abc, deque, namedtuple
from inspect import signature, Parameter
from functools import wraps
MAJOR_VERSION = 1
MINOR_VERSION = 0
PATCH_VERSION = 9
PRERELEASE_VERSION = ""
__version__ = "{}.{}.{}{}".format(MAJOR_VERSION, MINOR_VERSION, PATCH_VERSION,
PRERELEASE_VERSION)
INCOQ_MODULE_NAME = "incoq.mars.runtime"
CONSOLE_LOG_FORMAT = \
'[%(relativeCreated)d] %(name)s<%(processName)s>:%(levelname)s: %(message)s'
FILE_LOG_FORMAT = \
'[%(asctime)s] %(name)s<%(processName)s>:%(levelname)s: %(message)s'
# a dict that contains the runtime configuration values:
GlobalOptions = None
# a dict containing configuration overrides set by the `config` API:
GlobalConfig = dict()
# Process id of the node process:
CurrentNode = None
# incoq.runtime.Type, only if using incoq:
IncOQBaseType = None
# Define custom levels for text output from user code using the `output`,
# `debug`, and `error` builtins, to differentiate output from user programs and
# log messages from the DistAlgo system:
logging.addLevelName(logging.INFO+1, "OUTPUT")
logging.addLevelName(logging.INFO+2, "USRERR")
logging.addLevelName(logging.DEBUG+1, "USRDBG")
log = logging.getLogger(__name__)
api_registry = dict()
builtin_registry = dict()
internal_registry = dict()
class InvalidStateException(RuntimeError): pass
def get_runtime_option(key, default=None):
if GlobalOptions is None:
if default is None:
raise InvalidStateException("DistAlgo is not initialized.")
else:
return default
return GlobalOptions.get(key, default)
def set_runtime_option(key, value):
if GlobalOptions is None:
raise InvalidStateException("DistAlgo is not initialized.")
GlobalOptions[key] = value
def _version_as_bytes():
"""Return a 4-byte representation of the version.
"""
prerelease = sum(ord(c) for c in PRERELEASE_VERSION) % 256
return (((MAJOR_VERSION & 0xff) << 24) | ((MINOR_VERSION & 0xff) << 16) |
((PATCH_VERSION & 0xff) << 8) | prerelease).to_bytes(4, 'big')
VERSION_BYTES = _version_as_bytes()
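# Worked example for the constants above (version 1.0.9, empty prerelease string):
# the packed form is big-endian (major, minor, patch, prerelease hash), i.e.
# VERSION_BYTES == b'\x01\x00\x09\x00'.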
def initialize_runtime_options(configs):
global GlobalOptions
if GlobalOptions is not None:
raise InvalidStateException("DistAlgo is already initialized")
GlobalOptions = dict(configs)
def _restore_runtime_options(params):
global GlobalOptions, GlobalConfig
GlobalOptions, GlobalConfig = params
def set_global_config(props):
GlobalConfig.update(props)
def global_config():
return GlobalConfig
def _set_node(node_id):
global CurrentNode
CurrentNode = node_id
def pid_of_node():
return CurrentNode
def get_inc_module():
if GlobalOptions is None:
raise InvalidStateException("DistAlgo is not initialized.")
    if GlobalOptions['inc_module_name'] not in sys.modules:
return None
return sys.modules[GlobalOptions['inc_module_name']]
def sysinit():
pid_format = get_runtime_option('pid_format')
if pid_format == 'full':
ProcessId.__str__ = ProcessId.__repr__ = ProcessId._full_form_
elif pid_format == 'long':
ProcessId.__str__ = ProcessId.__repr__ = ProcessId._long_form_
else:
# default is short
pass
load_modules()
def _restore_module_logging():
assert '_da_module_cache' in GlobalOptions
for modulename in GlobalOptions['_da_module_cache']:
consolefmt, filefmt = GlobalOptions['_da_module_cache'][modulename]
setup_logging_for_module(modulename, consolefmt, filefmt)
def setup_logging_for_module(modulename,
consolefmt=CONSOLE_LOG_FORMAT,
filefmt=FILE_LOG_FORMAT):
"""Configures package level logger.
"""
if GlobalOptions is None:
# We're not yet initialized, which will happen when using `import_da`
# under spawning semantics. This is fine, as logging will be setup after
# `OSProcessContainer.run` gets called. We can safely ignore this call:
return
if '_da_module_cache' not in GlobalOptions:
GlobalOptions['_da_module_cache'] = dict()
GlobalOptions['_da_module_cache'][modulename] = (consolefmt, filefmt)
rootlog = logging.getLogger(modulename)
rootlog.handlers = [] # Clear all handlers
if not GlobalOptions['no_log']:
rootlog.propagate = False
rootlog.setLevel(logging.DEBUG)
consoleformatter = logging.Formatter(consolefmt)
consolelvl = logging._nameToLevel[GlobalOptions['logconsolelevel'].upper()]
ch = logging.StreamHandler()
ch.setFormatter(consoleformatter)
ch.setLevel(consolelvl)
rootlog._consolelvl = consolelvl
rootlog.addHandler(ch)
if GlobalOptions['logfile']:
filelvl = logging._nameToLevel[GlobalOptions['logfilelevel'].upper()]
logfilename = GlobalOptions['logfilename']
if logfilename is None:
if GlobalOptions['file'] is not None:
logfilename = os.path.basename(GlobalOptions['file'])
elif GlobalOptions['module'] is not None:
logfilename = GlobalOptions['module'][0]
else:
logfilename = datetime.now().strftime('%Y-%m-%d_%H%M%S')
logfilename += '.log'
fh = logging.FileHandler(logfilename)
formatter = logging.Formatter(filefmt)
fh.setFormatter(formatter)
fh.setLevel(filelvl)
rootlog._filelvl = filelvl
rootlog.addHandler(fh)
if GlobalOptions['logdir'] is not None:
os.makedirs(GlobalOptions['logdir'], exist_ok=True)
rootlog._logdir = GlobalOptions['logdir']
else:
rootlog._logdir = None
else:
rootlog.addHandler(logging.NullHandler())
def load_modules():
import importlib
global IncOQBaseType
if not GlobalOptions['load_inc_module']:
return
main = sys.modules[GlobalOptions['this_module_name']]
inc = importlib.import_module(GlobalOptions['inc_module_name'])
if inc.JbStyle:
IncOQBaseType = importlib.import_module(INCOQ_MODULE_NAME) \
.IncOQType
if GlobalOptions['control_module_name'] is not None:
ctrl = importlib.import_module(GlobalOptions['control_module_name'])
main.IncModule = ModuleIntrument(ctrl, inc)
else:
main.IncModule = inc
####################
# Process ID
####################
ILLEGAL_NAME_CHARS = set('@#:')
def check_name(name):
return not (set(name) & ILLEGAL_NAME_CHARS)
def name_split_host(name):
"""Splits `name` into 'processname', 'hostname', and 'port' components."""
comps = name.split('@')
if len(comps) == 2:
name, suffix = comps
suffix = suffix.split(':')
if len(suffix) > 2:
return (None, None, None)
elif len(suffix) == 2:
host, port = suffix
try:
return (name, host, int(port))
except ValueError:
return (None, None, None)
elif len(suffix) == 1:
return (name, suffix[0], None)
else:
return (name, None, None)
elif len(comps) == 1:
return (comps[0], None, None)
else:
return (None, None, None)
def name_split_node(name):
"""Splits `name` into 'processname', 'nodename' components."""
assert '@' not in name
comps = name.split('#')
if len(comps) == 2:
return tuple(comps)
elif len(comps) == 1:
return (comps[0], comps[0])
else:
return (None, None)
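# Illustrative sketch (not part of the original module): expected results of the
# name parsing helpers above, with made-up process and host names.
def _example_name_parsing():
    assert name_split_host('worker@example.org:5000') == ('worker', 'example.org', 5000)
    assert name_split_host('worker') == ('worker', None, None)
    assert name_split_node('worker#node1') == ('worker', 'node1')
    assert name_split_node('node1') == ('node1', 'node1')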
class ProcessId(namedtuple("_ProcessId",
'uid, seqno, pcls, \
name, nodename, hostname, transports')):
"""An instance of `ProcessId` uniquely identifies a DistAlgo process instance.
A `ProcessId` instance should contain all necessary information for any
DistAlgo process in a distributed system to send messages to that process.
This includes the network addresses of all ports the process listens on.
There is a total ordering over the set of all `ProcessId`s. `ProcessId`s
referring to the same process will always compare equal.
From the point of view of DistAlgo programs, `ProcessId` instances are
opaque objects -- no assumptions should be made about the internal structure
of `ProcessId` instances.
"""
__slots__ = ()
_pid_counter = 0
_lock = threading.Lock()
_named = dict()
_callbacks = dict()
def __new__(cls, uid, seqno, pcls, name, nodename, hostname, transports):
obj = super().__new__(cls, uid, seqno, pcls, name, nodename, hostname,
transports)
if len(name) > 0:
fullname = (name, nodename)
with ProcessId._lock:
entry = ProcessId._named.get(fullname, None)
callbacks = ProcessId._callbacks.get(fullname, None)
if isinstance(entry, ProcessId):
if obj < entry:
# cached id is more recent than the new one, so use the
# cached entry:
obj = entry
elif obj.uid != entry.uid:
log.warning("Process name '%s#%s' reassigned from %s "
"to %s.", name, nodename,
ProcessId._full_form_(entry),
ProcessId._full_form_(obj))
if entry != obj:
ProcessId._named[fullname] = obj
if callbacks is not None:
del ProcessId._callbacks[fullname]
if type(callbacks) is list:
for callback in callbacks:
assert callable(callback)
callback(obj)
return obj
@staticmethod
def lookup(name):
return ProcessId._named.get(name, None)
@staticmethod
def lookup_or_register_callback(name, callback):
with ProcessId._lock:
if name not in ProcessId._named:
if name not in ProcessId._callbacks:
ProcessId._callbacks[name] = [callback]
else:
ProcessId._callbacks[name].append(callback)
return None
else:
return ProcessId._named[name]
@staticmethod
def all_named_ids():
with ProcessId._lock:
return list(ProcessId._named.values())
@staticmethod
def drop_entry(nid):
with ProcessId._lock:
if nid.name in ProcessId._named:
del ProcessId._named[nid.name]
@staticmethod
def gen_uid(hostname, pid):
"""Generate a globally unique 96-bit id.
"""
# 54 bits of timestamp:
tstamp = int(time.time() * 1000) & 0x3fffffffffffff
# 16 bits of hostname hash
hh = int(hash(hostname)) & 0xffff
# 16 bits of os pid
pid %= 0xffff
# 10 bit global counter
with ProcessId._lock:
cnt = ProcessId._pid_counter = (ProcessId._pid_counter + 1) % 1024
return (tstamp << 42) | (hh << 26) | (pid << 10) | cnt
@classmethod
def _create(idcls, pcls, transports, name=""):
"""Creates a new `ProcessId` instance.
"""
hostname = get_runtime_option('hostname')
nodename = get_runtime_option('nodename')
uid = ProcessId.gen_uid(hostname,
pid=threading.current_thread().ident)
return idcls(uid=uid, seqno=1, pcls=pcls,
name=name, nodename=nodename,
hostname=hostname, transports=transports)
def _short_form_(self):
"""Constructs a short string representation of this pid.
This form is more suitable for use in output strings.
"""
if len(self.nodename) > 0 and self.nodename != self.name:
if len(self.name) > 0:
return "<{0.pcls.__name__}:{0.name}#{0.nodename}>".format(self)
else:
# otherwise, we use `uid` truncated to the last 5 hex digits:
return "<{0.pcls.__name__}:{1:05x}#{0.nodename}>".format(
self, self.uid & 0xfffff)
else:
if len(self.name) > 0:
return "<{0.pcls.__name__}:{0.name}>".format(self)
else:
return "<{0.pcls.__name__}:{1:05x}>".format(self, self.uid & 0xfffff)
    def _long_form_(self):
        """Constructs a longer string representation of this pid.
        This form is more detailed than the short form.
        """
if len(self.nodename) > 0 and self.nodename != self.name:
if len(self.name) > 0:
return "<{0.pcls.__name__}:{0.name}#{0.nodename}>".format(self)
else:
# otherwise, | |
"""
Core implementation of :mod:`sklearndf.transformation.wrapper`
"""
import logging
from abc import ABCMeta, abstractmethod
from typing import Any, Generic, List, Optional, TypeVar, Union
import numpy as np
import pandas as pd
from sklearn.base import TransformerMixin
from sklearn.compose import ColumnTransformer
from sklearn.impute import MissingIndicator, SimpleImputer
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.manifold import Isomap
from sklearn.preprocessing import KBinsDiscretizer, OneHotEncoder, PolynomialFeatures
from pytools.api import AllTracker
from ... import TransformerDF
from ...wrapper import TransformerWrapperDF
log = logging.getLogger(__name__)
__all__ = [
"BaseDimensionalityReductionWrapperDF",
"BaseMultipleInputsPerOutputTransformerWrapperDF",
"ColumnPreservingTransformerWrapperDF",
"ColumnSubsetTransformerWrapperDF",
"ComponentsDimensionalityReductionWrapperDF",
"FeatureSelectionWrapperDF",
"NComponentsDimensionalityReductionWrapperDF",
"NumpyTransformerWrapperDF",
"ColumnTransformerWrapperDF",
"IsomapWrapperDF",
"ImputerWrapperDF",
"MissingIndicatorWrapperDF",
"AdditiveChi2SamplerWrapperDF",
"KBinsDiscretizerWrapperDF",
"PolynomialFeaturesWrapperDF",
"OneHotEncoderWrapperDF",
]
#
# type variables
#
T_Transformer = TypeVar("T_Transformer", bound=TransformerMixin)
# T_Imputer is needed because sklearn's _BaseImputer only exists from v0.22 onwards.
# Once we drop support for sklearn 0.21, _BaseImputer can be used instead.
# The following TypeVar helps to annotate availability of "add_indicator" and
# "missing_values" attributes on an imputer instance for ImputerWrapperDF below
# noinspection PyProtectedMember
from sklearn.impute._iterative import IterativeImputer
T_Imputer = TypeVar("T_Imputer", SimpleImputer, IterativeImputer)
#
# Ensure all symbols introduced below are included in __all__
#
__tracker = AllTracker(globals())
#
# wrapper classes for transformers
#
class NumpyTransformerWrapperDF(
TransformerWrapperDF[T_Transformer], Generic[T_Transformer], metaclass=ABCMeta
):
"""
Abstract base class of DF wrappers for transformers that only accept numpy arrays.
Converts data frames to numpy arrays before handing off to the native transformer.
Implementations must define :meth:`_get_features_original`.
"""
# noinspection PyPep8Naming
def _adjust_X_type_for_delegate(
self, X: pd.DataFrame, *, to_numpy: Optional[bool] = None
) -> np.ndarray:
assert to_numpy is not False, "X must be converted to a numpy array"
return super()._adjust_X_type_for_delegate(X, to_numpy=True)
def _adjust_y_type_for_delegate(
self,
y: Optional[Union[pd.Series, pd.DataFrame]],
*,
to_numpy: Optional[bool] = None,
) -> Optional[np.ndarray]:
assert to_numpy is not False, "y must be converted to a numpy array"
return super()._adjust_y_type_for_delegate(y, to_numpy=True)
class ColumnSubsetTransformerWrapperDF(
TransformerWrapperDF[T_Transformer], Generic[T_Transformer], metaclass=ABCMeta
):
"""
Abstract base class of DF wrappers for transformers that do not change column names,
but that may remove one or more columns.
Implementations must define :meth:`_get_features_out`.
"""
@abstractmethod
def _get_features_out(self) -> pd.Index:
# return column labels for arrays returned by the fitted transformer.
pass
def _get_features_original(self) -> pd.Series:
# return the series with output columns in index and output columns as values
features_out = self._get_features_out()
return pd.Series(index=features_out, data=features_out.values)
class ColumnPreservingTransformerWrapperDF(
ColumnSubsetTransformerWrapperDF[T_Transformer],
Generic[T_Transformer],
):
"""
DF wrapper for transformers whose output columns match the input columns.
The native transformer must not add, remove, reorder, or rename any of the input
columns.
"""
def _get_features_out(self) -> pd.Index:
return self.feature_names_in_
class BaseMultipleInputsPerOutputTransformerWrapperDF(
TransformerWrapperDF[T_Transformer], Generic[T_Transformer]
):
"""
DF wrapper for transformers mapping multiple input columns to individual output
columns.
"""
@abstractmethod
def _get_features_out(self) -> pd.Index:
# make this method abstract to ensure subclasses override the default
# behaviour, which usually relies on method ``_get_features_original``
pass
def _get_features_original(self) -> pd.Series:
raise NotImplementedError(
f"{type(self.native_estimator).__name__} transformers map multiple "
"inputs to individual output columns; current sklearndf implementation "
"only supports many-to-1 mappings from output columns to input columns"
)
class BaseDimensionalityReductionWrapperDF(
BaseMultipleInputsPerOutputTransformerWrapperDF[T_Transformer],
Generic[T_Transformer],
metaclass=ABCMeta,
):
"""
Base class of DF wrappers for dimensionality-reducing transformers.
The native transformer is considered to map all input columns to each output column.
"""
@property
@abstractmethod
def _n_components_(self) -> int:
pass
def _get_features_out(self) -> pd.Index:
return pd.Index([f"x_{i}" for i in range(self._n_components_)])
class NComponentsDimensionalityReductionWrapperDF(
BaseDimensionalityReductionWrapperDF[T_Transformer],
Generic[T_Transformer],
metaclass=ABCMeta,
):
"""
Base class of DF wrappers for dimensionality-reducing transformers supporting the
:attr:`n_components` attribute.
Subclasses must implement :meth:`_get_features_original`.
"""
_ATTR_N_COMPONENTS = "n_components"
def _validate_delegate_estimator(self) -> None:
self._validate_delegate_attribute(attribute_name=self._ATTR_N_COMPONENTS)
@property
def _n_components_(self) -> int:
return getattr(self.native_estimator, self._ATTR_N_COMPONENTS)
class ComponentsDimensionalityReductionWrapperDF(
BaseDimensionalityReductionWrapperDF[T_Transformer],
Generic[T_Transformer],
metaclass=ABCMeta,
):
"""
Base class of DF wrappers for dimensionality-reducing transformers supporting the
``components_`` attribute.
The native transformer must provide a ``components_`` attribute once fitted,
as an array of shape (n_components, n_features).
"""
_ATTR_COMPONENTS = "components_"
# noinspection PyPep8Naming
def _post_fit(
self, X: pd.DataFrame, y: Optional[pd.Series] = None, **fit_params
) -> None:
# noinspection PyProtectedMember
super()._post_fit(X, y, **fit_params)
self._validate_delegate_attribute(attribute_name=self._ATTR_COMPONENTS)
@property
def _n_components_(self) -> int:
return len(getattr(self.native_estimator, self._ATTR_COMPONENTS))
class FeatureSelectionWrapperDF(
ColumnSubsetTransformerWrapperDF[T_Transformer],
Generic[T_Transformer],
metaclass=ABCMeta,
):
"""
DF wrapper for feature selection transformers.
The native transformer must implement a ``get_support`` method, providing the
indices of the selected input columns
"""
_ATTR_GET_SUPPORT = "get_support"
def _validate_delegate_estimator(self) -> None:
self._validate_delegate_attribute(attribute_name=self._ATTR_GET_SUPPORT)
def _get_features_out(self) -> pd.Index:
get_support = getattr(self.native_estimator, self._ATTR_GET_SUPPORT)
return self.feature_names_in_[get_support()]
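# A hedged, standalone sketch (plain scikit-learn, not the DF wrapper above) of the
# get_support contract this wrapper relies on: the fitted selector exposes a boolean
# mask over the input features, which is then used to subset the column index.
def _example_get_support() -> pd.Index:
    from sklearn.datasets import load_iris
    from sklearn.feature_selection import SelectKBest, f_classif

    X, y = load_iris(return_X_y=True)
    feature_names_in_ = pd.Index(
        ["sepal_len", "sepal_wid", "petal_len", "petal_wid"]
    )
    selector = SelectKBest(score_func=f_classif, k=2).fit(X, y)
    # boolean mask of length n_features_in; True marks the selected columns
    return feature_names_in_[selector.get_support()]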
class ColumnTransformerWrapperDF(
TransformerWrapperDF[ColumnTransformer], metaclass=ABCMeta
):
"""
DF wrapper for :class:`sklearn.compose.ColumnTransformer`.
Requires all transformers passed as the ``transformers`` parameter to implement
:class:`.TransformerDF`.
"""
__DROP = "drop"
__PASSTHROUGH = "passthrough"
__SPECIAL_TRANSFORMERS = (__DROP, __PASSTHROUGH)
def _validate_delegate_estimator(self) -> None:
column_transformer: ColumnTransformer = self.native_estimator
if (
column_transformer.remainder
not in ColumnTransformerWrapperDF.__SPECIAL_TRANSFORMERS
):
raise ValueError(
f"unsupported value for arg remainder: ({column_transformer.remainder})"
)
non_compliant_transformers: List[str] = [
type(transformer).__name__
for _, transformer, _ in column_transformer.transformers
if not (
isinstance(transformer, TransformerDF)
or transformer in ColumnTransformerWrapperDF.__SPECIAL_TRANSFORMERS
)
]
if non_compliant_transformers:
from .. import ColumnTransformerDF
raise ValueError(
f"{ColumnTransformerDF.__name__} only accepts instances of "
f"{TransformerDF.__name__} or special values "
f'"{" and ".join(ColumnTransformerWrapperDF.__SPECIAL_TRANSFORMERS)}" '
"as valid transformers, but "
f'also got: {", ".join(non_compliant_transformers)}'
)
def _get_features_original(self) -> pd.Series:
"""
Return the series mapping output column names to original columns names.
:return: the series with index the column names of the output dataframe and
values the corresponding input column names.
"""
def _features_original(df_transformer: TransformerDF, columns: List[Any]):
if df_transformer == ColumnTransformerWrapperDF.__PASSTHROUGH:
# we may get positional indices for columns selected by the
# 'passthrough' transformer, and in that case so need to look up the
# associated column names
if all(isinstance(column, int) for column in columns):
column_names = self._get_features_in()[columns]
else:
column_names = columns
return pd.Series(index=column_names, data=column_names)
else:
return df_transformer.feature_names_original_
return pd.concat(
[
_features_original(df_transformer, columns)
for _, df_transformer, columns in self.native_estimator.transformers_
if (
len(columns) > 0
and df_transformer != ColumnTransformerWrapperDF.__DROP
)
]
)
class ImputerWrapperDF(TransformerWrapperDF[T_Imputer], metaclass=ABCMeta):
"""
DF wrapper for imputation transformers, e.g., :class:`sklearn.impute.SimpleImputer`.
"""
def _get_features_original(self) -> pd.Series:
# get the columns that were dropped during imputation
delegate_estimator = self.native_estimator
nan_mask = []
        def _nan_mask_from_statistics(stats: np.ndarray):
if issubclass(stats.dtype.type, float):
na_mask = np.isnan(stats)
else:
na_mask = [
x is None or (isinstance(x, float) and np.isnan(x)) for x in stats
]
return na_mask
# implementation for i.e. SimpleImputer
if hasattr(delegate_estimator, "statistics_"):
nan_mask = _nan_mask_from_statistics(stats=delegate_estimator.statistics_)
# implementation for IterativeImputer
elif hasattr(delegate_estimator, "initial_imputer_"):
initial_imputer: SimpleImputer = delegate_estimator.initial_imputer_
nan_mask = _nan_mask_from_statistics(stats=initial_imputer.statistics_)
# implementation for i.e. KNNImputer
elif hasattr(delegate_estimator, "_mask_fit_X"):
# noinspection PyProtectedMember
nan_mask = np.all(delegate_estimator._mask_fit_X, axis=0)
# the imputed columns are all ingoing columns, except the ones that were dropped
imputed_columns = self.feature_names_in_.delete(np.argwhere(nan_mask).tolist())
features_original = pd.Series(
index=imputed_columns, data=imputed_columns.values
)
# if the add_indicator flag is set, we will get additional "missing" columns
if delegate_estimator.add_indicator:
from .. import MissingIndicatorDF
missing_indicator = MissingIndicatorDF.from_fitted(
estimator=delegate_estimator.indicator_,
features_in=self.feature_names_in_,
n_outputs=self.n_outputs_,
)
return features_original.append(missing_indicator.feature_names_original_)
else:
return features_original
class MissingIndicatorWrapperDF(
TransformerWrapperDF[MissingIndicator], metaclass=ABCMeta
):
"""
DF wrapper for :class:`sklearn.impute.MissingIndicator`.
"""
def _get_features_original(self) -> pd.Series:
features_original: np.ndarray = self.feature_names_in_[
self.native_estimator.features_
].values
features_out = pd.Index([f"{name}__missing" for name in features_original])
return pd.Series(index=features_out, data=features_original)
class IsomapWrapperDF(BaseDimensionalityReductionWrapperDF[Isomap], metaclass=ABCMeta):
"""
DF wrapper for :class:`sklearn.manifold.Isomap`.
"""
@property
def _n_components_(self) -> int:
return self.native_estimator.embedding_.shape[1]
class AdditiveChi2SamplerWrapperDF(
BaseDimensionalityReductionWrapperDF[AdditiveChi2Sampler], metaclass=ABCMeta
):
"""
DF wrapper for :class:`sklearn.kernel_approximation.AdditiveChi2Sampler`.
"""
@property
def _n_components_(self) -> int:
return len(self._features_in) * (2 * self.native_estimator.sample_steps + 1)
class PolynomialFeaturesWrapperDF(
BaseMultipleInputsPerOutputTransformerWrapperDF[PolynomialFeatures],
metaclass=ABCMeta,
):
"""
DF wrapper for :class:`sklearn.preprocessing.PolynomialFeatures`.
"""
def _get_features_out(self) -> pd.Index:
return pd.Index(
data=self.native_estimator.get_feature_names(
input_features=self.feature_names_in_.astype(str)
)
)
class OneHotEncoderWrapperDF(TransformerWrapperDF[OneHotEncoder], metaclass=ABCMeta):
"""
DF wrapper for :class:`sklearn.preprocessing.OneHotEncoder`.
"""
def _validate_delegate_estimator(self) -> None:
if self.native_estimator.sparse:
raise NotImplementedError("sparse matrices not supported; use sparse=False")
def _get_features_original(self) -> pd.Series:
# Return the series mapping output column names to original column names.
#
# Remove 1st category column if argument drop == 'first'
# Remove 1st category column only of binary features if arg drop == 'if_binary'
feature_names_out = pd.Index(
self.native_estimator.get_feature_names(self.feature_names_in_)
)
if self.drop == "first":
feature_names_in = [
column_original
for column_original, category in zip(
self.feature_names_in_, self.native_estimator.categories_
)
for _ in range(len(category) - 1)
]
elif self.drop == "if_binary":
feature_names_in = [
column_original
for column_original, category in zip(
self.feature_names_in_, self.native_estimator.categories_
)
for _ in (range(1) if len(category) == 2 else category)
]
else:
feature_names_in = [
column_original
for column_original, category in zip(
self.feature_names_in_, self.native_estimator.categories_
)
for _ in category
]
return pd.Series(index=feature_names_out, data=feature_names_in)
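# Hedged sketch (plain scikit-learn, not the DF wrapper above; column names are made up):
# with drop='first' every input column contributes len(categories) - 1 output columns,
# which is exactly the repetition count used in the mapping logic above.
def _example_onehot_drop_first() -> pd.Series:
    from sklearn.preprocessing import OneHotEncoder as SkOneHotEncoder

    X = pd.DataFrame({"color": ["red", "blue", "red"], "size": ["S", "M", "L"]})
    encoder = SkOneHotEncoder(drop="first").fit(X)
    feature_names_in = [
        column
        for column, categories in zip(X.columns, encoder.categories_)
        for _ in range(len(categories) - 1)
    ]
    return pd.Series(feature_names_in)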
class KBinsDiscretizerWrapperDF(
TransformerWrapperDF[KBinsDiscretizer], metaclass=ABCMeta
):
"""
DF wrapper for :class:`sklearn.preprocessing.KBinsDiscretizer`.
"""
def _validate_delegate_estimator(self) -> None:
if self.native_estimator.encode == "onehot":
raise NotImplementedError(
'property encode="onehot" is not supported due to sparse matrices;'
'consider using "onehot-dense" instead'
)
def _get_features_original(self) -> pd.Series:
"""
Return the series mapping output column names to original columns names.
:return: the series with index the column names | |
from __future__ import annotations
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #
# < IN THE NAME OF GOD > #
# ------------------------------------------ #
__AUTHOR__ = "ToorajJahangiri"
__EMAIL__ = "<EMAIL>"
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< #
# IMPORT
import os
# IMPORT LOCAL
from mosaici.exceptions import *
from mosaici.order import Order, BaseOrder
from mosaici.block import Block, BaseBlock
from mosaici.pattern import Convertor
# IMPORT TYPING
from typing import NamedTuple, Iterator
# \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\^////////////////////////////// #
# DEFAULT BLOCK ITER TYPE SUPPORT
T_ITER = tuple[int, ...] | list[int]
# TEMPLATE ABSTRACT
class BaseTemplate:
REPEAT: int
SIZE: int
ORDER: BaseOrder
BLOCK: BaseBlock
SEPARATOR: str
DEFAULT_SYMBOL: tuple[str, ...]
def __init__(
self,
blocks: tuple[BaseBlock, ...] = None,
order: BaseOrder | str = None,
default_block: BaseBlock | T_ITER = None,
) -> None:
"""
const:
            REPEAT [int]: [Number of times the Order pattern repeats when `Order` or `DefaultOrder` is used].
            SIZE [int]: [Number of Blocks in the Template, used only when Order is `None`].
            ORDER [BaseOrder]: [Order object, must be an instance of BaseOrder].
            BLOCK [BaseBlock]: [Block object, must be an instance of BaseBlock].
            SEPARATOR [str]: [Separator used for `Data to indexes` & `Indexes to Data`].
            DEFAULT_SYMBOL [tuple[str, ...]]: [Default symbols that select the default Order].
        args:
            blocks [tuple[BaseBlock]]: [Blocks; if `None`, Blocks are created for the Template] default is None.
            order [BaseOrder | str]: [Order that changes the layout of Blocks & Template] default is None.
            ** order is None means no order pattern is used. **
            ** to use the default order, pass one of the default symbols. **
            default_block [Block | T_ITER]: [Default Block used as the basis for creating new Blocks] default is None.
"""
if not self.REPEAT or not self.SIZE or not self.ORDER or not self.DEFAULT_SYMBOL:
            raise NotImplementedError
self._template = blocks
self._default_block = default_block if default_block is not None else self.BLOCK(order=self.BLOCK.DEFAULT_SYMBOL[0])
self._temp_block = None
if isinstance(order, str) and order not in self.DEFAULT_SYMBOL:
self._order = self.ORDER(order)
else:
self._order = order
if isinstance(self._order, str) and self._order in self.DEFAULT_SYMBOL:
self._order = self.default_order(self.ORDER, self.SIZE)
if self._template is None:
self._make_template()
# Make This Object Iterable
self._current = 0
def _gen_default_block(self) -> BaseBlock:
"""
Make Block When Order is None
return:
[BaseBlock]: [return new created block].
"""
        raise NotImplementedError
def _make_template(self) -> None:
"""
Create Template
"""
        raise NotImplementedError
def to_hex(self) -> tuple[list[hex], ...]:
"""
Template To Standard Mosaic Hex
return:
[tuple[list[hex]]]: [Tuple Of Hex Block]
"""
return [i.to_hex() for i in self._template]
def to_bytes(self) -> tuple[list[bytes], ...]:
"""
Template to Bytes
return:
[tuple[list[bytes]]]: [Tuple Of Bytes Block]
"""
return [i.to_bytes() for i in self._template]
def to_bin(self) -> tuple[list[bin], ...]:
"""
Template to Standard Mosaic Binarray Block
return:
[tuple[list[bin]]]: [Tuple Of Binarray Block]
"""
return [i.to_bin() for i in self._template]
def to_oct(self) -> tuple[list[oct], ...]:
"""
Template to Standard Mosaic Octal Block
return:
[tuple[list[oct]]]: [Tuple Of Octal Block]
"""
return [i.to_oct() for i in self._template]
def index(self, block: int, value: int) -> int:
"""
Index Value In Block
args:
block [int]: [Block Number]
value [int]: [Value Target]
return:
[int]: [Indexes of Value in The Block]
"""
block = self.valid_block(block, len(self))
return self._template[block].index(value)
def value(self, block: int, indexes: int) -> bytes:
"""
Value From Indexes in Block
args:
block [int]: [Block Number].
indexes [int]: [Index in Block].
return:
[bytes]: [Value in The Block & Indexes Place]
"""
block = self.valid_block(block, len(self))
return bytes([self._template[block][indexes]])
def data_to_idx(self, data: bytes) -> str:
"""
Data To Indexes
args:
data [bytes]: [Data For Convert To Indexes].
return:
[str]: [Indexes Of Data in Template].
"""
res = []
size = len(self)
for idx_block, i in enumerate(data):
_block = self.valid_block(idx_block, size)
idx = self._template[_block].index(i)
res.append(hex(idx).removeprefix('0x').upper())
return self.SEPARATOR.join(res)
def idx_to_data(self, indexes: str) -> bytes:
"""
Indexes To Data
args:
indexes [str]: [Indexes Value].
return:
[bytes]: [Source Bytes].
"""
indexes = indexes.split(self.SEPARATOR)
res = b''
size = len(self)
for idx_block, i in enumerate(indexes):
_block = self.valid_block(idx_block, size)
value = self._template[_block][int(i, 16)]
res += bytes([value])
return res
def save_template(self, path: str | os.PathLike) -> NamedTuple[int, int, int]:
"""
        Save the created Template. To load a saved Template, use a module that is an instance of `BaseFileTemplate`.
        args:
            path [str|PathLike]: [File path for saving the Template]
        return:
            [NamedTuple[int, int, int]]: [Raw bytes written, number of Blocks written, members per Block]
            ** e.g. 'SavedInfo(write=65536, block=256, member=256)'
"""
# Create Named Tuple For Return Saved
_saved = NamedTuple('SavedInfo', (('write', int), ('block', int), ('member', int)))
path = os.path.realpath(path)
# Block Convert To Bytes
to_bytes = (
b''.join(block.to_bytes())
for block in self
)
# Open File With Write Binarray Mode
with open(path, 'wb') as f:
saved_bin = f.write(b''.join(to_bytes))
# Get Member Of Block - All Block Must Be Same Member
_member = len(self._template[0])
# Return SavedInfo - [Count Write Bin Into, Count Write Block, Count Block Member]
return _saved(saved_bin, (saved_bin // _member), _member)
def get_valid_block(self, block_idx: int) -> BaseBlock:
"""
Get Valid Block
Block Index Validate Before Getting Block.
args:
block_idx[int]: [Get Index Of Block].
return:
[BaseBlock]: [Block With Index].
"""
# Validate Index Block With `valid_block()` Static Method.
_block_idx = self.valid_block(block_idx, len(self))
return self[_block_idx]
def __iter__(self) -> Iterator[BaseBlock]:
"""
Make Object Iterable
"""
self._current = 0
return self
def __next__(self) -> BaseBlock:
"""
Next Block
"""
try:
get = self._template[self._current]
self._current += 1
return get
except IndexError:
raise StopIteration
def __enter__(self) -> object:
return self
def __len__(self) -> int:
"""
Length Of Template
"""
return len(self._template)
def __getitem__(self, block_idx: int) -> BaseBlock:
"""
Get Block From Indexes
"""
return self._template[block_idx]
def __eq__(self, other: object) -> bool:
if isinstance(other, BaseTemplate):
return self._template == other._template
raise NotImplementedError
    def __ne__(self, other: object) -> bool:
if isinstance(other, BaseTemplate):
return self._template != other._template
raise NotImplementedError
def __gt__(self, other: object) -> bool:
if isinstance(other, BaseTemplate):
return len(self) > len(other)
raise NotImplementedError
def __lt__(self, other: object) -> bool:
if isinstance(other, BaseTemplate):
return len(self) < len(other)
raise NotImplementedError
def __ge__(self, other: object) -> bool:
if isinstance(other, BaseTemplate):
return len(self) >= len(other)
raise NotImplementedError
def __le__(self, other: object) -> bool:
if isinstance(other, BaseTemplate):
return len(self) <= len(other)
raise NotImplementedError
def __contains__(self, value: int) -> bool:
"""
All Blocks Must Be Same Value Only Value Places is Diffrent
This Check if Value in First Block If True Means Value Existed In All Template Blocks
args:
value [int]: [Target Value].
return:
[bool]: [value contains template True OtherWise False]
"""
return value in self[0]
def __repr__(self) -> str:
"""
Some Info With This Format
'ObjectName(manyblocks, repeat, order)'
"""
return f"{type(self).__qualname__}(blocks={len(self)}, repeat={self.REPEAT}, order={self._order})"
def __str__(self) -> str:
"""
Template To String With This Format - StringDict
'{block_index: block, ...}'
"""
to_str = (f"{n}: {str(i)}" for n,i in enumerate(self._template))
return f"{{{', '.join(to_str)}}}"
def __exit__(self, *_) -> None:
try:
pass
finally:
del self
@staticmethod
def default_order(order_obj: BaseOrder, size: int) -> BaseOrder:
"""
Created Default Order
args:
order_object [BaseOrder]: [self.ORDER Use When This Method Calls]
size [int]: [self.SIZE Use When This Method Calls]
"""
        raise NotImplementedError
@staticmethod
def valid_block(idx_block: int, size: int) -> int:
"""
Validate Block Number From Template
When Block Indexes Bigger Than Blocks Validate Block Number.
args:
idx_block [int]: [Number Of Block].
return:
[int]: [Block if Exists OtherWise Converted To Existed Block].
"""
        # Changed algorithm: avoids the previous maximum-recursion error and its poor performance
while idx_block >= size:
if idx_block == size:
idx_block -= (size >> 1)
continue
elif idx_block <= (size + (size//2)):
idx_block <<= 1
continue
elif idx_block >= (size * 2):
idx_block //= size
continue
else:
idx_block -= size
continue
return abs(idx_block)
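# Small illustrative sketch (not part of the original module) of how `valid_block`
# folds an out-of-range block index back into the template size; the values below
# were traced by hand from the loop above.
def _example_valid_block():
    assert BaseTemplate.valid_block(100, 256) == 100   # already in range: unchanged
    assert BaseTemplate.valid_block(256, 256) == 128   # equal to size: reduced by size >> 1
    return BaseTemplate.valid_block(300, 256)          # folds to a small in-range index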
# FILE TEMPLATE ABSTRACT
class BaseFileTemplate(BaseTemplate):
SEPARATOR: str
BLOCK: BaseBlock
def __init__(self, path: str | os.PathLike, block_member: int = 256) -> None:
"""
        FileTemplate loads a saved Template.
        ** This module does not support creating Templates; it only loads and uses a saved Template.
        ** NOTE: To copy a Template that is already loaded, use the `save_template()` method.
        const:
            SEPARATOR [str]: [Separator used for `Data to indexes` & `Indexes to Data`].
            BLOCK [BaseBlock]: [Block object, must be an instance of BaseBlock].
        args:
            path [str|PathLike]: [Path of the saved Template file].
            block_member [int]: [Number of members in each Block] default is `256`.
        ** NOTE: After use, the file must be closed with the `close()` method.
        ** NOTE: Use a `with` statement to close the file automatically when done.
"""
if not | |
# -*- coding: utf-8 -*-
#
# test/test_runner.py
# Part of python-daemon, an implementation of PEP 3143.
#
# Copyright © 2009 <NAME> <<EMAIL>>
#
# This is free software: you may copy, modify, and/or distribute this work
# under the terms of the Python Software Foundation License, version 2 or
# later as published by the Python Software Foundation.
# No warranty expressed or implied. See the file LICENSE.PSF-2 for details.
""" Unit test for runner module.
"""
import __builtin__
import os
import sys
import tempfile
import errno
import signal
import scaffold
from test_pidlockfile import (
FakeFileDescriptorStringIO,
setup_pidfile_fixtures,
make_pidlockfile_scenarios,
setup_lockfile_method_mocks,
)
from test_daemon import (
setup_streams_fixtures,
)
import daemon.daemon
from daemon import pidlockfile
from daemon import runner
class Exception_TestCase(scaffold.Exception_TestCase):
""" Test cases for module exception classes. """
def __init__(self, *args, **kwargs):
""" Set up a new instance. """
super(Exception_TestCase, self).__init__(*args, **kwargs)
self.valid_exceptions = {
runner.DaemonRunnerError: dict(
min_args = 1,
types = (Exception,),
),
runner.DaemonRunnerInvalidActionError: dict(
min_args = 1,
types = (runner.DaemonRunnerError, ValueError),
),
runner.DaemonRunnerStartFailureError: dict(
min_args = 1,
types = (runner.DaemonRunnerError, RuntimeError),
),
runner.DaemonRunnerStopFailureError: dict(
min_args = 1,
types = (runner.DaemonRunnerError, RuntimeError),
),
}
def make_runner_scenarios():
""" Make a collection of scenarios for testing DaemonRunner instances. """
pidlockfile_scenarios = make_pidlockfile_scenarios()
scenarios = {
'simple': {
'pidlockfile_scenario_name': 'simple',
},
'pidfile-locked': {
'pidlockfile_scenario_name': 'exist-other-pid-locked',
},
}
for scenario in scenarios.values():
if 'pidlockfile_scenario_name' in scenario:
pidlockfile_scenario = pidlockfile_scenarios.pop(
scenario['pidlockfile_scenario_name'])
scenario['pid'] = pidlockfile_scenario['pid']
scenario['pidfile_path'] = pidlockfile_scenario['path']
scenario['pidfile_timeout'] = 23
scenario['pidlockfile_scenario'] = pidlockfile_scenario
return scenarios
def set_runner_scenario(testcase, scenario_name, clear_tracker=True):
""" Set the DaemonRunner test scenario for the test case. """
scenarios = testcase.runner_scenarios
testcase.scenario = scenarios[scenario_name]
set_pidlockfile_scenario(
testcase, testcase.scenario['pidlockfile_scenario_name'])
if clear_tracker:
testcase.mock_tracker.clear()
def set_pidlockfile_scenario(testcase, scenario_name):
""" Set the PIDLockFile test scenario for the test case. """
scenarios = testcase.pidlockfile_scenarios
testcase.pidlockfile_scenario = scenarios[scenario_name]
setup_lockfile_method_mocks(
testcase, testcase.pidlockfile_scenario,
testcase.lockfile_class_name)
def setup_runner_fixtures(testcase):
""" Set up common test fixtures for DaemonRunner test case. """
testcase.mock_tracker = scaffold.MockTracker()
setup_pidfile_fixtures(testcase)
setup_streams_fixtures(testcase)
testcase.runner_scenarios = make_runner_scenarios()
testcase.mock_stderr = FakeFileDescriptorStringIO()
scaffold.mock(
"sys.stderr",
mock_obj=testcase.mock_stderr,
tracker=testcase.mock_tracker)
simple_scenario = testcase.runner_scenarios['simple']
testcase.lockfile_class_name = "pidlockfile.TimeoutPIDLockFile"
testcase.mock_runner_lock = scaffold.Mock(
testcase.lockfile_class_name,
tracker=testcase.mock_tracker)
testcase.mock_runner_lock.path = simple_scenario['pidfile_path']
scaffold.mock(
testcase.lockfile_class_name,
returns=testcase.mock_runner_lock,
tracker=testcase.mock_tracker)
class TestApp(object):
def __init__(self):
self.stdin_path = testcase.stream_file_paths['stdin']
self.stdout_path = testcase.stream_file_paths['stdout']
self.stderr_path = testcase.stream_file_paths['stderr']
self.pidfile_path = simple_scenario['pidfile_path']
self.pidfile_timeout = simple_scenario['pidfile_timeout']
run = scaffold.Mock(
"TestApp.run",
tracker=testcase.mock_tracker)
testcase.TestApp = TestApp
scaffold.mock(
"daemon.runner.DaemonContext",
returns=scaffold.Mock(
"DaemonContext",
tracker=testcase.mock_tracker),
tracker=testcase.mock_tracker)
testcase.test_app = testcase.TestApp()
testcase.test_program_name = "bazprog"
testcase.test_program_path = (
"/foo/bar/%(test_program_name)s" % vars(testcase))
testcase.valid_argv_params = {
'start': [testcase.test_program_path, 'start'],
'stop': [testcase.test_program_path, 'stop'],
'restart': [testcase.test_program_path, 'restart'],
}
def mock_open(filename, mode=None, buffering=None):
if filename in testcase.stream_files_by_path:
result = testcase.stream_files_by_path[filename]
else:
result = FakeFileDescriptorStringIO()
result.mode = mode
result.buffering = buffering
return result
scaffold.mock(
"__builtin__.open",
returns_func=mock_open,
tracker=testcase.mock_tracker)
scaffold.mock(
"os.kill",
tracker=testcase.mock_tracker)
scaffold.mock(
"sys.argv",
mock_obj=testcase.valid_argv_params['start'],
tracker=testcase.mock_tracker)
testcase.test_instance = runner.DaemonRunner(testcase.test_app)
testcase.scenario = NotImplemented
class DaemonRunner_TestCase(scaffold.TestCase):
""" Test cases for DaemonRunner class. """
def setUp(self):
""" Set up test fixtures. """
setup_runner_fixtures(self)
set_runner_scenario(self, 'simple')
scaffold.mock(
"runner.DaemonRunner.parse_args",
tracker=self.mock_tracker)
self.test_instance = runner.DaemonRunner(self.test_app)
def tearDown(self):
""" Tear down test fixtures. """
scaffold.mock_restore()
def test_instantiate(self):
""" New instance of DaemonRunner should be created. """
self.failUnlessIsInstance(self.test_instance, runner.DaemonRunner)
def test_parses_commandline_args(self):
""" Should parse commandline arguments. """
expect_mock_output = """\
Called runner.DaemonRunner.parse_args()
...
"""
self.failUnlessMockCheckerMatch(expect_mock_output)
def test_has_specified_app(self):
""" Should have specified application object. """
self.failUnlessIs(self.test_app, self.test_instance.app)
def test_sets_pidfile_none_when_pidfile_path_is_none(self):
""" Should set ‘pidfile’ to ‘None’ when ‘pidfile_path’ is ‘None’. """
pidfile_path = None
self.test_app.pidfile_path = pidfile_path
expect_pidfile = None
instance = runner.DaemonRunner(self.test_app)
self.failUnlessIs(expect_pidfile, instance.pidfile)
def test_error_when_pidfile_path_not_string(self):
""" Should raise ValueError when PID file path not a string. """
pidfile_path = object()
self.test_app.pidfile_path = pidfile_path
expect_error = ValueError
self.failUnlessRaises(
expect_error,
runner.DaemonRunner, self.test_app)
def test_error_when_pidfile_path_not_absolute(self):
""" Should raise ValueError when PID file path not absolute. """
pidfile_path = "foo/bar.pid"
self.test_app.pidfile_path = pidfile_path
expect_error = ValueError
self.failUnlessRaises(
expect_error,
runner.DaemonRunner, self.test_app)
def test_creates_lock_with_specified_parameters(self):
""" Should create a TimeoutPIDLockFile with specified params. """
pidfile_path = self.scenario['pidfile_path']
pidfile_timeout = self.scenario['pidfile_timeout']
lockfile_class_name = self.lockfile_class_name
expect_mock_output = """\
...
Called %(lockfile_class_name)s(
%(pidfile_path)r,
%(pidfile_timeout)r)
""" % vars()
scaffold.mock_restore()
self.failUnlessMockCheckerMatch(expect_mock_output)
def test_has_created_pidfile(self):
""" Should have new PID lock file as `pidfile` attribute. """
expect_pidfile = self.mock_runner_lock
instance = self.test_instance
self.failUnlessIs(
expect_pidfile, instance.pidfile)
def test_daemon_context_has_created_pidfile(self):
""" DaemonContext component should have new PID lock file. """
expect_pidfile = self.mock_runner_lock
daemon_context = self.test_instance.daemon_context
self.failUnlessIs(
expect_pidfile, daemon_context.pidfile)
def test_daemon_context_has_specified_stdin_stream(self):
""" DaemonContext component should have specified stdin file. """
test_app = self.test_app
expect_file = self.stream_files_by_name['stdin']
daemon_context = self.test_instance.daemon_context
self.failUnlessEqual(expect_file, daemon_context.stdin)
def test_daemon_context_has_stdin_in_read_mode(self):
""" DaemonContext component should open stdin file for read. """
expect_mode = 'r'
daemon_context = self.test_instance.daemon_context
self.failUnlessIn(daemon_context.stdin.mode, expect_mode)
def test_daemon_context_has_specified_stdout_stream(self):
""" DaemonContext component should have specified stdout file. """
test_app = self.test_app
expect_file = self.stream_files_by_name['stdout']
daemon_context = self.test_instance.daemon_context
self.failUnlessEqual(expect_file, daemon_context.stdout)
def test_daemon_context_has_stdout_in_append_mode(self):
""" DaemonContext component should open stdout file for append. """
expect_mode = 'w+'
daemon_context = self.test_instance.daemon_context
self.failUnlessIn(daemon_context.stdout.mode, expect_mode)
def test_daemon_context_has_specified_stderr_stream(self):
""" DaemonContext component should have specified stderr file. """
test_app = self.test_app
expect_file = self.stream_files_by_name['stderr']
daemon_context = self.test_instance.daemon_context
self.failUnlessEqual(expect_file, daemon_context.stderr)
def test_daemon_context_has_stderr_in_append_mode(self):
""" DaemonContext component should open stderr file for append. """
expect_mode = 'w+'
daemon_context = self.test_instance.daemon_context
self.failUnlessIn(daemon_context.stderr.mode, expect_mode)
def test_daemon_context_has_stderr_with_no_buffering(self):
""" DaemonContext component should open stderr file unbuffered. """
expect_buffering = 0
daemon_context = self.test_instance.daemon_context
self.failUnlessEqual(
expect_buffering, daemon_context.stderr.buffering)
class DaemonRunner_usage_exit_TestCase(scaffold.TestCase):
""" Test cases for DaemonRunner.usage_exit method. """
def setUp(self):
""" Set up test fixtures. """
setup_runner_fixtures(self)
set_runner_scenario(self, 'simple')
def tearDown(self):
""" Tear down test fixtures. """
scaffold.mock_restore()
def test_raises_system_exit(self):
""" Should raise SystemExit exception. """
instance = self.test_instance
argv = [self.test_program_path]
self.failUnlessRaises(
SystemExit,
instance._usage_exit, argv)
def test_message_follows_conventional_format(self):
""" Should emit a conventional usage message. """
instance = self.test_instance
progname = self.test_program_name
argv = [self.test_program_path]
expect_stderr_output = """\
usage: %(progname)s ...
""" % vars()
self.failUnlessRaises(
SystemExit,
instance._usage_exit, argv)
self.failUnlessOutputCheckerMatch(
expect_stderr_output, self.mock_stderr.getvalue())
class DaemonRunner_parse_args_TestCase(scaffold.TestCase):
""" Test cases for DaemonRunner.parse_args method. """
def setUp(self):
""" Set up test fixtures. """
setup_runner_fixtures(self)
set_runner_scenario(self, 'simple')
scaffold.mock(
"daemon.runner.DaemonRunner._usage_exit",
raises=NotImplementedError,
tracker=self.mock_tracker)
def tearDown(self):
""" Tear down test fixtures. """
scaffold.mock_restore()
def test_emits_usage_message_if_insufficient_args(self):
""" Should emit a usage message and exit if too few arguments. """
instance = self.test_instance
argv = [self.test_program_path]
expect_mock_output = """\
Called daemon.runner.DaemonRunner._usage_exit(%(argv)r)
""" % vars()
try:
instance.parse_args(argv)
except NotImplementedError:
pass
self.failUnlessMockCheckerMatch(expect_mock_output)
def test_emits_usage_message_if_unknown_action_arg(self):
""" Should emit a usage message and exit if unknown action. """
instance = self.test_instance
progname = self.test_program_name
argv = [self.test_program_path, 'bogus']
expect_mock_output = """\
Called daemon.runner.DaemonRunner._usage_exit(%(argv)r)
""" % vars()
try:
instance.parse_args(argv)
except NotImplementedError:
pass
self.failUnlessMockCheckerMatch(expect_mock_output)
def test_should_parse_system_argv_by_default(self):
""" Should parse sys.argv by default. """
instance = self.test_instance
expect_action = 'start'
argv = self.valid_argv_params['start']
scaffold.mock(
"sys.argv",
mock_obj=argv,
tracker=self.mock_tracker)
instance.parse_args()
self.failUnlessEqual(expect_action, instance.action)
def test_sets_action_from_first_argument(self):
""" Should set action from first commandline argument. """
instance = self.test_instance
for name, argv in self.valid_argv_params.items():
expect_action = name
instance.parse_args(argv)
self.failUnlessEqual(expect_action, instance.action)
class DaemonRunner_do_action_TestCase(scaffold.TestCase):
""" Test cases for DaemonRunner.do_action method. """
def setUp(self):
""" Set up test fixtures. """
setup_runner_fixtures(self)
set_runner_scenario(self, 'simple')
def tearDown(self):
""" Tear down test fixtures. """
scaffold.mock_restore()
def test_raises_error_if_unknown_action(self):
""" Should emit a usage message and exit if action is unknown. """
instance = self.test_instance
instance.action = 'bogus'
expect_error = runner.DaemonRunnerInvalidActionError
self.failUnlessRaises(
expect_error,
instance.do_action)
class DaemonRunner_do_action_start_TestCase(scaffold.TestCase):
""" Test cases for DaemonRunner.do_action method, action 'start'. """
def setUp(self):
""" Set up test fixtures. """
setup_runner_fixtures(self)
set_runner_scenario(self, 'simple')
self.test_instance.action = 'start'
def tearDown(self):
""" Tear down test fixtures. """
scaffold.mock_restore()
def test_raises_error_if_pidfile_locked(self):
""" Should raise error if PID file is locked. """
set_pidlockfile_scenario(self, 'exist-other-pid-locked')
instance = self.test_instance
instance.daemon_context.open.mock_raises = (
pidlockfile.AlreadyLocked)
pidfile_path = self.scenario['pidfile_path']
expect_error = runner.DaemonRunnerStartFailureError
expect_message_content = pidfile_path
try:
instance.do_action()
except expect_error, exc:
pass
else:
raise self.failureException(
"Failed to raise " + expect_error.__name__)
self.failUnlessIn(exc.message, expect_message_content)
def test_breaks_lock_if_no_such_process(self):
""" Should request breaking lock if PID file process is not running. """
set_runner_scenario(self, 'pidfile-locked')
instance = self.test_instance
self.mock_runner_lock.read_pid.mock_returns = (
self.scenario['pidlockfile_scenario']['pidfile_pid'])
pidfile_path = self.scenario['pidfile_path']
test_pid = self.scenario['pidlockfile_scenario']['pidfile_pid']
expect_signal = signal.SIG_DFL
error = OSError(errno.ESRCH, "Not running")
os.kill.mock_raises = error
lockfile_class_name = self.lockfile_class_name
expect_mock_output = """\
...
Called os.kill(%(test_pid)r, %(expect_signal)r)
Called %(lockfile_class_name)s.break_lock()
...
""" % vars()
instance.do_action()
scaffold.mock_restore()
self.failUnlessMockCheckerMatch(expect_mock_output)
def test_requests_daemon_context_open(self):
""" Should request the daemon context to open. """
instance = self.test_instance
expect_mock_output = """\
...
Called DaemonContext.open()
...
"""
instance.do_action()
self.failUnlessMockCheckerMatch(expect_mock_output)
def test_emits_start_message_to_stderr(self):
""" Should emit start message to stderr. """
instance = self.test_instance
current_pid = self.scenario['pid']
expect_stderr = """\
started with pid %(current_pid)d
""" % vars()
instance.do_action()
self.failUnlessOutputCheckerMatch(
expect_stderr, self.mock_stderr.getvalue())
def test_requests_app_run(self):
""" Should request the application to run. | |
for iz in range(1, len(col) - 1):
if col[iz] > thresh and col[iz + 1] > thresh:
return Z[iz] + (Z[iz - 1] - Z[iz]) * (col[iz] - thresh) / (
col[iz] - col[iz - 1]
)
return np.nan
def _mld_chunk(chunk, Z=None, smoothed=False, threshold=None):
if chunk.ndim > 1:
result = np.empty(chunk.shape[:-1])
for idxs in np.ndindex(chunk.shape[:-1]):
# MITgcm saves in > byte order, which numba doesn't support
col = abs(chunk[idxs]).astype(float)
if smoothed:
col = savgol_filter(col, 5, 4)
result[idxs] = _mld(col, Z, threshold)
return result
col = abs(chunk).astype(float)
if smoothed:
col = savgol_filter(col, 5, 4)
return _mld(col, Z, threshold)
@add_diag("mld", needs="$var")
def mixed_layer_depth(
d, g, var=None, threshold=0.02, smoothed=False, dask="parallelized", **roll_kwargs
):
"""
Compute the Mixed Layer Depth (MLD) as the first depth at which the vertical derivative of some variable exceeds a threshold.
The first layer is excluded from the computation.
PARAMETERS
----------
threshold : absolute value of the vertical derivative of "var" above which the column is considered mixed.
var : variable to look at; default is density (from the add_diag density diagnostic).
smoothed : if True, applies a 5-level, 4th-order Savitzky-Golay filter to each column.
roll_kwargs : mapping of dim -> window size for rolling averages applied to the vertical derivative field beforehand.
"""
Dz = g.diff(d[var], "Z", boundary="fill", fill_value=np.nan)
for dim, win in roll_kwargs.items():
Dz = Dz.rolling(dim={dim: win}, center=True, min_periods=1).mean()
Z = _find_coord(Dz, "Z")
if dask == "parallelized":
Dz = Dz.chunk({Z: -1})
# MITgcm saves in > byte order, which numba doesn't support
mld = xr.apply_ufunc(
_mld_chunk,
Dz,
input_core_dims=[(Z,)],
dask=dask,
output_dtypes=[float],
kwargs=dict(
Z=d[Z].values.astype(float), smoothed=smoothed, threshold=threshold
),
)
mld.attrs.update(
name="MLD",
long_name="Mixed Layer Depth",
description=f"Computed from {var} with dF/dz >= {threshold}",
units=d[Z].attrs.get("units", "m"),
)
return mld
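# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Shows how the "mld" diagnostic above might be invoked, assuming the decorated
# function can still be called directly with an xarray Dataset `ds` and a matching
# xgcm-style grid object `grid`. The variable name "RHOAnoma" and the 3-step
# rolling mean on "time" are assumptions chosen only to illustrate the arguments.
def _example_mld_usage(ds, grid):
    """Illustrative only: MLD from a density-like variable."""
    return mixed_layer_depth(ds, grid, var="RHOAnoma", threshold=0.02,
                             smoothed=True, time=3)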
@add_diag("GSW", needs="$varargs")
def from_gsw(d, g, func=None, varargs=None, dask="parallelized", **kwargs):
"""Wrapper fo any diagnostic available in gsw."""
if GSWVARS is None:
raise NotImplementedError(
"Package GSW is missing, thus from_gsw cannot be implemented."
)
if isinstance(func, str):
func = getattr(gsw, func)
if varargs is None:
varargs = map(
GSWVARS.get,
[
k
for k, v in ins.signature(func).parameters.items()
if v.default is ins._empty
],
)
N = -1
for line in map(str.strip, func.__doc__.split("\n")):
if "Returns" in line:
N = 3
elif N == 1:
name, units = line.split(":")
name = name.strip()
units = units.split(",")[-1].strip()
elif N == 0:
long_name = line
break
N -= 1
data = xr.apply_ufunc(
func,
*[d[var] for var in varargs],
kwargs=kwargs,
dask=dask,
output_dtypes=[float],
)
data.attrs.update({"long_name": long_name, "units": units, "name": name})
return data
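# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# One way the gsw wrapper above might be used. gsw.sigma0(SA, CT) is a real gsw
# function; that GSWVARS maps its parameter names ('SA', 'CT') to variables
# actually present in the dataset is an assumption.
def _example_from_gsw(ds, grid):
    """Illustrative only: potential density anomaly via gsw.sigma0."""
    return from_gsw(ds, grid, func="sigma0")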
def _interp1DAt(var, coord, X=None):
if var.ndim > 1:
result = np.empty(var.shape[:-1])
for idxs in np.ndindex(var.shape[:-1]):
result[idxs] = _interp1DAt(var[idxs], coord[idxs], X=X)
return result
return interp1d(X, var)(coord)
@add_diag("InterpAt", needs="$varargs")
def interp_at(d, g, varargs=None, dim=None, dask="parallelized"):
"""
Interpolate a variable at the values of another variable along one dimension.
Example : varargs = [THETA, mld] : THETA(t, z, y, x) is interpolated along Z at Z=mld(t, y, x).
"""
var, coordvar = varargs
dim = (
dim if dim is not None else set(d[var].dims).difference(d[coordvar].dims).pop()
)
X = d[dim].values
data = xr.apply_ufunc(
_interp1DAt,
d[var],
d[coordvar],
input_core_dims=[[dim], []],
dask=dask,
output_dtypes=[float],
kwargs={"X": X},
keep_attrs=True,
)
data.attrs.update(
long_name=d[var].attrs.get("long_name", var)
+ " interpolated to {} along {}".format(coordvar, dim),
name="{}_{}_{}".format(var, dim, coordvar),
)
return data
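# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# The docstring example written out: interpolate THETA(t, z, y, x) onto the
# mixed-layer depth mld(t, y, x) along the vertical dimension "Z". Assumes both
# variables are already present in the dataset `ds`.
def _example_interp_at_usage(ds, grid):
    """Illustrative only: temperature at the mixed-layer depth."""
    return interp_at(ds, grid, varargs=["THETA", "mld"], dim="Z")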
@add_diag("Roll_dataset", dataset=True)
def roll_dataset(d, g, roll_dim="X", U=-0.05):
"""Rolls a whole datasets, rolls on roll_dim of amount U*t for each t."""
for varname in d.variables.keys():
X = _find_coord(d[varname], roll_dim)
if (X is not None) and ("time" in d[varname].dims):
time_sec = d.time.values.astype("timedelta64[s]")
for it, tim in enumerate(time_sec):
d[varname][it, :] = (
d[varname]
.isel(time=it)
.roll(shifts={X: (tim * U).astype(int)}, roll_coords=False)
)
d[varname].attrs.update(
description=d[varname].attrs.get("description", "")
+ "Rolled on {} by {} m/s".format(X, U)
)
return d
def list_diagnostics():
"""List all diagnostics, giving info about the needed fields, arguments and docstrings."""
outStr = (
COLORS.LIGHTBLUE + COLORS.BOLD + "Available diagnostics:" + COLORS.ENDC + "\n"
)
for diag in DIAGNOSTICS.values():
outStr += COLORS.YELLOW + diag["name"] + COLORS.ENDC + "\n"
outStr += (
COLORS.BLUE
+ "\tDiagnostics needed (either raw or computed before): "
+ COLORS.ENDC
)
if isinstance(diag["needs"], str):
outStr += "Given by argument : " + diag["needs"][1:]
elif diag["needs"] is None:
outStr += "None"
else:
outStr += ", ".join(diag["needs"])
outStr += "\n"
outStr += (
COLORS.BLUE
+ "\tExtra arguments: "
+ COLORS.ENDC
+ ", ".join(diag["kwarglist"])
+ "\n"
)
outStr += (
COLORS.PURPLE
+ "\tDoc-string:"
+ COLORS.ENDC
+ "\n\t".join(
diag["func"].__doc__.split("\n")
+ ([] if diag["parent"] is None else diag["parent"].__doc__.split("\n"))
)
)
outStr += "\n"
return outStr
def _get_output_specs(outputs, input_file):
"""Parse the output specs given to diagnose."""
specs = []
for output in outputs:
means = []
coord_slices = {}
idx_slices = {}
*params, outfile = output.split(",")
for spec in params:
coord = "X" if "X" in spec else ("Y" if "Y" in spec else "Z")
mean, slicespec = spec.split(coord)
if ":" in slicespec:
start, end, *step = slicespec.split(":")
start = (
None
if not start
else (float(start) if "." in start else int(start))
)
end = None if not end else (float(end) if "." in end else int(end))
step = None if len(step) == 0 else int(step[0])
if isinstance(start, float) or isinstance(end, float):
coord_slices.update(
{
cname: slice(start, end, step)
for cname in MITgcm_COORD_NAMES[coord]
}
)
else:
idx_slices.update(
{
cname: slice(start, end, step)
for cname in MITgcm_COORD_NAMES[coord]
}
)
if mean == "m":
means.extend(MITgcm_COORD_NAMES[coord])
if outfile.startswith("_"):
outfile = input_file.stem + outfile
if not outfile.endswith(".nc"):
outfile += ".nc"
specs.append([coord_slices, idx_slices, means, outfile])
return specs
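# --- Hedged example (added for illustration; not part of the original file) ---
# One possible output spec in the format parsed above (and described in the
# `diagnose` argparse help below): "mX" requests a mean over X, "Z0:50." slices
# the upper 50.0 m by coordinate (the "." makes it a coordinate slice), and a
# leading "_" in the file name appends it to the input file stem.
def _example_output_spec():
    """Illustrative only: parse a single output spec."""
    from pathlib import Path
    # Expected shape of the result (coordinate names depend on MITgcm_COORD_NAMES):
    # [[{Z coords: slice(0, 50.0, None)}, {}, [X coord names...], "run_surf.nc"]]
    return _get_output_specs(["mX,Z0:50.,_surf.nc"], Path("run.nc"))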
def _get_index_of_diag(diaglist, diagname):
"""Helper function to get index"""
for i, d in enumerate(diaglist):
if d["name"] == diagname:
return i
return None
def _sort_diagnostics(diaglist, dataset, v=print):
"""Sort a list of diagnostics, inserting dependencies and solving cycles"""
Npre = 0
for N in range(100): # A bit ugly, we limit to 100 iterations
done = True
for i in range(len(diaglist)):
if diaglist[i]["dataset"]: # Go through "dataset-diags" first
if diaglist[i]["order"] == "pre":
done = False
diaglist.insert(Npre, diaglist.pop(i))
Npre += 1
elif diaglist[i]["order"] == "post":
done = False
diaglist.append(diaglist.pop(i))
diaglist[i]["order"] = "solved" # So we don't loop again on them
if isinstance(diaglist[i]["needs"], str) and diaglist[i][
"needs"
].startswith("$"):
# Dependencies given by a kwarg, silent if the kwarg wasn't given : diags should take care of this.
done = False
needed = diaglist[i]["kwargs"].get(diaglist[i]["needs"][1:])
diaglist[i]["needs"] = [needed] if isinstance(needed, str) else needed
if diaglist[i]["needs"] is not None:
# There are some dependencies
for needed in diaglist[i]["needs"]:
idxof = _get_index_of_diag(diaglist, needed)
if idxof is None: # needed wasn't found in current diaglist
if (
needed in DIAGNOSTICS
): # Is it a Diag?, if yes, instert it before current diag
done = False
diaglist.insert(i, DIAGNOSTICS[needed].copy())
v(f'Adding {needed} before {diaglist[i + 1]["name"]}', 3)
elif needed in dataset: # Is it a raw variable?
v(f"From dataset: {needed}", 3)
else:
raise ValueError(
f"Unable to find dependency {needed} in dataset or available diagnostics."
)
elif idxof > i: # It is listed, but in the wrong order.
done = False
diaglist.insert(i, diaglist.pop(idxof))
v(f'Pushing {needed} before {diaglist[i + 1]["name"]}', 3)
if done: # A complete iteration was done without changes
v(f"Done ordering diagnostics after {N} iterations")
v("\n".join([f"{d['name']} (Needs: {d['needs']})" for d in diaglist]), 3)
break
else: # Exceeded the max iterations
v([f"{d['name']} (Needs: {d['needs']})" for d in diaglist], 3)
raise RecursionError("Unable to solve the dependencies properly.")
return diaglist
def diagnose():
"""Entry-point for the command-line interface."""
parser = argparse.ArgumentParser(
description=(
"CLI interface to compute complementary diagnostics on MITgcm data.\n"
"Multiple diagnostics can be computed and saved in the same netCDF dataset.\n"
)
)
parser.add_argument(
"-l",
"--list",
help=("Lists all available diagnostics and their options."),
action="store_true",
)
parser.add_argument(
"-i",
"--input",
help=(
"Input MITgcm run folder and prefix, or netCDF file. Defaults to current dir.\n"
"Given as 'folder:prefix' or 'file.nc'. If no prefix is given, run_folder is called with merge_full=True."
),
default=".",
)
parser.add_argument(
"-o",
"--output",
help=(
'Output netCDF file name. If it starts with "_", the input file/folder name is prepended\n'
"Multiple output datasets are possible. Can be saved with slices or means as : [[m]Xi:j:n][Y...],file.nc"
"If i or j have a ., they are treated as coordinates, if not as indexes (ints)"
),
nargs="+",
)
parser.add_argument(
"--dask",
help="Control dask behavior for all diagnostics where it is possible (where `dask` | |
projection_dim=50, projection_hidden=0, projection_dropout=0.2,
compare_dim=288, compare_dropout=0.2,
dense_dim=50, dense_dropout=0.2,
lr=1e-3, activation='relu'):
q1 = Input(shape=(max_sequence_length,), name='first_sentences')
q2 = Input(shape=(max_sequence_length,), name='second_sentences')
meta_features_input = Input(shape=(36,), name='mata-features')
q1_exact_match = Input(shape=(max_sequence_length,), name='first_exact_match')
q2_exact_match = Input(shape=(max_sequence_length,), name='second_exact_match')
embedding = Embedding(nb_words, 150,
weights=[embedding_matrix],
input_length=max_sequence_length,
trainable=False)
flex_embedding = Embedding(nb_words, 20,
input_length=max_sequence_length,
trainable=True)
em_embeddings = Reshape((max_sequence_length, 1))
q1_embed = Concatenate()([embedding(q1), em_embeddings(q1_exact_match),])
q1_encoded = SpatialDropout1D(0.2)(q1_embed)
q2_embed = Concatenate()([embedding(q2), em_embeddings(q2_exact_match),])
q2_encoded = SpatialDropout1D(0.2)(q2_embed)
nb_filters = 64
cnns = [Conv1D(64, 1, strides=1, padding='same', activation='relu') for i in range(3)]
gates_cnns = [Conv1D(nb_filters, 3, dilation_rate=1, padding='same', activation='tanh') for i in range(3)]
sigm_cnns = [Conv1D(nb_filters, 3, dilation_rate=1, padding='same', activation='sigmoid') for i in range(3)]
for i in range(len(cnns)):
drop = Dropout(0.1)
q1_t = gates_cnns[i](q1_encoded)
q2_t = gates_cnns[i](q2_encoded)
q1_s = sigm_cnns[i](q1_encoded)
q2_s = sigm_cnns[i](q2_encoded)
q1_x = Multiply()([q1_t, q1_s])
q1_x = cnns[i](q1_x)
q1_x = drop(q1_x)
q2_x = Multiply()([q2_t, q2_s])
q2_x = cnns[i](q2_x)
q2_x = drop(q2_x)
q1_aligned, q2_aligned = soft_attention_alignment(q1_x, q2_x)
q1_encoded = Concatenate()([q1_x, q2_aligned, q1_encoded])
q2_encoded = Concatenate()([q2_x, q1_aligned, q2_encoded])
attn = AttentionWeightedAverage()
q1_rep = apply_multiple(q1_encoded, [GlobalAvgPool1D(), GlobalMaxPool1D(), attn])
q2_rep = apply_multiple(q2_encoded, [GlobalAvgPool1D(), GlobalMaxPool1D(), attn])
# Classifier
q_diff = substract(q1_rep, q2_rep)
q_multi = Multiply()([q1_rep, q2_rep])
h_all = Concatenate()([q1_rep, q2_rep, q_diff, q_multi,])
h_all = Dropout(0.2)(h_all)
h_all = BatchNormalization()(h_all)
h_all = Dense(64, activation='relu', kernel_initializer='glorot_uniform', kernel_regularizer=regularizers.l2(1e-6))(h_all)
out_ = Dense(3, activation='softmax')(h_all)
model = Model(inputs=[q1, q2, meta_features_input, q1_exact_match, q2_exact_match], outputs=out_)
model.compile(optimizer=Adam(lr=lr, decay=1e-6, clipnorm=1.5), loss='categorical_crossentropy',
metrics=['accuracy', weighted_accuracy])
model.summary()
return model
def get_darnn(nb_words, embedding_dim, embedding_matrix, max_sequence_length, out_size,
projection_dim=50, projection_hidden=0, projection_dropout=0.2,
compare_dim=288, compare_dropout=0.2,
dense_dim=50, dense_dropout=0.2,
lr=1e-3, activation='relu'):
q1 = Input(shape=(max_sequence_length,), name='first_sentences')
q2 = Input(shape=(max_sequence_length,), name='second_sentences')
q1_exact_match = Input(shape=(max_sequence_length,), name='first_exact_match')
q2_exact_match = Input(shape=(max_sequence_length,), name='second_exact_match')
input_layer_3 = Input(shape=(36,), name='mata-features', dtype="float32")
embedding = Embedding(nb_words, embedding_dim,
weights=[embedding_matrix],
input_length=max_sequence_length,
trainable=False)
em_embeddings = Embedding(2, 1,
input_length=max_sequence_length,
trainable=True)
q1_embed = embedding(q1)
q1_embed = SpatialDropout1D(0.1)(q1_embed)
q2_embed = embedding(q2)
q2_embed = SpatialDropout1D(0.1)(q2_embed)
th = TimeDistributed(Highway(activation='relu'))
q1_embed = Dropout(0.1)(th(q1_embed,))
q2_embed = Dropout(0.1)(th(q2_embed,))
rnns = [Bidirectional(CuDNNGRU(42, return_sequences=True)) for i in range(3)]
q1_res = []
q2_res = []
for idx, rnn in enumerate(rnns):
q1_seq = rnn(q1_embed)
q1_seq = Dropout(0.15)(q1_seq)
q2_seq = rnn(q2_embed)
q2_seq = Dropout(0.15)(q2_seq)
q1_aligned, q2_aligned = soft_attention_alignment(q1_seq, q2_seq)
q1_res.append(q2_aligned)
q1_res.append(q1_seq)
q2_res.append(q1_aligned)
q2_res.append(q2_seq)
q1_embed = Concatenate()([q1_embed, q1_seq, q2_aligned,])
q2_embed = Concatenate()([q2_embed, q2_seq, q1_aligned,])
q1_res = Concatenate()(q1_res)
q2_res = Concatenate()(q2_res)
attn = AttentionWeightedAverage()
q1_rep = apply_multiple(q1_embed, [GlobalAvgPool1D(), GlobalMaxPool1D(), attn])
q2_rep = apply_multiple(q2_embed, [GlobalAvgPool1D(), GlobalMaxPool1D(), attn])
# Classifier
q_diff = substract(q1_rep, q2_rep)
q_multi = Multiply()([q1_rep, q2_rep])
h_all = Concatenate()([q1_rep, q2_rep, q_diff, q_multi,])
h_all = Dropout(0.35)(h_all)
h_all = Dense(300, activation='relu')(h_all)
out_ = Dense(3, activation='softmax')(h_all)
model = Model(inputs=[q1, q2, input_layer_3, q1_exact_match, q2_exact_match], outputs=out_)
model.compile(optimizer=Adam(lr=lr, decay=1e-6, clipvalue=1.5), loss='categorical_crossentropy',
metrics=['accuracy', weighted_accuracy])
model.summary()
return model
def get_char_darnn(nb_words, embedding_dim, embedding_matrix, max_sequence_length, out_size,
projection_dim=50, projection_hidden=0, projection_dropout=0.2,
compare_dim=288, compare_dropout=0.2,
dense_dim=50, dense_dropout=0.2,
lr=1e-3, activation='relu'):
q1 = Input(shape=(max_sequence_length,), name='first_sentences')
q2 = Input(shape=(max_sequence_length,), name='second_sentences')
q1_exact_match = Input(shape=(max_sequence_length,), name='first_exact_match')
q2_exact_match = Input(shape=(max_sequence_length,), name='second_exact_match')
input_layer_3 = Input(shape=(36,), name='mata-features', dtype="float32")
embedding = Embedding(nb_words, 150,
weights=[embedding_matrix],
input_length=max_sequence_length,
trainable=False)
em_embeddings = Embedding(2, 1,
input_length=max_sequence_length,
trainable=True)
q1_embed = Concatenate()([embedding(q1), em_embeddings(q1_exact_match)])
q1_embed = SpatialDropout1D(0.1)(q1_embed)
q2_embed = Concatenate()([embedding(q2), em_embeddings(q2_exact_match)])
q2_embed = SpatialDropout1D(0.1)(q2_embed)
rnns = [CuDNNGRU(42, return_sequences=True) for i in range(3)]
q1_res = []
q2_res = []
for idx, rnn in enumerate(rnns):
q1_seq = rnn(q1_embed)
q1_seq = Dropout(0.1)(q1_seq)
q2_seq = rnn(q2_embed)
q2_seq = Dropout(0.1)(q2_seq)
q1_aligned, q2_aligned = soft_attention_alignment(q1_seq, q2_seq)
q1_res.append(q2_aligned)
q1_res.append(q1_seq)
q2_res.append(q1_aligned)
q2_res.append(q2_seq)
q1_embed = Concatenate()([q1_seq, q2_aligned, q1_embed])
q2_embed = Concatenate()([q2_seq, q1_aligned, q2_embed])
# Pooling
#q1_rep = Flatten()(capsule_pooling(q1_encoded))
#q2_rep = Flatten()(capsule_pooling(q2_encoded))
q1_res = Concatenate()(q1_res)
q2_res = Concatenate()(q2_res)
q1_rep = apply_multiple(q1_res, [GlobalAvgPool1D(), GlobalMaxPool1D()])
q2_rep = apply_multiple(q2_res, [GlobalAvgPool1D(), GlobalMaxPool1D()])
# Classifier
q_diff = substract(q1_rep, q2_rep)
q_multi = Multiply()([q1_rep, q2_rep])
h_all = Concatenate()([q1_rep, q2_rep, q_diff, q_multi,])
h_all = Dropout(0.1)(h_all)
h_all = Dense(256, activation='relu')(h_all)
out_ = Dense(3, activation='softmax')(h_all)
model = Model(inputs=[q1, q2, input_layer_3, q1_exact_match, q2_exact_match], outputs=out_)
model.compile(optimizer=Adam(lr=lr, decay=1e-6,), loss='categorical_crossentropy',
metrics=['accuracy', weighted_accuracy])
model.summary()
return model
def get_ESIM(nb_words, embedding_dim, embedding_matrix, max_sequence_length, out_size,
projection_dim=50, projection_hidden=0, projection_dropout=0.2,
compare_dim=288, compare_dropout=0.2,
dense_dim=50, dense_dropout=0.2,
lr=1e-3, activation='relu'):
q1 = Input(shape=(max_sequence_length,), name='first_sentences')
q2 = Input(shape=(max_sequence_length,), name='second_sentences')
q1_exact_match = Input(shape=(max_sequence_length,), name='first_exact_match')
q2_exact_match = Input(shape=(max_sequence_length,), name='second_exact_match')
input_layer_3 = Input(shape=(36,), name='mata-features', dtype="float32")
embedding = Embedding(nb_words, embedding_dim,
weights=[embedding_matrix],
input_length=max_sequence_length,
trainable=False)
q1_embed = embedding(q1)
q1_embed = SpatialDropout1D(0.1)(q1_embed)
q2_embed = embedding(q2)
q2_embed = SpatialDropout1D(0.1)(q2_embed)
batch_norm = BatchNormalization(axis=-1)
q1_embed = batch_norm(q1_embed,)
q2_embed = batch_norm(q2_embed,)
aggreation_gru = Bidirectional(CuDNNLSTM(100, return_sequences=True))
q1_seq = aggreation_gru(q1_embed)
q2_seq = aggreation_gru(q2_embed)
q1_aligned, q2_aligned = soft_attention_alignment(q1_seq, q2_seq)
q1_vec = Concatenate()([q1_seq, q2_aligned, substract(q1_seq, q2_aligned), Multiply()([q1_seq, q2_aligned])])
q2_vec = Concatenate()([q2_seq, q1_aligned, substract(q2_seq, q1_aligned), Multiply()([q2_seq, q1_aligned])])
compare_gru = Bidirectional(CuDNNLSTM(100, return_sequences=True))
q1_rep = compare_gru(q1_vec)
q2_rep = compare_gru(q2_vec)
q1_rep = apply_multiple(q1_rep, [GlobalAvgPool1D(), GlobalMaxPool1D()])
q2_rep = apply_multiple(q2_rep, [GlobalAvgPool1D(), GlobalMaxPool1D()])
h_all = Concatenate()([q1_rep, q2_rep])
h_all = BatchNormalization()(h_all)
h_all = Dense(256, activation='elu')(h_all)
h_all = BatchNormalization()(h_all)
h_all = Dropout(0.5)(h_all)
h_all = Dense(256, activation='elu')(h_all)
h_all = BatchNormalization()(h_all)
h_all = Dropout(0.5)(h_all)
out_ = Dense(3, activation='softmax')(h_all)
model = Model(inputs=[q1, q2, input_layer_3, q1_exact_match, q2_exact_match], outputs=out_)
model.compile(optimizer=Adam(lr=lr, decay=1e-6, clipnorm=1.5,), loss='categorical_crossentropy',
metrics=['accuracy', weighted_accuracy])
model.summary()
return model
def get_char_ESIM(nb_words, embedding_dim, embedding_matrix, max_sequence_length, out_size,
projection_dim=50, projection_hidden=0, projection_dropout=0.2,
compare_dim=288, compare_dropout=0.2,
dense_dim=50, dense_dropout=0.2,
lr=1e-3, activation='relu'):
q1 = Input(shape=(max_sequence_length,), name='first_sentences')
q2 = Input(shape=(max_sequence_length,), name='second_sentences')
q1_exact_match = Input(shape=(max_sequence_length,), name='first_exact_match')
q2_exact_match = Input(shape=(max_sequence_length,), name='second_exact_match')
input_layer_3 = Input(shape=(36,), name='mata-features', dtype="float32")
#input_encoded = BatchNormalization()(input_layer_3)
input_encoded = Dense(2016, activation='elu')(input_layer_3)
input_encoded = Dropout(0.25)(input_encoded)
embedding = Embedding(nb_words, 150,
weights=[embedding_matrix],
input_length=max_sequence_length,
trainable=False)
em_embeddings = Embedding(2, 1,
input_length=max_sequence_length,
trainable=True)
#q1_embed = Concatenate()([embedding(q1), em_embeddings(q1_exact_match)])
q1_embed = embedding(q1)
q1_embed = SpatialDropout1D(0.1)(q1_embed)
#q2_embed = Concatenate()([embedding(q2), em_embeddings(q2_exact_match)])
q2_embed = embedding(q2)
q2_embed = SpatialDropout1D(0.1)(q2_embed)
batch_norm = BatchNormalization(axis=-1)
q1_embed = batch_norm(q1_embed)
q2_embed = batch_norm(q2_embed)
aggreation_gru = Bidirectional(CuDNNLSTM(72, return_sequences=True))
q1_seq = aggreation_gru(q1_embed)
q2_seq = aggreation_gru(q2_embed)
q1_aligned, q2_aligned = soft_attention_alignment(q1_seq, q2_seq)
q1_vec = Concatenate()([q1_seq, q2_aligned, substract(q1_seq, q2_aligned), Multiply()([q1_seq, q2_aligned])])
q2_vec = Concatenate()([q2_seq, q1_aligned, substract(q2_seq, q1_aligned), Multiply()([q2_seq, q1_aligned])])
compare_gru = Bidirectional(CuDNNLSTM(72, return_sequences=True))
q1_rep = compare_gru(q1_vec)
q2_rep = compare_gru(q2_vec)
q1_rep = apply_multiple(q1_rep, [GlobalAvgPool1D(), GlobalMaxPool1D()])
q2_rep = apply_multiple(q2_rep, [GlobalAvgPool1D(), GlobalMaxPool1D()])
h_all = Concatenate()([q1_rep, q2_rep])
h_all = BatchNormalization()(h_all)
h_all = Dense(256, activation='elu')(h_all)
h_all = BatchNormalization()(h_all)
h_all = Dropout(0.2)(h_all)
h_all = Dense(256, activation='elu')(h_all)
h_all = BatchNormalization()(h_all)
h_all = Dropout(0.2)(h_all)
out_ = Dense(3, activation='softmax')(h_all)
model = Model(inputs=[q1, q2, input_layer_3, q1_exact_match, q2_exact_match], outputs=out_)
model.compile(optimizer=Adam(lr=lr, decay=1e-6, clipnorm=1.5, amsgrad=True), loss='categorical_crossentropy',
metrics=['accuracy', weighted_accuracy])
model.summary()
return model
# should add a util.py for scoring
def weighted_accuracy(y_true, y_pred):
weight = np.array([[1/16, 1/15, 1/5]])
norm = [(1/16) + (1/15) + (1/5)]
weight_mask = weight * y_true
label_weights = K.max(K.cast(weight_mask, 'float32'), axis=-1)
true_label = K.argmax(y_true, axis=-1)
pred_label = K.argmax(y_pred, axis=-1)
res = K.cast(K.equal(true_label, pred_label), tf.float32) * label_weights / K.sum(label_weights)
res = K.sum(res)
return res
def numpy_weighted_accuracy(y_true, y_pred):
weight = np.array([[1/16, 1/15, 1/5]])
norm = [(1/16) + (1/15) + (1/5)]
weight_mask = weight * y_true
y_pred = (y_pred > 0.5).astype(int)
res = np.equal(y_pred, y_true) * weight_mask / np.sum(weight_mask)
res = np.sum(res)
return res
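# --- Hedged sanity check (added for illustration; not part of the original file) ---
# Toy batch for the weighting above: the per-class weights (1/16, 1/15, 1/5)
# make the rare third class count most. With both samples predicted correctly,
# the weighted score is 1.0.
def _example_weighted_accuracy():
    y_true = np.array([[1, 0, 0], [0, 0, 1]], dtype=float)
    y_pred = np.array([[0.9, 0.05, 0.05], [0.2, 0.2, 0.6]], dtype=float)
    return numpy_weighted_accuracy(y_true, y_pred)  # -> 1.0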
def get_decomposable_attention(nb_words, embedding_dim, embedding_matrix, max_sequence_length, out_size,
projection_dim=50, projection_hidden=0, projection_dropout=0.2,
compare_dim=288, compare_dropout=0.2,
dense_dim=50, dense_dropout=0.2,
lr=1e-3, activation='relu'):
q1 = Input(shape=(max_sequence_length,), name='first_sentences')
q2 = Input(shape=(max_sequence_length,), name='second_sentences')
q1_exact_match = Input(shape=(max_sequence_length,), name='first_exact_match')
q2_exact_match = Input(shape=(max_sequence_length,), name='second_exact_match')
input_layer_3 = Input(shape=(36,), name='mata-features', dtype="float32")
embedding = Embedding(nb_words, embedding_dim,
weights=[embedding_matrix],
input_length=max_sequence_length,
trainable=False)
em_embeddings = Embedding(2, 1,
input_length=max_sequence_length,
trainable=True)
#q1_embed = Concatenate()([embedding(q1), em_embeddings(q1_exact_match)])
q1_embed = embedding(q1)
q1_embed = SpatialDropout1D(0.1)(q1_embed)
#q2_embed = Concatenate()([embedding(q2), em_embeddings(q2_exact_match)])
q2_embed = embedding(q2)
q2_embed = SpatialDropout1D(0.1)(q2_embed)
th = TimeDistributed(Highway(activation='relu'))
q1_embed = th(q1_embed)
q2_embed = th(q2_embed)
q1_aligned, q2_aligned = soft_attention_alignment(q1_embed, q2_embed)
q1_vec = Concatenate()([q1_embed, q2_aligned, substract(q1_embed, q2_aligned), Multiply()([q1_embed, q2_aligned])])
q2_vec = Concatenate()([q2_embed, q1_aligned, substract(q2_embed, q1_aligned), Multiply()([q2_embed, q1_aligned])])
dense_compares = [
Dense(300, activation='elu'),
Dropout(0.2),
Dense(200, activation='elu'),
Dropout(0.2),
]
q1_compared = time_distributed(q1_vec, dense_compares)
q2_compared = time_distributed(q2_vec, dense_compares)
q1_rep = apply_multiple(q1_compared, [GlobalAvgPool1D(), GlobalMaxPool1D()])
q2_rep = apply_multiple(q2_compared, [GlobalAvgPool1D(), GlobalMaxPool1D()])
h_all = Concatenate()([q1_rep, q2_rep])
h_all = BatchNormalization()(h_all)
h_all = Dense(256, activation='elu')(h_all)
h_all = Dropout(0.2)(h_all)
h_all = BatchNormalization()(h_all)
h_all = Dense(256, activation='elu')(h_all)
h_all = Dropout(0.2)(h_all)
h_all = BatchNormalization()(h_all)
out_ = Dense(3, activation='softmax')(h_all)
| |
import os
import math
import tensorflow as tf
from collections import namedtuple
from .model import Model
from .builder import MODELS
from core.layers import build_normalization
PARAMS = {
# (width_coefficient, depth_coefficient, resolution, dropout_rate)
"efficientnet-b0": (1.0, 1.0, 224, 0.2),
"efficientnet-b1": (1.0, 1.1, 240, 0.2),
"efficientnet-b2": (1.1, 1.2, 260, 0.3),
"efficientnet-b3": (1.2, 1.4, 300, 0.3),
"efficientnet-b4": (1.4, 1.8, 380, 0.4),
"efficientnet-b5": (1.6, 2.2, 456, 0.4),
"efficientnet-b6": (1.8, 2.6, 528, 0.5),
"efficientnet-b7": (2.0, 3.1, 600, 0.5),
}
GlobalParams = namedtuple("GlobalParams", [
"batch_norm_momentum", "batch_norm_epsilon", "width_coefficient",
"depth_coefficient", "depth_divisor", "min_depth", "drop_connect_rate",
"data_format", "dropout_rate", "num_classes"
])
GlobalParams.__new__.__defaults__ = (None,) * len(GlobalParams._fields)
BlockArgs = namedtuple("BlockArgs", [
"repeats", "in_filters", "out_filters", "kernel_size",
"strides", "expand_ratio", "se_ratio", "id_skip", "super_pixel", "trainable"
])
BlockArgs.__new__.__defaults__ = (None, ) * len(BlockArgs._fields)
def round_filters(filters, global_params):
# orig_filters =filters
multiplier = global_params.width_coefficient
divisor = global_params.depth_divisor
min_depth = global_params.min_depth
if multiplier is None:
return filters
min_depth = min_depth or divisor
filters *= multiplier
new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor)
if new_filters < 0.9 * filters:
new_filters += divisor
return int(new_filters)
def round_repeats(repeats, global_params):
multiplier = global_params.depth_coefficient
if multiplier is None:
return repeats
return int(math.ceil(repeats * multiplier))
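# --- Worked example (added for illustration) ---
# For "efficientnet-b2" (width_coefficient=1.1, depth_coefficient=1.2, depth_divisor=8):
#   round_filters(32, gp): 32 * 1.1 = 35.2 -> snapped to a multiple of 8 -> 32
#   round_repeats(3, gp):  ceil(3 * 1.2) = 4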
class DropConnect(tf.keras.layers.Layer):
def __init__(self, drop_rate=None, **kwargs):
super(DropConnect, self).__init__(**kwargs)
self.drop_rate = drop_rate if drop_rate is not None else 0.
def _drop(self, inputs, drop_rate):
random_tensor = tf.convert_to_tensor(drop_rate, dtype=inputs.dtype)
batch_size = tf.shape(inputs)[0]
random_tensor += tf.random.uniform([batch_size, 1, 1, 1], dtype=inputs.dtype)
binary_tensor = tf.math.floor(random_tensor)
return tf.divide(inputs, random_tensor) * binary_tensor
def call(self, inputs, training=None):
if training and self.drop_rate > 0.:
return self._drop(inputs, self.drop_rate)
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def conv2d_kernel_initializer(shape, dtype=tf.float32):
kernel_height, kernel_width, _, out_filters = shape
fan_out = int(kernel_height * kernel_width * out_filters)
return tf.random.normal(shape, 0.0, math.sqrt(2. / fan_out), dtype=dtype)
def dense_kernel_initializer(shape, dtype=tf.float32):
init_range = 1.0 / math.sqrt(shape[1])
return tf.random.uniform(shape, -init_range, init_range, dtype)
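# Descriptive note (added for clarity): mbconv_block below follows the standard
# MBConv pattern of EfficientNet -- 1x1 expansion conv, depthwise conv,
# squeeze-and-excitation, then a 1x1 projection conv -- with an identity skip
# connection added when the strides are 1 and input/output filters match.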
def mbconv_block(inputs,
global_params,
block_args,
normalization,
drop_connect_rate=None,
trainable=True,
name=""):
expand_ratio = block_args.expand_ratio
data_format = global_params.data_format
_momentum = global_params.batch_norm_momentum
_epsilon = global_params.batch_norm_epsilon
_axis = normalization["axis"]
_mean_axis = [1, 2] if _axis == -1 or _axis == 3 else [2, 3]
filters = block_args.in_filters * expand_ratio
expand_ratio = expand_ratio
if expand_ratio != 1:
x = tf.keras.layers.Conv2D(filters=filters,
kernel_size=(1, 1),
strides=(1, 1),
padding="same",
data_format=data_format,
use_bias=False,
name=name+"/conv2d",
trainable=trainable,
kernel_initializer=conv2d_kernel_initializer)(inputs)
x = build_normalization(**normalization,
name=name+"/batch_normalization")(x)
x = tf.keras.layers.Activation("swish", name=name+"/swish")(x)
else:
x = inputs
# Depthwise Convolution
# if block_args.strides == 2:
# x = tf.keras.layers.ZeroPadding2D(
# padding=imagenet_utils.correct_pad(x, block_args.kernel_size),
# name=name + '/dwconv_pad')(x)
# conv_pad = 'valid'
# else:
# conv_pad = 'same'
x = tf.keras.layers.DepthwiseConv2D(kernel_size=block_args.kernel_size,
strides=block_args.strides,
padding="same",
data_format=data_format,
use_bias=False,
trainable=trainable,
depthwise_initializer=conv2d_kernel_initializer,
name=name+"/depthwise_conv2d")(x)
x = build_normalization(**normalization,
name=name+"/batch_normalization"
if expand_ratio == 1 else name+"/batch_normalization_1")(x)
x = tf.keras.layers.Activation("swish", name=name+"/swish" if expand_ratio == 1 else name+"/swish_1")(x)
has_se = block_args.se_ratio is not None and 0 < block_args.se_ratio < 1
if has_se:
squeeze_filters = max(1, int(block_args.in_filters * block_args.se_ratio))
se = tf.keras.layers.Lambda(lambda inp: tf.reduce_mean(inp, axis=_mean_axis, keepdims=True),
name=name+"/se/global_pooling")(x)
se = tf.keras.layers.Conv2D(filters=squeeze_filters,
kernel_size=(1, 1),
strides=(1, 1),
padding="same",
data_format=data_format,
use_bias=True,
kernel_initializer=conv2d_kernel_initializer,
trainable=trainable,
name=name + "/se/conv2d")(se)
se = tf.keras.layers.Activation("swish", name=name + "/se/swish_1")(se)
se = tf.keras.layers.Conv2D(filters=filters,
kernel_size=(1, 1),
strides=(1, 1),
padding="same",
data_format=data_format,
use_bias=True,
trainable=trainable,
kernel_initializer=conv2d_kernel_initializer,
name=name + "/se/conv2d_1")(se)
se = tf.keras.layers.Activation("sigmoid", name=name+"/se/sigmoid")(se)
x = tf.keras.layers.Multiply(name=name + "/se/multiply")([se, x])
x = tf.keras.layers.Conv2D(block_args.out_filters,
kernel_size=(1, 1),
strides=(1, 1),
padding="same",
data_format=data_format,
use_bias=False,
trainable=trainable,
kernel_initializer=conv2d_kernel_initializer,
name=name+"/conv2d" if expand_ratio == 1 else name+"/conv2d_1")(x)
x = build_normalization(**normalization,
name=name+"/batch_normalization_2"
if expand_ratio > 1 else name+"/batch_normalization_1")(x)
if block_args.id_skip:
if all(s == 1 for s in block_args.strides) and block_args.in_filters == block_args.out_filters:
# x = DropConnect(drop_connect_rate, name=name + "/drop_connect")(x)
# x = tf.keras.layers.Dropout(drop_connect_rate, noise_shape=(None, 1, 1, 1), name=name + '/drop')(x)
x = tf.keras.layers.Add(name=name + "/add")([x, inputs])
return x
class EfficientNet(Model):
def _get_global_params(self, name, data_format):
return GlobalParams(
batch_norm_momentum=0.9,
batch_norm_epsilon=1e-3,
width_coefficient=PARAMS[name][0],
depth_coefficient=PARAMS[name][1],
depth_divisor=8,
min_depth=None,
drop_connect_rate=0.2,
data_format=data_format,
dropout_rate=PARAMS[name][-1],
num_classes=1000
)
def _get_block_args(self):
return [
BlockArgs(1, 32, 16, (3, 3), (1, 1), 1, 0.25, True),
BlockArgs(2, 16, 24, (3, 3), (2, 2), 6, 0.25, True),
BlockArgs(2, 24, 40, (5, 5), (2, 2), 6, 0.25, True),
BlockArgs(3, 40, 80, (3, 3), (2, 2), 6, 0.25, True),
BlockArgs(3, 80, 112, (5, 5), (1, 1), 6, 0.25, True),
BlockArgs(4, 112, 192, (5, 5), (2, 2), 6, 0.25, True),
BlockArgs(1, 192, 320, (3, 3), (1, 1), 6, 0.25, True)
]
def __init__(self,
name,
convolution='conv2d',
normalization=dict(normalization="batch_norm", momentum=0.9, epsilon=1e-3, axis=-1, trainable=True),
activation=dict(activation="swish"),
output_indices=(3, 4),
strides=(2, 2, 2, 2, 2),
dilation_rates=(1,1,1,1,1),
frozen_stages=(-1,),
dropblock=None,
input_shape=None,
input_tensor=None,
**kwargs):
data_format = tf.keras.backend.image_data_format()
self.bn_axis = 3 if data_format == 'channels_last' else 1
default_size = PARAMS[name][2]
dropout_connect_rate = PARAMS[name][3]
default_shape = [default_size, default_size, 3] if self.bn_axis == 3 else [3, default_size, default_size]
input_shape = input_shape or default_shape
super(EfficientNet, self).__init__(name=name,
convolution=convolution,
normalization=normalization,
activation=activation,
output_indices=output_indices,
strides=strides,
dilation_rates=dilation_rates,
frozen_stages=frozen_stages,
dropblock=dropblock,
input_shape=input_shape,
input_tensor=input_tensor)
self.backbone_name = name
self.data_format = data_format
self.global_params = self._get_global_params(name, self.data_format)
self.block_args = self._get_block_args()
self._drop_connect_rate = dropout_connect_rate
self.num_blocks = 0
for args in self.block_args:
self.num_blocks += round_repeats(args.repeats, global_params=self.global_params)
@property
def blocks(self):
blocks = []
for i, args in enumerate(self.block_args):
assert args.repeats >= 1
# assert args.super_pixel in [0, 1, 2]
in_filters = round_filters(args.in_filters, self.global_params)
out_filters = round_filters(args.out_filters, self.global_params)
args = args._replace(in_filters=in_filters,
out_filters=out_filters,
repeats=round_repeats(args.repeats, self.global_params),
trainable=i + 2 not in self.frozen_stages)
blocks.append(args)
if args.repeats > 1:
args = args._replace(in_filters=out_filters, strides=(1, 1))
for i in range(args.repeats - 1):
blocks.append(args)
return blocks
def build_model(self):
def _norm(inp):
mean = tf.constant([0.485, 0.456, 0.406], inp.dtype, [1, 1, 1, 3]) * 255.
std = 1. / (tf.constant([0.229, 0.224, 0.225], inp.dtype, [1, 1, 1, 3]) * 255.)
return (inp - mean) * std
x = tf.keras.layers.Lambda(_norm, name="norm_input")(self.img_input)
x = tf.keras.layers.Conv2D(round_filters(32, self.global_params),
kernel_size=(3, 3),
strides=(2, 2),
padding="same",
data_format=self.data_format,
use_bias=False,
kernel_initializer=conv2d_kernel_initializer,
trainable=1 not in self.frozen_stages,
name=self.name + "/stem/conv2d")(x)
x = build_normalization(**self.normalization, name=self.name + "/stem/batch_normalization")(x)
x = tf.keras.layers.Activation("swish", name=self.name + "/stem/swish")(x)
block_outputs = []
for idx, b_args in enumerate(self.blocks):
drop_rate = self._drop_connect_rate
is_reduction = False
if b_args.super_pixel == 1 and idx == 0:
block_outputs.append(x)
elif (idx == self.num_blocks - 1) or self.blocks[idx+1].strides[0] > 1:
is_reduction = True
if drop_rate:
drop_rate = 1.0 - drop_rate * float(idx) / self.num_blocks
x = mbconv_block(x,
global_params=self.global_params,
block_args=b_args,
normalization=self.normalization,
drop_connect_rate=drop_rate,
trainable=b_args.trainable,
name=self.name + "/blocks_%d" % idx)
if is_reduction:
block_outputs.append(x)
if -1 in self.output_indices:
# Head part.
x = tf.keras.layers.Conv2D(filters=round_filters(1280, self.global_params),
kernel_size=[1, 1],
strides=[1, 1],
kernel_initializer=conv2d_kernel_initializer,
padding="same",
use_bias=False,
data_format=self.data_format,
name=self.name + "/head/conv2d")(x)
x = build_normalization(**self.normalization, name=self.name + "/head/batch_normalization")(x)
x = tf.keras.layers.Activation("swish", name=self.name + "/head/swish")(x)
x = tf.keras.layers.GlobalAveragePooling2D(data_format=self.data_format,
name=self.name + "/head/global_avg_pooling")(x)
x = tf.keras.layers.Dropout(self.global_params.dropout_rate, name=self.name + "/head/dropout")(x)
x = tf.keras.layers.Dense(self.global_params.num_classes,
kernel_initializer=dense_kernel_initializer,
name=self.name + "/head/dense")(x)
outputs = x
else:
outputs = [block_outputs[i - 1] for i in self.output_indices]
return tf.keras.Model(inputs=self.img_input, outputs=outputs, name=self.name)
def init_weights(self, pretrained_weight_path=None):
if pretrained_weight_path is not None:
pretrained_weights = tf.train.latest_checkpoint(pretrained_weight_path)
assert pretrained_weights is not None, "Error! Please check path {}".format(pretrained_weight_path)
# use_exponential_moving_average = False
# for w in tf.train.list_variables(pretrained_weights):
# if "ExponentialMovingAverage" not in w[0]:
# # use_exponential_moving_average = True
# if "box_net" in w[0]:
# print(w[0], w[1])
for weight in self.model.weights:
name = weight.name.split(":")[0]
# print(name, weight.shape)
# if "box-predict" in name or "class-predict" in name:
# continue
if "batch_normalization" in name:
name = name.replace("batch_normalization", "tpu_batch_normalization")
# if use_exponential_moving_average:
# name += "/ExponentialMovingAverage"
try:
pretrained_weight = tf.train.load_variable(pretrained_weights, name)
weight.assign(pretrained_weight)
except Exception as e:
print(str(e), "{} not in {}.".format(name, pretrained_weight_path))
@MODELS.register("EfficientNetB0")
def EfficientNetB0(input_shape,
convolution='conv2d',
normalization=dict(normalization="batch_norm", momentum=0.9, epsilon=1e-3, axis=-1, trainable=True),
activation=dict(activation="swish"),
output_indices=(3, 4),
strides=(2, 2, 2, 2, 2),
dilation_rates=(1, 1, 1, 1, 1),
frozen_stages=(-1, ),
dropblock=None,
input_tensor=None,
classifier_activation='softmax'):
return EfficientNet(name="efficientnet-b0",
convolution=convolution,
normalization=normalization,
activation=activation,
output_indices=output_indices,
strides=strides,
dilation_rates=dilation_rates,
frozen_stages=frozen_stages,
dropblock=dropblock,
input_shape=input_shape,
input_tensor=input_tensor,
classifier_activation=classifier_activation).build_model()
@MODELS.register("EfficientNetB1")
def EfficientNetB1(input_shape,
convolution='conv2d',
normalization=dict(normalization="batch_norm", momentum=0.9, epsilon=1e-3, axis=-1, trainable=True),
activation=dict(activation="swish"),
output_indices=(3, 4),
strides=(2, 2, 2, 2, 2),
dilation_rates=(1, 1, 1, 1, 1),
frozen_stages=(-1,),
dropblock=None,
input_tensor=None,
classifier_activation="softmax"):
return EfficientNet(name="efficientnet-b1",
convolution=convolution,
normalization=normalization,
activation=activation,
output_indices=output_indices,
strides=strides,
dilation_rates=dilation_rates,
frozen_stages=frozen_stages,
dropblock=dropblock,
input_shape=input_shape,
input_tensor=input_tensor,
classifier_activation=classifier_activation).build_model()
@MODELS.register("EfficientNetB2")
def EfficientNetB2(input_shape,
convolution='conv2d',
normalization=dict(normalization="batch_norm", momentum=0.9, epsilon=1e-3, axis=-1, trainable=True),
activation=dict(activation="swish"),
output_indices=(3, 4),
strides=(2, 2, 2, 2, 2),
dilation_rates=(1, 1, 1, 1, 1),
frozen_stages=(-1,),
dropblock=None,
input_tensor=None,
classifier_activation="softmax"):
return EfficientNet(name="efficientnet-b2",
convolution=convolution,
normalization=normalization,
activation=activation,
output_indices=output_indices,
strides=strides,
dilation_rates=dilation_rates,
frozen_stages=frozen_stages,
| |
from django.contrib.auth.models import Permission
from django.urls import reverse
from django.utils.http import urlencode
from django.utils.translation import gettext_lazy as _
from django.utils.translation import gettext
from draftjs_exporter.dom import DOM
import wagtail.admin.rich_text.editors.draftail.features as draftail_features
from wagtail.admin.auth import user_has_any_page_permission
from wagtail.admin.localization import get_available_admin_languages, get_available_admin_time_zones
from wagtail.admin.menu import MenuItem, SubmenuMenuItem, reports_menu, settings_menu
from wagtail.admin.navigation import get_explorable_root_page
from wagtail.admin.rich_text import (
HalloFormatPlugin, HalloHeadingPlugin, HalloListPlugin, HalloPlugin)
from wagtail.admin.rich_text.converters.contentstate import link_entity
from wagtail.admin.rich_text.converters.editor_html import (
LinkTypeRule, PageLinkHandler, WhitelistRule)
from wagtail.admin.rich_text.converters.html_to_contentstate import (
BlockElementHandler, ExternalLinkElementHandler, HorizontalRuleHandler,
InlineStyleElementHandler, ListElementHandler, ListItemElementHandler, PageLinkElementHandler)
from wagtail.admin.search import SearchArea
from wagtail.admin.site_summary import PagesSummaryItem
from wagtail.admin.views.account import email_management_enabled, password_management_enabled
from wagtail.admin.viewsets import viewsets
from wagtail.admin.widgets import Button, ButtonWithDropdownFromHook, PageListingButton
from wagtail.core import hooks
from wagtail.core.models import UserPagePermissionsProxy
from wagtail.core.permissions import (
collection_permission_policy, task_permission_policy, workflow_permission_policy)
from wagtail.core.whitelist import allow_without_attributes, attribute_rule, check_url
class ExplorerMenuItem(MenuItem):
template = 'wagtailadmin/shared/explorer_menu_item.html'
def is_shown(self, request):
return user_has_any_page_permission(request.user)
def get_context(self, request):
context = super().get_context(request)
start_page = get_explorable_root_page(request.user)
if start_page:
context['start_page_id'] = start_page.id
return context
@hooks.register('register_admin_menu_item')
def register_explorer_menu_item():
return ExplorerMenuItem(
_('Pages'), reverse('wagtailadmin_explore_root'),
name='explorer',
icon_name='folder-open-inverse',
order=100)
class SettingsMenuItem(SubmenuMenuItem):
template = 'wagtailadmin/shared/menu_settings_menu_item.html'
@hooks.register('register_admin_menu_item')
def register_settings_menu():
return SettingsMenuItem(
_('Settings'),
settings_menu,
icon_name='cogs',
order=10000)
@hooks.register('register_permissions')
def register_permissions():
return Permission.objects.filter(content_type__app_label='wagtailadmin', codename='access_admin')
class PageSearchArea(SearchArea):
def __init__(self):
super().__init__(
_('Pages'), reverse('wagtailadmin_pages:search'),
name='pages',
classnames='icon icon-folder-open-inverse',
order=100)
def is_shown(self, request):
return user_has_any_page_permission(request.user)
@hooks.register('register_admin_search_area')
def register_pages_search_area():
return PageSearchArea()
class CollectionsMenuItem(MenuItem):
def is_shown(self, request):
return collection_permission_policy.user_has_any_permission(
request.user, ['add', 'change', 'delete']
)
@hooks.register('register_settings_menu_item')
def register_collections_menu_item():
return CollectionsMenuItem(_('Collections'), reverse('wagtailadmin_collections:index'), icon_name='folder-open-1', order=700)
class WorkflowsMenuItem(MenuItem):
def is_shown(self, request):
return workflow_permission_policy.user_has_any_permission(
request.user, ['add', 'change', 'delete']
)
class WorkflowTasksMenuItem(MenuItem):
def is_shown(self, request):
return task_permission_policy.user_has_any_permission(
request.user, ['add', 'change', 'delete']
)
@hooks.register('register_settings_menu_item')
def register_workflows_menu_item():
return WorkflowsMenuItem(_('Workflows'), reverse('wagtailadmin_workflows:index'), icon_name='tasks', order=100)
@hooks.register('register_settings_menu_item')
def register_workflow_tasks_menu_item():
return WorkflowTasksMenuItem(_('Workflow tasks'), reverse('wagtailadmin_workflows:task_index'), icon_name='thumbtack', order=150)
@hooks.register('register_page_listing_buttons')
def page_listing_buttons(page, page_perms, is_parent=False, next_url=None):
if page_perms.can_edit():
yield PageListingButton(
_('Edit'),
reverse('wagtailadmin_pages:edit', args=[page.id]),
attrs={'aria-label': _("Edit '%(title)s'") % {'title': page.get_admin_display_title()}},
priority=10
)
if page.has_unpublished_changes and page.is_previewable():
yield PageListingButton(
_('View draft'),
reverse('wagtailadmin_pages:view_draft', args=[page.id]),
attrs={
'aria-label': _("Preview draft version of '%(title)s'") % {'title': page.get_admin_display_title()},
'target': '_blank', 'rel': 'noopener noreferrer'
},
priority=20
)
if page.live and page.url:
yield PageListingButton(
_('View live'),
page.url,
attrs={
'target': "_blank", 'rel': 'noopener noreferrer',
'aria-label': _("View live version of '%(title)s'") % {'title': page.get_admin_display_title()},
},
priority=30
)
if page_perms.can_add_subpage():
if is_parent:
yield Button(
_('Add child page'),
reverse('wagtailadmin_pages:add_subpage', args=[page.id]),
attrs={
'aria-label': _("Add a child page to '%(title)s' ") % {'title': page.get_admin_display_title()},
},
classes={'button', 'button-small', 'bicolor', 'icon', 'white', 'icon-plus'},
priority=40
)
else:
yield PageListingButton(
_('Add child page'),
reverse('wagtailadmin_pages:add_subpage', args=[page.id]),
attrs={'aria-label': _("Add a child page to '%(title)s' ") % {'title': page.get_admin_display_title()}},
priority=40
)
yield ButtonWithDropdownFromHook(
_('More'),
hook_name='register_page_listing_more_buttons',
page=page,
page_perms=page_perms,
is_parent=is_parent,
next_url=next_url,
attrs={
'target': '_blank', 'rel': 'noopener noreferrer',
'title': _("View more options for '%(title)s'") % {'title': page.get_admin_display_title()}
},
priority=50
)
@hooks.register('register_page_listing_more_buttons')
def page_listing_more_buttons(page, page_perms, is_parent=False, next_url=None):
if page_perms.can_move():
yield Button(
_('Move'),
reverse('wagtailadmin_pages:move', args=[page.id]),
attrs={"title": _("Move page '%(title)s'") % {'title': page.get_admin_display_title()}},
priority=10
)
if page_perms.can_copy():
url = reverse('wagtailadmin_pages:copy', args=[page.id])
if next_url:
url += '?' + urlencode({'next': next_url})
yield Button(
_('Copy'),
url,
attrs={'title': _("Copy page '%(title)s'") % {'title': page.get_admin_display_title()}},
priority=20
)
if page_perms.can_delete():
url = reverse('wagtailadmin_pages:delete', args=[page.id])
if next_url:
url += '?' + urlencode({'next': next_url})
yield Button(
_('Delete'),
url,
attrs={'title': _("Delete page '%(title)s'") % {'title': page.get_admin_display_title()}},
priority=30
)
if page_perms.can_unpublish():
url = reverse('wagtailadmin_pages:unpublish', args=[page.id])
if next_url:
url += '?' + urlencode({'next': next_url})
yield Button(
_('Unpublish'),
url,
attrs={'title': _("Unpublish page '%(title)s'") % {'title': page.get_admin_display_title()}},
priority=40
)
if page_perms.can_view_revisions():
yield Button(
_('History'),
reverse('wagtailadmin_pages:history', args=[page.id]),
attrs={'title': _("View page history for '%(title)s'") % {'title': page.get_admin_display_title()}},
priority=50
)
@hooks.register('register_admin_urls')
def register_viewsets_urls():
viewsets.populate()
return viewsets.get_urlpatterns()
@hooks.register('register_account_menu_item')
def register_account_set_profile_picture(request):
return {
'url': reverse('wagtailadmin_account_change_avatar'),
'label': _('Set profile picture'),
'help_text': _("Change your profile picture.")
}
@hooks.register('register_account_menu_item')
def register_account_change_email(request):
if email_management_enabled():
return {
'url': reverse('wagtailadmin_account_change_email'),
'label': _('Change email'),
'help_text': _('Change the email address linked to your account.'),
}
@hooks.register('register_account_menu_item')
def register_account_change_password(request):
if password_management_enabled() and request.user.has_usable_password():
return {
'url': reverse('wagtailadmin_account_change_password'),
'label': _('Change password'),
'help_text': _('Change the password you use to log in.'),
}
@hooks.register('register_account_menu_item')
def register_account_notification_preferences(request):
user_perms = UserPagePermissionsProxy(request.user)
if user_perms.can_edit_pages() or user_perms.can_publish_pages():
return {
'url': reverse('wagtailadmin_account_notification_preferences'),
'label': _('Notification preferences'),
'help_text': _('Choose which email notifications to receive.'),
}
@hooks.register('register_account_menu_item')
def register_account_preferred_language_preferences(request):
if len(get_available_admin_languages()) > 1:
return {
'url': reverse('wagtailadmin_account_language_preferences'),
'label': _('Language preferences'),
'help_text': _('Choose the language you want to use here.'),
}
@hooks.register('register_account_menu_item')
def register_account_current_time_zone(request):
if len(get_available_admin_time_zones()) > 1:
return {
'url': reverse('wagtailadmin_account_current_time_zone'),
'label': _('Current Time Zone'),
'help_text': _('Choose your current time zone.'),
}
@hooks.register('register_account_menu_item')
def register_account_change_name(request):
return {
'url': reverse('wagtailadmin_account_change_name'),
'label': _('Change name'),
'help_text': _('Change your first and last name on your account.'),
}
@hooks.register('register_rich_text_features')
def register_core_features(features):
# Hallo.js
features.register_editor_plugin(
'hallo', 'hr',
HalloPlugin(
name='hallohr',
js=['wagtailadmin/js/hallo-plugins/hallo-hr.js'],
order=45,
)
)
features.register_converter_rule('editorhtml', 'hr', [
WhitelistRule('hr', allow_without_attributes)
])
features.register_editor_plugin(
'hallo', 'link',
HalloPlugin(
name='hallowagtaillink',
js=[
'wagtailadmin/js/page-chooser-modal.js',
'wagtailadmin/js/hallo-plugins/hallo-wagtaillink.js',
],
)
)
features.register_converter_rule('editorhtml', 'link', [
WhitelistRule('a', attribute_rule({'href': check_url})),
LinkTypeRule('page', PageLinkHandler),
])
features.register_editor_plugin(
'hallo', 'bold', HalloFormatPlugin(format_name='bold')
)
features.register_converter_rule('editorhtml', 'bold', [
WhitelistRule('b', allow_without_attributes),
WhitelistRule('strong', allow_without_attributes),
])
features.register_editor_plugin(
'hallo', 'italic', HalloFormatPlugin(format_name='italic')
)
features.register_converter_rule('editorhtml', 'italic', [
WhitelistRule('i', allow_without_attributes),
WhitelistRule('em', allow_without_attributes),
])
headings_elements = ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']
headings_order_start = HalloHeadingPlugin.default_order + 1
for order, element in enumerate(headings_elements, start=headings_order_start):
features.register_editor_plugin(
'hallo', element, HalloHeadingPlugin(element=element, order=order)
)
features.register_converter_rule('editorhtml', element, [
WhitelistRule(element, allow_without_attributes)
])
features.register_editor_plugin(
'hallo', 'ol', HalloListPlugin(list_type='ordered')
)
features.register_converter_rule('editorhtml', 'ol', [
WhitelistRule('ol', allow_without_attributes),
WhitelistRule('li', allow_without_attributes),
])
features.register_editor_plugin(
'hallo', 'ul', HalloListPlugin(list_type='unordered')
)
features.register_converter_rule('editorhtml', 'ul', [
WhitelistRule('ul', allow_without_attributes),
WhitelistRule('li', allow_without_attributes),
])
# Draftail
features.register_editor_plugin(
'draftail', 'hr', draftail_features.BooleanFeature('enableHorizontalRule')
)
features.register_converter_rule('contentstate', 'hr', {
'from_database_format': {
'hr': HorizontalRuleHandler(),
},
'to_database_format': {
'entity_decorators': {'HORIZONTAL_RULE': lambda props: DOM.create_element('hr')}
}
})
features.register_editor_plugin(
'draftail', 'h1', draftail_features.BlockFeature({
'label': 'H1',
'type': 'header-one',
'description': gettext('Heading %(level)d') % {'level': 1},
})
)
features.register_converter_rule('contentstate', 'h1', {
'from_database_format': {
'h1': BlockElementHandler('header-one'),
},
'to_database_format': {
'block_map': {'header-one': 'h1'}
}
})
features.register_editor_plugin(
'draftail', 'h2', draftail_features.BlockFeature({
'label': 'H2',
'type': 'header-two',
'description': gettext('Heading %(level)d') % {'level': 2},
})
)
features.register_converter_rule('contentstate', 'h2', {
'from_database_format': {
'h2': BlockElementHandler('header-two'),
},
'to_database_format': {
'block_map': {'header-two': 'h2'}
}
})
features.register_editor_plugin(
'draftail', 'h3', draftail_features.BlockFeature({
'label': 'H3',
'type': 'header-three',
'description': gettext('Heading %(level)d') % {'level': 3},
})
)
features.register_converter_rule('contentstate', 'h3', {
'from_database_format': {
'h3': BlockElementHandler('header-three'),
},
'to_database_format': {
'block_map': {'header-three': 'h3'}
}
})
features.register_editor_plugin(
'draftail', 'h4', draftail_features.BlockFeature({
'label': 'H4',
'type': 'header-four',
'description': gettext('Heading %(level)d') % {'level': 4},
})
)
features.register_converter_rule('contentstate', 'h4', {
'from_database_format': {
'h4': BlockElementHandler('header-four'),
},
'to_database_format': {
'block_map': {'header-four': 'h4'}
}
})
features.register_editor_plugin(
'draftail', 'h5', draftail_features.BlockFeature({
'label': 'H5',
'type': 'header-five',
'description': gettext('Heading %(level)d') % {'level': 5},
})
)
features.register_converter_rule('contentstate', 'h5', {
'from_database_format': {
'h5': BlockElementHandler('header-five'),
},
'to_database_format': {
'block_map': {'header-five': 'h5'}
}
})
features.register_editor_plugin(
'draftail', 'h6', draftail_features.BlockFeature({
'label': 'H6',
'type': 'header-six',
'description': gettext('Heading %(level)d') % {'level': 6},
})
)
features.register_converter_rule('contentstate', 'h6', {
'from_database_format': {
'h6': BlockElementHandler('header-six'),
},
'to_database_format': {
'block_map': {'header-six': 'h6'}
}
})
features.register_editor_plugin(
'draftail', 'ul', draftail_features.BlockFeature({
'type': 'unordered-list-item',
'icon': 'list-ul',
'description': gettext('Bulleted list'),
})
)
features.register_converter_rule('contentstate', 'ul', {
'from_database_format': {
'ul': ListElementHandler('unordered-list-item'),
'li': ListItemElementHandler(),
},
'to_database_format': {
'block_map': {'unordered-list-item': {'element': 'li', 'wrapper': 'ul'}}
}
})
features.register_editor_plugin(
'draftail', 'ol', draftail_features.BlockFeature({
'type': 'ordered-list-item',
'icon': 'list-ol',
'description': gettext('Numbered list'),
})
)
features.register_converter_rule('contentstate', 'ol', {
'from_database_format': {
'ol': ListElementHandler('ordered-list-item'),
'li': ListItemElementHandler(),
},
'to_database_format': {
'block_map': {'ordered-list-item': {'element': 'li', 'wrapper': 'ol'}}
}
})
features.register_editor_plugin(
'draftail', 'blockquote', draftail_features.BlockFeature({
'type': 'blockquote',
'icon': 'openquote',
'description': gettext('Blockquote'),
})
)
features.register_converter_rule('contentstate', 'blockquote', {
'from_database_format': {
'blockquote': BlockElementHandler('blockquote'),
},
'to_database_format': {
'block_map': {'blockquote': 'blockquote'}
}
})
features.register_editor_plugin(
'draftail', 'bold', draftail_features.InlineStyleFeature({
'type': 'BOLD',
'icon': 'bold',
'description': gettext('Bold'),
})
)
features.register_converter_rule('contentstate', 'bold', {
'from_database_format': {
'b': InlineStyleElementHandler('BOLD'),
'strong': InlineStyleElementHandler('BOLD'),
},
'to_database_format': {
'style_map': {'BOLD': 'b'}
}
})
features.register_editor_plugin(
'draftail', 'italic', draftail_features.InlineStyleFeature({
'type': 'ITALIC',
'icon': 'italic',
'description': gettext('Italic'),
})
)
features.register_converter_rule('contentstate', 'italic', {
'from_database_format': {
'i': InlineStyleElementHandler('ITALIC'),
'em': InlineStyleElementHandler('ITALIC'),
},
'to_database_format': {
'style_map': {'ITALIC': 'i'}
}
})
features.register_editor_plugin(
'draftail', 'link', draftail_features.EntityFeature({
'type': 'LINK',
'icon': 'link',
'description': gettext('Link'),
# We want to enforce constraints on which links can be pasted into rich text.
# Keep only the attributes Wagtail needs.
'attributes': ['url', 'id', 'parentId'],
'whitelist': {
# Keep pasted links with http/https protocol, and not-pasted links (href = undefined).
'href': "^(http:|https:|undefined$)",
}
}, js=[
'wagtailadmin/js/page-chooser-modal.js',
])
)
features.register_converter_rule('contentstate', 'link', {
'from_database_format': {
'a[href]': ExternalLinkElementHandler('LINK'),
'a[linktype="page"]': PageLinkElementHandler('LINK'),
},
'to_database_format': {
'entity_decorators': {'LINK': link_entity}
}
})
features.register_editor_plugin(
'draftail', 'superscript', draftail_features.InlineStyleFeature({
'type': 'SUPERSCRIPT',
'icon': 'superscript',
'description': gettext('Superscript'),
})
)
features.register_converter_rule('contentstate', 'superscript', {
'from_database_format': {
'sup': InlineStyleElementHandler('SUPERSCRIPT'),
},
'to_database_format': {
'style_map': {'SUPERSCRIPT': 'sup'}
}
})
features.register_editor_plugin(
'draftail', 'subscript', draftail_features.InlineStyleFeature({
'type': 'SUBSCRIPT',
'icon': 'subscript',
'description': gettext('Subscript'),
})
)
features.register_converter_rule('contentstate', 'subscript', {
'from_database_format': {
'sub': InlineStyleElementHandler('SUBSCRIPT'),
},
'to_database_format': {
'style_map': {'SUBSCRIPT': 'sub'}
}
})
features.register_editor_plugin(
'draftail', 'strikethrough', draftail_features.InlineStyleFeature({
'type': 'STRIKETHROUGH',
'icon': 'strikethrough',
'description': gettext('Strikethrough'),
})
)
features.register_converter_rule('contentstate', 'strikethrough', {
'from_database_format': {
's': InlineStyleElementHandler('STRIKETHROUGH'),
},
'to_database_format': {
'style_map': {'STRIKETHROUGH': 's'}
}
})
features.register_editor_plugin(
'draftail', 'code', draftail_features.InlineStyleFeature({
if ax == None:
fig, ax = plt.subplots(1,1)
else:
fig = plt.gcf() # will this work like this? <<<
if draw_body:
for body in self.bodyList:
#body.draw(ax)
plt.plot(body.r6[0],body.r6[1],'ko',markersize=5)
j = 0
for line in self.lineList:
j = j + 1
if color==None and isinstance(line.type, str):
if 'chain' in line.type:
line.drawLine2d(time, ax, color=[.1, 0, 0], Xuvec=Xuvec, Yuvec=Yuvec, colortension=colortension, cmap=cmap_tension)
elif 'rope' in line.type or 'polyester' in line.type:
line.drawLine2d(time, ax, color=[.3,.5,.5], Xuvec=Xuvec, Yuvec=Yuvec, colortension=colortension, cmap=cmap_tension)
else:
line.drawLine2d(time, ax, color=[0.3,0.3,0.3], Xuvec=Xuvec, Yuvec=Yuvec, colortension=colortension, cmap=cmap_tension)
else:
line.drawLine2d(time, ax, color=color, Xuvec=Xuvec, Yuvec=Yuvec, colortension=colortension, cmap=cmap_tension)
# Add Line labels
if linelabels == True:
xloc = np.dot([(line.rA[0]+line.rB[0])/2, (line.rA[1]+line.rB[1])/2, (line.rA[2]+line.rB[2])/2],Xuvec)
yloc = np.dot([(line.rA[0]+line.rB[0])/2, (line.rA[1]+line.rB[1])/2, (line.rA[2]+line.rB[2])/2],Yuvec)
ax.text(xloc,yloc,j)
if cbar_tension:
maxten = max([max(line.getLineTens()) for line in self.lineList]) # find the max tension in the System
minten = min([min(line.getLineTens()) for line in self.lineList]) # find the min tension in the System
bounds = range(int(minten),int(maxten), int((maxten-minten)/256))
norm = mpl.colors.BoundaryNorm(bounds, 256) # set the bounds in a norm object, with 256 being the length of all colorbar strings
fig.colorbar(cm.ScalarMappable(norm=norm, cmap=cmap_tension), label='Tension (N)') # add the colorbar
fig.tight_layout()
# Add point labels
i = 0
for point in self.pointList:
i = i + 1
if pointlabels == True:
xloc = np.dot([point.r[0], point.r[1], point.r[2]], Xuvec)
yloc = np.dot([point.r[0], point.r[1], point.r[2]], Yuvec)
ax.text(xloc, yloc, i, c = 'r')
if isinstance(bathymetry, str): # or, if it's a string, load in the bathymetry file
# parse through the MoorDyn bathymetry file
bathGrid_Xs, bathGrid_Ys, bathGrid = self.readBathymetryFile(bathymetry)
X, Y = np.meshgrid(bathGrid_Xs, bathGrid_Ys)
Z = -bathGrid
if rang=='hold':
rang = (np.min(Z), np.max(Z))
Xind = Xuvec.index(1); Yind = Yuvec.index(1); Zind = int(3-Xind-Yind)
W = [X,Y,Z]
# plot a contour profile of the bathymetry
bath = ax.contourf(W[Xind],W[Yind],W[Zind], cmap=cmap_bath, levels=levels, alpha=alpha, vmin=rang[0], vmax=rang[1])
if cbar_bath_aspect!=20 or cbar_bath_ticks!=None: # make sure the colorbar is turned on just in case it isn't when the other colorbar inputs are used
cbar_bath=True
if cbar_bath:
fig.colorbar(bath, label='depth (m)', aspect=cbar_bath_aspect, ticks=cbar_bath_ticks)
ax.axis("equal")
ax.set_title(title)
return fig, ax # return the figure and axis object in case it will be used later to update the plot
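    # Usage sketch (illustrative, not from the original source): the returned figure and
    # axes can be reused to tweak or save the plot later; "ms" is an assumed System
    # instance and the method name is assumed to be plot2d.
    #
    #     fig, ax = ms.plot2d(Xuvec=[1, 0, 0], Yuvec=[0, 0, 1])
    #     fig.savefig('mooring_2d.png', dpi=200)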
def animateSolution(self, DOFtype="free"):
'''Creates an animation of the system
Returns
-------
None.
'''
# first draw a plot of DOFs and forces
x = np.array(self.Xs)
f = np.array(self.Es)
fig,ax = plt.subplots(2,1,sharex=True)
for i in range(len(self.Es[0])):
ax[0].plot(x[:,i]) # <<< warning this is before scale and offset!
ax[1].plot(f[:,i], label=i+1)
ax[1].legend()
#self.mooringEq(self.freeDOFs[0]) # set positions back to the first ones of the iteration process
self.mooringEq(self.Xs[0], DOFtype=self.DOFtype_solve_for) # set positions back to the first ones of the iteration process
# ^^^^^^^ this only works for free DOF animation cases (not coupled DOF ones) <<<<< ...should be good now
fig, ax = self.plot() # make the initial plot to then animate
nFreeDOF, nCpldDOF = self.getDOFs()
if DOFtype=="free":
nDOF = nFreeDOF
elif DOFtype=="coupled":
nDOF = nCpldDOF
elif DOFtype=="both":
nDOF = nFreeDOF+nCpldDOF
#ms_delay = 10000/len(self.freeDOFs) # time things so the animation takes 10 seconds
line_ani = animation.FuncAnimation(fig, self.animate, np.arange(0,len(self.Xs),1), #fargs=(ax),
interval=1000, blit=False, repeat_delay=2000, repeat=True)
return line_ani
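    # Usage sketch (illustrative): the returned animation must be kept in a variable so it
    # is not garbage-collected before being displayed or saved.
    #
    #     ani = ms.animateSolution()      # "ms" is an assumed System instance
    #     plt.show()                      # or ani.save('solution.gif') with a suitable writer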
def animate(self, ts):
'''Redraws mooring system positions at step ts. Currently set up in a hack-ish way to work for animations
involving movement of either free DOFs or coupled DOFs (but not both)
'''
# following sets positions of all objects and may eventually be made into self.setPositions(self.positions[i])
X = self.Xs[ts] # Xs are already specified in solveEquilibrium's DOFtype
if self.DOFtype_solve_for == "free":
types = [0]
elif self.DOFtype_solve_for == "coupled":
types = [-1]
elif self.DOFtype_solve_for == "both":
types = [0,-1]
else:
raise ValueError("System.animate called but there is an invalid DOFtype being used")
'''
if len(self.freeDOFs) > 0:
X = self.freeDOFs[ts] # get freeDOFs of current instant
type = 0
elif len(self.cpldDOFs) > 0:
X = self.cpldDOFs[ts] # get freeDOFs of current instant
type = -1
else:
raise ValueError("System.animate called but no animation data is saved in freeDOFs or cpldDOFs")
'''
#print(ts)
i = 0 # index used to split off input positions X for each free object
# update position of free Bodies
for body in self.bodyList:
if body.type in types:
body.setPosition(X[i:i+6]) # update position of free Body
i += 6
body.redraw() # redraw Body
# update position of free Points
for point in self.pointList:
if point.type in types:
point.setPosition(X[i:i+3]) # update position of free Point
i += 3
# redraw Point?
# redraw all lines
for line in self.lineList:
line.redrawLine(0)
# ax.set_title("iteration "+str(ts))
# eventually could show net forces too? <<< if using a non MINPACK method, use callback and do this
        pass  # placeholder statement so the commented-out lines above remain part of the animate method
def animatelines(self, interval=200, repeat=True, delay=0, runtime=-1, **kwargs):
'''
Parameters
----------
dirname : string
The name of the directory folder you are in.
rootname : string
The name of the front portion of the main file name, like spar_WT1, or DTU_10MW_NAUTILUS_GoM.
interval : int, optional
The time between animation frames in milliseconds. The default is 200.
repeat : bool, optional
Whether or not to repeat the animation. The default is True.
delay : int, optional
The time between consecutive animation runs in milliseconds. The default is 0.
Returns
-------
line_ani : animation
an animation of the mooring lines based off of MoorDyn data.
Needs to be stored, returned, and referenced in a variable
'''
        bathymetry = kwargs.get('bathymetry' , False ) # toggles whether to plot the bathymetry (seabed) surface or not
opacity = kwargs.get('opacity' , 1.0 ) # the transparency of the bathymetry plot_surface
hidebox = kwargs.get('hidebox' , False ) # toggles whether to show the axes or not
rang = kwargs.get('rang' , 'hold' ) # colorbar range: if range not used, set it as a placeholder, it will get adjusted later
        res = kwargs.get('res' , 10 ) # animation step size: only every res-th frame is drawn, so larger values give a coarser, choppier animation
colortension = kwargs.get("colortension" , False ) # toggle to draw the mooring lines in colors based on node tensions
cmap_tension = kwargs.get('cmap_tension' , 'rainbow' ) # the type of color spectrum desired for colortensions
# not adding cbar_tension colorbar yet since the tension magnitudes might change in the animation and the colorbar won't reflect that
# can use any other kwargs that go into self.plot()
if self.qs==1:
raise ValueError("This System is set to be quasi-static. Import MoorDyn data and make qs=0 to use this method")
# update animation function. This gets called every iteration of the animation and redraws the line in its next position
def update_Coords(tStep, tempLineList, tempax, colortension, cmap_tension): # not sure why it needs a 'tempax' input but it works better with it
for imooring in tempLineList:
imooring.redrawLine(-tStep, colortension=colortension, cmap_tension=cmap_tension)
return
# create the figure and axes to draw the animation
fig, ax = self.plot(bathymetry=bathymetry, opacity=opacity, hidebox=hidebox, rang=rang, colortension=colortension)
'''
# can do this section instead of self.plot(). They do the same thing
fig = plt.figure(figsize=(20/2.54,12/2.54))
ax = Axes3D(fig)
for imooring in self.lineList:
imooring.drawLine(0, ax)
'''
# set figure x/y/z bounds
d = 1600 # can make this an input later
ax.set_xlim((-d,d))
ax.set_ylim((-d,d));
ax.set_zlim((-self.depth, 300))
ax.set_xlabel('x'); ax.set_ylabel('y'); ax.set_zlabel('z');
# make the axes scaling equal
rangex = np.diff(ax.get_xlim3d())[0]
rangey = np.diff(ax.get_ylim3d())[0]
rangez = np.diff(ax.get_zlim3d())[0]
ax.set_box_aspect([rangex, rangey, rangez])
if runtime==-1:
nFrames = len(self.lineList[0].Tdata)
else:
itime = int(np.where(self.lineList[0].Tdata==runtime)[0])
nFrames = len(self.lineList[0].Tdata[0:itime])
# Animation: update the figure with the updated coordinates from update_Coords function
# NOTE: the animation needs to be stored in a variable, return out of the method, and referenced when calling self.animatelines()
line_ani = animation.FuncAnimation(fig, update_Coords, np.arange(1, nFrames-1, res), fargs=(self.lineList, ax, colortension, cmap_tension),
                                           interval=1, repeat=repeat, repeat_delay=delay)

        return line_ani
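    # Usage sketch (illustrative): requires MoorDyn output to be loaded (qs=0); keep the
    # returned animation in a variable, as noted in the docstring above.
    #
    #     ani = ms.animatelines(interval=50, res=5)   # "ms" is an assumed System with qs=0
    #     plt.show()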
<gh_stars>100-1000
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import collections
import codecs
import functools
import glob
import importlib
import imp
import os
import os.path as pathlib
import sys
try:
from types import FileType # py2
except ImportError:
from io import IOBase as FileType # py3
from .db import database
from .log import logger
from .helpers import (
Color, lines_diff, print_table, parse_requirements, trim_prefix,
trim_suffix
)
from .parser import parse_imports, parse_installed_packages
from .pypi import PKGS_URL, Downloader, Updater
from requests.exceptions import HTTPError
# FIXME: dirty workaround..
_special_packages = {
"dogpile.cache": "dogpile.cache",
"dogpile.core": "dogpile.core",
"ruamel.yaml": "ruamel.yaml",
"ruamel.ordereddict": "ruamel.ordereddict",
}
class RequirementsGenerator(object):
def __init__(
self,
package_root,
save_path,
ignores=None,
cmp_operator="==",
ref_comments=False,
answer_yes=False,
answer_no=False,
):
self._package_root = package_root
self._save_path = save_path
self._ignores = ignores
self._cmp_operator = cmp_operator
self._ref_comments = ref_comments
self._installed_pkgs = None
self._answer_yes = answer_yes
self._answer_no = answer_no
def __call__(self):
self.generate()
@property
def installed_pkgs(self):
if self._installed_pkgs is None:
self._installed_pkgs = parse_installed_packages()
return self._installed_pkgs
def generate(self):
packages, guess = parse_packages(
self._package_root, self._ignores, self.installed_pkgs
)
answer = 'n'
if self._answer_yes or self._answer_no:
answer = 'y' if self._answer_yes else 'n'
elif guess:
print(Color.RED('The following modules are not found yet:'))
self._print_uncertain_modules(guess)
sys.stdout.write(
Color.RED(
(
'Some of them may be not installed in local '
'environment.\nTry to search PyPI for the '
'missing modules and filter'
' some unnecessary modules? (y/[N]) '
)
)
)
sys.stdout.flush()
answer = sys.stdin.readline().strip().lower()
in_pypi = None
if answer in ('y', 'yes'):
print(Color.BLUE('Checking modules on the PyPI...'))
in_pypi = self._check_on_pypi(packages, guess)
old = self._read_requirements()
self._write_requirements(packages)
new = self._read_requirements()
self._print_diff(old, new)
if in_pypi:
for name in in_pypi:
del guess[name]
if guess and answer in ('y', 'yes'):
print(Color.RED('These modules are not found:'))
self._print_uncertain_modules(guess)
            print(Color.RED('Maybe you need to update the database.'))
def _check_on_pypi(self, packages, guess):
in_pypi = set()
for name, locs in guess.items():
logger.info('Checking %s on the PyPI ...', name)
downloader = Downloader()
with database() as db:
rows = db.query_all(name)
pkgs = [row.package for row in rows]
if pkgs:
in_pypi.add(name)
for pkg in _best_matchs(name, pkgs):
try:
latest = downloader.download_package(pkg).version()
packages.add_locs(pkg, latest, locs)
except HTTPError as e:
                    logger.error('checking %s failed: %s', pkg, e)
return in_pypi
def _print_uncertain_modules(self, modules):
for name, locs in modules.items():
print(
' {0} referenced from:\n {1}'.format(
Color.YELLOW(name), '\n '.join(locs.sorted_items())
)
)
def _read_requirements(self):
if not pathlib.isfile(self._save_path):
return
with codecs.open(self._save_path, 'rb', 'utf-8') as f:
return f.readlines()
def _write_requirements(self, packages):
print(
Color.GREEN(
'Writing requirements to "{0}"'.format(self._save_path)
)
)
package_root_parent = pathlib.dirname(
trim_suffix(self._package_root, "/")
) + "/"
ref_comments = self._ref_comments
cmp_operator = self._cmp_operator
with open(self._save_path, 'w+') as f:
f.write(
'# Automatically generated by '
'https://github.com/damnever/pigar.\n'
)
if not ref_comments:
f.write('\n')
for k, v in packages.sorted_items():
if ref_comments:
f.write('\n')
f.write(
''.join(
[
'# {0}\n'.format(
trim_prefix(c, package_root_parent)
) for c in v.comments.sorted_items()
]
)
)
if k == '-e':
f.write('{0} {1}\n'.format(k, v.version))
elif v:
f.write('{0} {1} {2}\n'.format(k, cmp_operator, v.version))
else:
f.write('{0}\n'.format(k))
def _print_diff(self, old, new):
if not old:
return
is_diff, diffs = lines_diff(old, new)
msg = 'Requirements file has been overwritten, '
if is_diff:
msg += 'here is the difference:'
print('{0}\n{1}'.format(Color.YELLOW(msg), ''.join(diffs)), end='')
else:
msg += 'no difference.'
print(Color.YELLOW(msg))
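# Usage sketch (illustrative paths, not part of the original module): generate a
# requirements file for a project root and answer the PyPI-search prompt automatically.
#
#     rg = RequirementsGenerator('./myproject', './myproject/requirements.txt',
#                                answer_yes=True)
#     rg()   # writes the file and prints a diff if one already existed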
def check_requirements_latest_versions(
check_path,
ignores=None,
comparison_operator="==",
ref_comments=False,
answer_yes=False,
answer_no=False,
):
logger.debug('Starting check requirements latest version ...')
files = list()
reqs = dict()
pkg_versions = list()
installed_pkgs = None
# If no requirements file given, check in current directory.
if pathlib.isdir(check_path):
logger.debug('Searching file in "{0}" ...'.format(check_path))
files.extend(glob.glob(pathlib.join(check_path, '*requirements.txt')))
# If not found in directory, generate requirements.
if not files:
print(
Color.YELLOW(
'Requirements file not found, '
'generate requirements ...'
)
)
save_path = os.path.join(check_path, 'requirements.txt')
rg = RequirementsGenerator(
check_path,
save_path,
ignores,
comparison_operator,
ref_comments,
answer_yes,
answer_no,
)
rg()
installed_pkgs = rg.installed_pkgs
files.append(save_path)
else:
files.append(check_path)
for fpath in files:
reqs.update(parse_requirements(fpath))
logger.debug('Checking requirements latest version ...')
installed_pkgs = installed_pkgs or parse_installed_packages()
installed_pkgs = {v[0]: v[1] for v in installed_pkgs.values()}
downloader = Downloader()
for pkg in reqs:
current = reqs[pkg]
        # If no version is specified in the requirements,
# check in installed packages.
if current == '' and pkg in installed_pkgs:
current = installed_pkgs[pkg]
logger.debug('Checking "{0}" latest version ...'.format(pkg))
try:
latest = downloader.download_package(pkg).version()
except HTTPError as e:
            logger.error('checking %s failed: %s', pkg, e)
            continue
        pkg_versions.append((pkg, current, latest))
logger.debug('Checking requirements latest version done.')
print_table(pkg_versions)
def search_packages_by_names(names):
"""Search package information by names(`import XXX`).
"""
downloader = Downloader()
results = collections.defaultdict(list)
not_found = list()
installed_pkgs = parse_installed_packages()
for name in names:
logger.debug('Searching package name for "{0}" ...'.format(name))
# If exists in local environment, do not check on the PyPI.
if name in installed_pkgs:
results[name].append(list(installed_pkgs[name]) + ['local'])
# Check information on the PyPI.
else:
rows = None
with database() as db:
rows = db.query_all(name)
if rows:
for row in rows:
try:
version = downloader.download_package(row.package
).version()
results[name].append((row.package, version, 'PyPI'))
except HTTPError as e:
                        logger.error('checking %s failed: %s', row.package, e)
else:
not_found.append(name)
for name in results:
print('Found package(s) for "{0}":'.format(Color.GREEN(name)))
print_table(results[name], headers=['PACKAGE', 'VERSION', 'WHERE'])
if not_found:
msg = '"{0}" not found.\n'.format(Color.RED(', '.join(not_found)))
msg += 'Maybe you need to update the database.'
print(Color.YELLOW(msg))
def update_database():
"""Update database."""
print(Color.GREEN('Starting update database ...'))
print(Color.YELLOW('The process will take a long time!!!'))
logger.info('Crawling "{0}" ...'.format(PKGS_URL))
try:
updater = Updater()
except Exception:
logger.error("Fail to fetch all packages: ", exc_info=True)
print(Color.RED('Operation aborted'))
return
try:
updater.run()
updater.wait()
except (KeyboardInterrupt, SystemExit):
# FIXME(damnever): the fucking signal..
updater.cancel()
print(Color.BLUE('Operation canceled!'))
else:
print(Color.GREEN('Operation done!'))
def parse_packages(package_root, ignores=None, installed_pkgs=None):
imported_modules, user_modules = parse_imports(package_root, ignores)
installed_pkgs = installed_pkgs or parse_installed_packages()
packages = _RequiredModules()
guess = collections.defaultdict(_Locations)
try_imports = set()
for module in imported_modules:
name = module.name
if is_user_module(module, user_modules, package_root):
logger.debug("ignore imports from user module: %s", name)
continue
if is_stdlib(name) or is_stdlib(name.split('.')[0]):
logger.debug("ignore imports from stdlib: %s", name)
continue
names = []
special_name = '.'.join(name.split('.')[:2])
# Flask extension.
if name.startswith('flask.ext.'):
names.append('flask')
names.append('flask_' + name.split('.')[2])
# Special cases..
elif special_name in _special_packages:
names.append(_special_packages[special_name])
# Other.
elif '.' in name:
names.append(name.split('.')[0])
else:
names.append(name)
for name in names:
if name in installed_pkgs:
pkg_name, version = installed_pkgs[name]
packages.add(pkg_name, version, module.file, module.lineno)
else:
guess[name].add(module.file, module.lineno)
if module.try_:
try_imports.add(name)
names = []
for name in guess:
if name in try_imports:
names.append(name)
for name in names:
del guess[name]
return packages, guess
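# Usage sketch (illustrative path): parse_packages returns the resolved third-party
# packages plus a mapping of imported names it could not match to an installed package.
#
#     packages, guess = parse_packages('./src')
#     for pkg, detail in packages.sorted_items():
#         print(pkg, detail.version)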
def _best_matchs(name, pkgs):
# If imported name equals to package name.
if name in pkgs:
return [pkgs[pkgs.index(name)]]
# If not, return all possible packages.
return pkgs
def is_user_module(module, user_modules, package_root):
name = module.name
if name.startswith("."):
return True
parts = name.split(".")
cur_mod_path = module.file[:-3]
dir_path_parts = pathlib.dirname(module.file).split("/")
nparts = len(dir_path_parts)
for i in range(0, nparts):
i = -i if i > 0 else nparts
dir_path = "/".join(dir_path_parts[:i])
if dir_path == "":
dir_path = "/"
if dir_path not in user_modules:
break
mod_paths = [pathlib.join(dir_path, "/".join(parts))]
if len(dir_path_parts[:i]) > 0 and dir_path_parts[:i][-1] == parts[0]:
mod_paths.append(dir_path)
for mod_path in mod_paths:
# FIXME(damnever): ignore the current file?
if mod_path == cur_mod_path:
continue
if mod_path in user_modules:
return True
return False
def _checked_cache(func):
checked = dict()
@functools.wraps(func)
def _wrapper(name):
if name not in checked:
checked[name] = func(name)
return checked[name]
return _wrapper
@_checked_cache
def is_stdlib(name):
"""Check whether it is stdlib module."""
exist = True
module_info = ('', '', '')
try:
module_info = imp.find_module(name)
except ImportError:
try:
# __import__(name)
importlib.import_module(name)
module_info = imp.find_module(name)
sys.modules.pop(name)
except ImportError:
exist = False
# Testcase: ResourceWarning
if isinstance(module_info[0], FileType):
module_info[0].close()
mpath = module_info[1]
if exist and (
mpath is not None and (
'site-packages' in mpath or 'dist-packages' in mpath or
('bin/' in mpath and mpath.endswith('.py'))
)
):
exist = False
return exist
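# Usage sketch: thanks to the _checked_cache decorator, repeated lookups of the same
# module name are answered from the cache.
#
#     is_stdlib('os')        # -> True
#     is_stdlib('requests')  # -> False when it is installed under site-packages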
class _RequiredModules(dict):
_Detail = collections.namedtuple('Detail', ['version', 'comments'])
def __init__(self):
super(_RequiredModules, self).__init__()
self._sorted = None
def add_locs(self, package, version, locations):
if package in self:
self[package].comments.extend(locations)
else:
self[package] = self._Detail(version, locations)
def add(self, package, version, file, lineno):
if package in self:
self[package].comments.add(file, lineno)
else:
loc = _Locations()
loc.add(file, lineno)
self[package] = self._Detail(version, loc)
def sorted_items(self):
if self._sorted is None:
self._sorted = sorted(self.items())
return self._sorted
def remove(self, *names):
for name in names:
if name in self:
self.pop(name)
self._sorted = None
class _Locations(dict):
"""_Locations store code locations(file, linenos)."""
def __init__(self):
super(_Locations, self).__init__()
self._sorted = None
def add(self, file, lineno):
if file in self and lineno not in self[file]:
self[file].append(lineno)
else:
self[file] = [lineno]
def extend(self, obj):
for file, linenos in obj.items():
for lineno in linenos:
self.add(file, lineno)
def sorted_items(self):
if self._sorted is None:
self._sorted = [
                '{0}: {1}'.format(f, ','.join([str(n) for n in sorted(linenos)]))
                for f, linenos in sorted(self.items())
            ]
        return self._sorted
""" Profiles file for all the Profiles classes in Deequ"""
from pydeequ.scala_utils import get_or_else_none, to_scala_seq, to_scala_map, scala_map_to_dict, scala_map_to_java_map, java_list_to_python_list
from pydeequ.pandas_utils import ensure_pyspark_df
from pyspark.sql import SparkSession, DataFrame
from collections import namedtuple
from pydeequ.analyzers import *
from pydeequ.metrics import *
import json
# ColumnProfilerRunner Classes
# TODO refactor documentation (https://devguide.python.org/documenting/)
DistributionValue = namedtuple('DistributionValue', 'value count ratio') # TODO: Revisit with Chris
class ColumnProfilerRunner:
"""
Primary class for interacting with the profiles module.
:param SparkSession spark_session: sparkSession
"""
def __init__(self, spark_session: SparkSession):
if not isinstance(spark_session, SparkSession): raise TypeError(f"Expected SparkSession object for spark_session, not {type(spark_session)}")
self._spark_session = spark_session
self._sc = spark_session.sparkContext
self._jvm = spark_session._jvm
self._ColumnProfilerRunner = self._jvm.com.amazon.deequ.profiles.ColumnProfilerRunner()
def onData(self, df):
"""
Starting point to construct a profile
:param df: Tabular data on which the profiles module will use
:return: The starting point to construct a profile
"""
df = ensure_pyspark_df(self._spark_session, df)
return ColumnProfilerRunBuilder(self._spark_session, df)
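    # Usage sketch (illustrative; "spark" and "df" are assumed to be an existing
    # SparkSession and DataFrame):
    #
    #     result = ColumnProfilerRunner(spark).onData(df).run()
    #     for column, profile in result.profiles.items():
    #         print(profile)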
def run(self, data,
restrictToColumns,
lowCardinalityHistogramThreshold,
printStatusUpdates,
cacheInputs,
fileOutputOptions,
metricsRepositoryOptions,
kllParameters,
predefinedTypes):
"""
:param data:
:param restrictToColumns:
:param lowCardinalityHistogramThreshold:
:param printStatusUpdates:
:param cacheInputs:
:param fileOutputOptions:
:param metricsRepositoryOptions:
:param kllParameters:
:param predefinedTypes:
:return:
"""
raise NotImplementedError("Encapsulated")
# TODO: preprocess parameters
# return self._ColumnProfilerRunner.run(
# data,
# restrictToColumns,
# lowCardinalityHistogramThreshold,
# printStatusUpdates,
# cacheInputs,
# fileOutputOptions,
# metricsRepositoryOptions,
# kllParameters,
# predefinedTypes
# )
# ColumnProfilerRunBuilder Classes
class ColumnProfilerRunBuilder:
"""
Low level class for running profiling module
:param SparkSession spark_session: sparkSession
:param data: Tabular data which will be used to construct a profile.
"""
def __init__(self, spark_session: SparkSession, data: DataFrame):
if not isinstance(spark_session, SparkSession): raise TypeError(f"Expected SparkSession object for spark_session, not {type(spark_session)}")
if not isinstance(data, DataFrame): raise TypeError(f"Expected DataFrame object for data, not {type(data)}")
self._spark_session = spark_session
self._sc = spark_session.sparkContext
self._jvm = spark_session._jvm
self._ColumnProfilerRunBuilder = self._jvm.com.amazon.deequ.profiles.ColumnProfilerRunBuilder(data._jdf)
def run(self):
"""
A method that runs a profile check on the data to obtain a ColumnProfiles class
:return: A ColumnProfiles result
"""
run = self._ColumnProfilerRunBuilder.run()
return ColumnProfilesBuilder(self._spark_session)._columnProfilesFromColumnRunBuilderRun(run)
def printStatusUpdates(self, print_status_updates: bool):
"""
Print status updates between passes
:param bool print_status_updates: Whether to print status updates
:return: Printed status
"""
self._ColumnProfilerRunBuilder.printStatusUpdates(print_status_updates)
return self
def cacheInputs(self, cache_inputs: bool):
"""
Cache the inputs
:param bool cache_inputs: Whether to print status updates
:return: Cache inputs
"""
self._ColumnProfilerRunBuilder.cacheInputs(cache_inputs)
return self
def withLowCardinalityHistogramThreshold(self, low_cardinality_histogram_threshold: int):
"""
Set the thresholds of value until it is expensive to calculate the histograms
:param int low_cardinality_histogram_threshold: The designated threshold
:return: a set threshold
"""
self._ColumnProfilerRunBuilder.withLowCardinalityHistogramThreshold(low_cardinality_histogram_threshold)
return self
def restrictToColumns(self, restrict_to_columns: list):
"""
        Can be used to specify a subset of columns to look at
:param list restrict_to_columns: Specified columns
:return: A subset of columns to look at
"""
self._ColumnProfilerRunBuilder.restrictToColumns(to_scala_seq(self._jvm, restrict_to_columns))
return self
def withKLLProfiling(self):
"""
Enable KLL Sketches profiling on Numerical columns, disabled by default.
:return: Enable KLL Sketches profiling on Numerical columns, disabled by default.
"""
self._ColumnProfilerRunBuilder.withKLLProfiling()
return self
def setKLLParameters(self, kllParameters: KLLParameters):
"""
Set kllParameters
:param KLLParameters kllParameters: kllParameters(sketchSize, shrinkingFactor, numberOfBuckets)
"""
self._ColumnProfilerRunBuilder.setKLLParameters(
self._jvm.scala.Option.apply(kllParameters._param)
)
return self
def setPredefinedTypes(self, dataTypes: dict):
"""
Set predefined data types for each column (e.g. baseline)
:param dict dataTypes: dataType map for baseline columns
:return: Baseline for each column
"""
self._ColumnProfilerRunBuilder.setPredefinedTypes(to_scala_map(self._spark_session, dataTypes))
return self
def useRepository(self, repository):
"""
Set a metrics repository associated with the current data to enable features like reusing
previously computed results and storing the results of the current run.
        :param repository: A metrics repository to store and load results associated with the run
:return: Sets a metrics repository with the current data to use features on
"""
self._ColumnProfilerRunBuilder = self._ColumnProfilerRunBuilder.useRepository(repository.repository)
return self
def saveOrAppendResult(self, resultKey):
"""
A shortcut to save the results of the run or append them to existing results in the metrics repository
:param resultKey: The result key to identify the current run
:return: A saved results of the run in the metrics repository
"""
self._ColumnProfilerRunBuilder.saveOrAppendResult(resultKey.resultKey)
return self
def useSparkSession(self, sparkSession):
"""
Use a sparkSession to conveniently create output files
:param SparkSession sparkSession: sparkSession
:return: A sparksession to create output files
"""
# TODO
pass
class ColumnProfilesBuilder:
def __init__(self, spark_session: SparkSession):
"""
The results returned from the columnProfilerRunner
:param SparkSession spark_session: sparkSession
"""
if not isinstance(spark_session, SparkSession): raise TypeError(f"Expected SparkSession object, not {type(spark_session)}")
self._spark_session = spark_session
self._sc = spark_session.sparkContext
self._jvm = spark_session._jvm
self._profiles = []
self.columnProfileClasses = {
'StandardColumnProfile': StandardColumnProfile,
'NumericColumnProfile': NumericColumnProfile
}
def _columnProfilesFromColumnRunBuilderRun(self, run):
"""
Produces a Java profile based on the designated column
:param run: columnProfilerRunner result
:return: a setter for columnProfilerRunner result
"""
self._run_result = run
profile_map = self._jvm.scala.collection.JavaConversions.mapAsJavaMap(run.profiles()) # TODO from ScalaUtils
self._profiles = {column: self._columnProfileBuilder(column, profile_map[column]) for column in profile_map}
return self
@property
def profiles(self):
"""
A getter for profiles
:return: a getter for profiles
"""
return self._profiles
def _columnProfileBuilder(self, column, java_column_profile):
""" Factory function for ColumnProfile
Returns a Java profile based on the designated column
:param column: The column to run a profile on
:param java_column_profile: The profile mapped as a Java map
"""
return self.columnProfileClasses[java_column_profile.getClass().getSimpleName()](self._spark_session,
column,
java_column_profile)
class ColumnProfile:
""" Factory class for Standard and Numeric Column Profiles
The class for getting the Standard and Numeric Column profiles of the data.
:param SparkSession spark_session: sparkSession
:param column: designated column to run a profile on
:param java_column_profile: The profile mapped as a Java map
"""
def __init__(self, spark_session: SparkSession, column, java_column_profile):
if not isinstance(spark_session, SparkSession): raise TypeError(f"Expected SparkSession object for spark_session, not {type(spark_session)}")
self._spark_session = spark_session
self._sc = spark_session.sparkContext
self._jvm = spark_session._jvm
self._java_column_profile = java_column_profile
self._column = column
self._completeness = java_column_profile.completeness()
self._approximateNumDistinctValues = java_column_profile.approximateNumDistinctValues()
self._dataType = java_column_profile.dataType()
self._typeCounts = scala_map_to_dict(self._jvm, java_column_profile.typeCounts())
self._isDataTypeInferred = True if java_column_profile.isDataTypeInferred() == "true" else False
if get_or_else_none(self._java_column_profile.histogram()):
self._histogram = [DistributionValue(k, v.absolute(), v.ratio())
for k, v in scala_map_to_java_map(self._jvm, self._java_column_profile.histogram().get().values()).items()]
else:
self._histogram = None
@property
def column(self):
"""
Getter for the current column name in ColumnProfile
:return: gets the column name in the Column Profile
"""
return self._column
@property
def completeness(self):
""""
Getter that returns the completeness of data in the column
:return: gets the calculated completeness of data in the column
"""
return self._completeness
@property
def approximateNumDistinctValues(self):
"""
Getter that returns the amount of distinct values in the column
:return: gets the number of distinct values in the column
"""
return self._approximateNumDistinctValues
@property
def dataType(self):
"""
Getter that returns the datatype of the column
:return: gets the datatype of the column
"""
return str(self._dataType)
@property
def isDataTypeInferred(self):
"""
Getter that returns a boolean of whether the Data Type of the column was inferred
:return: gets the isDataTypeInferred of the column
"""
return self._isDataTypeInferred
@property
def typeCounts(self):
"""
A getter for the number of values for each datatype in the column
:return: gets the number of values for each datatype
"""
return self._typeCounts
@property
def histogram(self):
"""
A getter for the full value distribution of the column
:return: gets the histogram of a column
"""
return self._histogram
class StandardColumnProfile(ColumnProfile):
"""
Standard Column Profile class
:param SparkSession spark_session: sparkSession
:param column: the designated column of which the profile is run on
:param java_column_profile: The profile mapped as a Java map
"""
def __init__(self, spark_session: SparkSession, column, java_column_profile):
super().__init__(spark_session, column, java_column_profile)
self.all = {"completeness": self.completeness,
"approximateNumDistinctValues": self.approximateNumDistinctValues,
"dataType": self.dataType,
"isDataTypeInferred": self.isDataTypeInferred,
"typeCounts": self.typeCounts,
"histogram": self.histogram
}
def __str__(self):
"""
A JSON of the standard profiles for each column
:return: A JSON of the standard profiles
"""
return f"StandardProfiles for column: {self.column}: {json.dumps(self.all, indent=4)}"
class NumericColumnProfile(ColumnProfile):
"""
Numeric Column Profile class
:param SparkSession spark_session: sparkSession
:param column: the designated column of which the profile is run on
:param java_column_profile: The profile mapped as a Java map
"""
def __init__(self, spark_session: SparkSession, column, java_column_profile):
super().__init__(spark_session, column, java_column_profile)
# TODO: self.numRecords = java_column_profile.numRecords()
self._kll = BucketDistribution(spark_session, java_column_profile.kll().get()) if get_or_else_none(java_column_profile.kll()) else None
self._mean = get_or_else_none(java_column_profile.mean())
self._maximum = get_or_else_none(java_column_profile.maximum())
self._minimum = get_or_else_none(java_column_profile.minimum())
self._sum = get_or_else_none(java_column_profile.sum())
self._stdDev = get_or_else_none(java_column_profile.stdDev())
self._approxPercentiles = java_list_to_python_list(str(get_or_else_none(java_column_profile.approxPercentiles())), float) if get_or_else_none(java_column_profile.approxPercentiles()) else []
self.all = {"completeness": self.completeness,
"approximateNumDistinctValues": self.approximateNumDistinctValues,
"dataType": self.dataType,
"isDataTypeInferred": self.isDataTypeInferred,
"typeCounts": self.typeCounts,
"histogram": self.histogram,
"kll": str(self._kll),
"mean": self._mean,
"maximum": self._maximum,
"minimum": self._minimum,
"sum": self._sum,
"stdDev": self._stdDev,
"approxPercentiles": self._approxPercentiles
}
def __str__(self):
"""
A JSON of the numerical profiles for each column
:return: A JSON of the numerical profiles
"""
        return f"NumericProfiles for column: {self.column}: {json.dumps(self.all, indent=4)}"
<filename>src/autotrail/workflow/default_workflow/state_machine.py
"""Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from autotrail.core.api.management import APIHandlerResponse
class State:
"""Namespace for the states of the machines."""
READY = 'Ready'
WAITING = 'Waiting'
TOSKIP = 'To Skip'
SKIPPED = 'Skipped'
PAUSED = 'Paused'
RUNNING = 'Running'
INTERRUPTED = 'Interrupted'
SUCCEEDED = 'Successful'
FAILED = 'Failed'
ERROR = 'Error'
class Action:
"""Namespace for the actions available."""
START = 'Start'
RUN = 'Run'
SUCCEED = 'Succeed'
FAIL = 'Fail'
ERROR = 'Error'
PAUSE = 'Pause'
INTERRUPT = 'Interrupt'
RESUME = 'Resume'
RERUN = 'Re-run'
MARKSKIP = 'Mark to skip'
SKIP = 'Skip'
UNSKIP = 'Unskip'
class StepFailed(Exception):
"""Exception raised when a Step fails irrecoverably."""
pass
TRANSITION_RULES = {
# The rules for state transitions as a mapping of the following form:
# {
# <State 1 (str)>: {
# <Action 1 (str)>: <State 2 (str)>,
# ...
# },
# ...
# }
# The above snippet defines the rule that if a Step (machine) is in "State 1", "Action 1" can be performed on it.
# If "Action 1" is performed, then the machine transitions to "State 2".
State.READY: {
Action.START: State.WAITING,
Action.PAUSE: State.PAUSED,
Action.MARKSKIP: State.TOSKIP},
State.WAITING: {
Action.RUN: State.RUNNING,
Action.PAUSE: State.PAUSED,
Action.MARKSKIP: State.TOSKIP},
State.TOSKIP: {
Action.UNSKIP: State.WAITING,
Action.SKIP: State.SKIPPED},
State.PAUSED: {
Action.RESUME: State.WAITING,
Action.MARKSKIP: State.TOSKIP},
State.ERROR: {
Action.RERUN: State.WAITING,
Action.MARKSKIP: State.TOSKIP},
State.RUNNING: {
Action.SUCCEED: State.SUCCEEDED,
Action.FAIL: State.FAILED,
Action.INTERRUPT: State.INTERRUPTED,
Action.ERROR: State.ERROR},
State.INTERRUPTED: {
Action.RESUME: State.WAITING,
Action.MARKSKIP: State.TOSKIP}}
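# Usage sketch: the nested mapping reads as "from state X, action A moves the machine to
# state Y", for example:
#
#     TRANSITION_RULES[State.READY][Action.PAUSE]     # -> State.PAUSED
#     TRANSITION_RULES[State.RUNNING][Action.FAIL]    # -> State.FAILED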
def generate_preconditions(ordered_pairs, failure=False):
"""Factory to automatically generate the default pre-conditions (dictionary) for given ordered pairs of steps.
:param ordered_pairs: A list of ordered pairs like [(a, b)] where 'a' and 'b' have an 'id' attribute.
:param failure: Boolean controlling the preconditions as explained below.
:return: A list of ordered pairs like [(a, b)] (with failure=False) will return the following
preconditions:
{
<b.id>: {
<a.id>: [State.SUCCEEDED, State.SKIPPED],
},
}
Since failure=False, it means that 'b' should be executed only if 'a' is successful.
However, 'b' may be executed by skipping 'a'. This is expressed in the above pre-condition.
Similarly, the list of ordered pairs like [(a, b)] (with failure=True) will return the
following preconditions:
{
<b.id>: {
<a.id>: [State.FAILED],
},
}
Since failure=True, it means that 'b' should be executed only if 'a' has failed. Therefore,
'b' can't be executed by skipping 'a'. This is expressed in the above pre-condition.
All states are from the State namespace class.
"""
cumulative_preconditions = {}
states = [State.FAILED] if failure else [State.SUCCEEDED, State.SKIPPED]
for step, linked_step in ordered_pairs:
cumulative_preconditions.setdefault(linked_step.id, {}).update(
{step.id: states})
if not failure:
cumulative_preconditions[linked_step.id].update(cumulative_preconditions.setdefault(step.id, {}))
return cumulative_preconditions
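# Usage sketch ("a" and "b" stand for step-like objects exposing an "id" attribute):
#
#     preconds = generate_preconditions([(a, b)])
#     # -> {b.id: {a.id: [State.SUCCEEDED, State.SKIPPED]}, a.id: {}}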
def generate_machine_definitions(initial_state, preconditions, transition_rules):
"""Factory to generate the default state machine definitions (dictionary).
:param initial_state: A string from the State namespace representing the initial state of the associated
machine.
:param preconditions: A mapping of the form:
{
<Machine name>: [<State 1>, <State 2>, ...],
}
Where, <State 1>, <State 2>, etc., are from the State namespace representing the various
states the corresponding step needs to be in.
This is considered satisfied if and only if all the steps are in one of their associated
states.
The preconditions are applied *only* to the following state transitions:
From State.WAITING under Action.RUN
From State.TOSKIP under Action.SKIP
This is because all user-intervened actions can happen only when the step is waiting or
has not been skipped.
:param transition_rules: A mapping of the form:
{
<State 1 (from the State namespace)>: {
<Action 1 (from the Action namespace)>: <State 2 (from the State namespace)>,
...
},
...
}
The above snippet defines the rule that if a Step (machine) is in "State 1", "Action 1"
can be performed on it.
If "Action 1" is performed, then the machine transitions to "State 2".
:return: The above parameters are transformed into the state machine definitions data structure
whose structure is shown below:
{
<Name>: (<State>, {<From>: {<Action>: (<To>, [<Precondition 1>,
<Precondition 2>,
...]
)
},
},
)
}
Where:
<Name> is a string representing a machine. In this case it is the ID of the step.
<State> is from the State namespace representing the initial state of the associated
machine.
<From> is from the State namespace representing the state from which a transition
may happen.
<Action> is from the Action namespace representing an action that may be performed
to make a state transition.
<To> is from the State namespace representing the end state of an associated action.
<Precondition 1>, <Precondition 2>, etc., are mappings of the form:
{
<Machine name>: [<State 1>, <State 2>, ...],
}
Where: <State 1>, <State 2>, etc., are from the State namespace representing the
various states the corresponding machine needs to be in.
Which is considered satisfied if and only if all the machines are in one of
their associated states.
"""
state_machine_definitions = {}
for step_id, step_precondition in preconditions.items():
step_transition_rules = {}
for from_state, action_to_state_mapping in transition_rules.items():
step_transition_rules[from_state] = {}
for action, to_state in action_to_state_mapping.items():
if step_precondition and ((from_state == State.WAITING and action == Action.RUN) or
(from_state == State.TOSKIP and action == Action.SKIP)):
step_preconditions = [step_precondition]
else:
step_preconditions = []
step_transition_rules[from_state][action] = (to_state, step_preconditions)
state_machine_definitions[step_id] = (initial_state, step_transition_rules)
return state_machine_definitions
def make_state_machine_definitions(success_pairs, failure_pairs, transition_rules=None, initial_state=State.READY):
"""Factory to generate the default state machine definitions (dictionary) from the given ordered pairs.
:param success_pairs: A list of tuples (ordered pairs) of steps that is followed in the case of successful
execution. E.g., [(a, b)] means that 'b' will be executed only if 'a' is successful.
:param failure_pairs: A list of tuples (ordered pairs) of steps that is followed in the case of failed
execution. E.g., [(a, b)] means that 'b' will be executed only if 'a' has failed.
:param transition_rules: A mapping of the form:
{
<State 1 (from the State namespace)>: {
<Action 1 (from the Action namespace)>: <State 2 (from the State namespace)>,
...
},
...
}
The above snippet defines the rule that if a Step (machine) is in "State 1", "Action 1"
can be performed on it.
If "Action 1" is performed, then the machine transitions to "State 2".
:param initial_state: A string from the State namespace representing the initial state of the associated
machine.
:return: The above parameters are transformed into the state machine definitions data structure
whose structure is shown below:
{
<Name>: (<State>, {<From>: {<Action>: (<To>, [<Precondition 1>,
<Precondition 2>,
...]
)
},
},
)
}
Where:
<Name> is a string representing a machine. In this case it is the ID of the step.
<State> is from the State namespace representing the initial state of the associated
machine.
<From> is from the State namespace representing the state from which a transition
may happen.
<Action> is from the Action namespace representing an action that may be performed
to make a state transition.
<To> is from the State namespace representing the end state of an associated action.
<Precondition 1>, <Precondition 2>, etc., are mappings of the form:
{
<Machine name>: [<State 1>, <State 2>, ...],
}
Where: <State 1>, <State 2>, etc., are from the State namespace representing the
various states the corresponding machine needs to be in.
Which is considered satisfied if and only if all the machines are in one of
their associated states.
A list of ordered pairs like [(a, b)] (with failure=False) will produce the following
preconditions:
{
<b.id>: {
<a.id>: [State.SUCCEEDED, State.SKIPPED],
},
}
Since failure=False, it means that 'b' should be executed only if 'a' is | |
Raises TestFailError if the second argument is outside a tolerance
range (defined by the "fudge factor"). The default is 5% of the first
argument.
"""
if fudge is None:
fudge = arg1*0.05
if abs(arg1-arg2) > fudge:
raise TestFailError, \
msg or "%s and %s not within %s units of each other." % \
(arg1, arg2, fudge)
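    # A minimal usage sketch of the tolerance check above (the method name and
    # the values are illustrative assumptions): with arg1=100 and no explicit
    # fudge, the tolerance is 100 * 0.05 == 5.0, so 103 is accepted while 110
    # raises TestFailError.
    #
    #   self.assertApproximatelyEqual(100, 103)   # passes, abs(100-103) <= 5.0
    #   self.assertApproximatelyEqual(100, 110)   # raises TestFailError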
def assertRaises(self, exception, method, args=None, kwargs=None, msg=None):
"""Assert that a method and the given args will raise the given
exception.
Args:
exception: The exception class the method should raise.
method: the method to call with the given arguments.
args: a tuple of positional arguments.
kwargs: a dictionary of keyword arguments
msg: optional message string to be used if assertion fails.
"""
args = args or ()
kwargs = kwargs or {}
try:
rv = method(*args, **kwargs)
except exception:
return
# it might raise another exception, which is marked INCOMPLETE
raise TestFailError, msg or "%r did not raise %r." % (method, exception)
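    # Usage sketch (values are illustrative): int("not-a-number") raises
    # ValueError, so the assertion below succeeds; a call that raises nothing
    # falls through to the TestFailError above.
    #
    #   self.assertRaises(ValueError, int, args=("not-a-number",))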
# some logical aliases
failIfEqual = assertNotEqual
failIfNotEqual = assertEqual
assertNotTrue = assertFalse
assertNotFalse = assertTrue
failUnlessRaises = assertRaises
# data storage
def save_text(self, text, filename=None):
"""Save some text into a file in the results location.
This may be called multiple times and the file will be appended to.
Arguments::
text: A blob of text as a string.
filename: the base name of the file to write. Default is test name plus timestamp.
"""
if filename is None:
filename = self.get_filename("saved", "txt")
fo = UserFile.UserFile(filename, "a")
try:
fo.write(str(text))
finally:
fo.close()
@classmethod
def open_data_file(cls, fname):
"""Open a data file located in the same directory as the test case
implementation.

Return the file object (actually a UserFile object). Make sure you
close it.
"""
fullname = os.path.join(
os.path.dirname(sys.modules[cls.__module__].__file__), fname)
return UserFile.UserFile(fullname)
def save_data(self, data, note=None):
"""Send an add_data message to the report.
The object is serialized to JSON, so only use basic types.
Arguments:
data: any python object.
note: A text note describing the data for future users (optional).
"""
self._report.add_data(data, note)
# --------------------
class PreReq(object):
"""A holder for test prerequisite.
Used to hold the definition of a prerequisite test. A prerequisite is a
Test implementation class plus any arguments it may be called with.
No arguments means ANY arguments.
"""
def __init__(self, implementation, args=None, kwargs=None):
self.implementation = str(implementation)
self.args = args or ()
self.kwargs = kwargs or {}
def __repr__(self):
return "%s(%r, args=%r, kwargs=%r)" % \
(self.__class__.__name__, self.implementation,
self.args, self.kwargs)
def __str__(self):
return repr_test(self.implementation, self.args, self.kwargs)
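# Example (module and class names are invented for illustration):
#   PreReq("mytests.LoginTest")             # matches LoginTest run with ANY arguments
#   PreReq("mytests.LoginTest", args=(1,))  # matches only the run called with args=(1,)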
class TestEntry(object):
"""Helper class used to run a Test with arguments and store the result.
Holds an instance of a Test class and the parameters it will be called
with. This actually calls the test, and stores the result value for
later summary. It also supports pre-requisite checking.
"""
def __init__(self, inst, args=None, kwargs=None, autoadded=False):
self.inst = inst
self.args = args or ()
self.kwargs = kwargs or {}
self._result = TestResult(constants.INCOMPLETE)
self.autoadded = autoadded # True if automatically added as a prerequisite.
def run(self, config=None):
"""Invoke the test with its arguments. The config argument is passed
when run directly from a TestRunner, but not from a TestSuite. It is
ignored here.
"""
try:
self._result = self.inst(*self.args, **self.kwargs)
except KeyboardInterrupt:
self._result = TestResult(constants.ABORT)
raise
return self._result
def __eq__(self, other):
return self.inst == other.inst
def _setResult(self, val):
self._result = val
result = property(lambda s: s._result, _setResult,
doc="The test rusult enumeration.")
def match_test(self, name, args, kwargs):
"""Test signature matcher.
Determine if a test name and set of arguments matches this test.
"""
return (name, args, kwargs) == \
(self.inst.test_name, self.args, self.kwargs)
def match_prerequisite(self, prereq):
"""Does this test match the specified prerequisite?
Returns True if this test matches the supplied PreReq object.
"""
return (self.inst.test_name, self.args, self.kwargs) == \
(prereq.implementation, prereq.args, prereq.kwargs)
def _get_prerequisites(self):
return self.inst.prerequisites
prerequisites = property(_get_prerequisites)
def get_signature(self):
"""Return a unique identifier for this test entry."""
try:
return self._signature
except AttributeError:
arg_sig = repr((self.args, self.kwargs))
self._signature = (id(self.inst.__class__), arg_sig)
return self._signature
signature = property(get_signature, doc="unique signature string of test.")
def abort(self):
"""Abort the test suite.
Causes this test, and the suite, to be aborted.
"""
self._result = self.inst.abort("Abort forced by suite runner.")
return self._result
test_name = property(lambda s: s.inst.test_name)
def __repr__(self):
return repr_test(self.inst.test_name, self.args, self.kwargs)
def __str__(self):
return "%s: %s" % (self.__repr__(), self._result)
class SuiteEntry(TestEntry):
"""Entry object that wraps other Suite objects.
Used when sub-suites are run as test cases.
"""
def _get_result(self):
self._results = self.inst.results
for res in self._results:
if res.not_passed():
self._result = res
return res
self._result = TestResult(constants.PASSED)
return TestResult(constants.PASSED)
def _setResult(self, val):
self._result = val
result = property(lambda s: s._get_result(),
_setResult, None,
"""The test rusult enumeration PASSED if all tests in suite passed.""")
results = property(lambda s: s._results, None, None,
"""The actual list of test results.""")
def PruneEnd(n, l):
return l[:n]
class TestEntrySeries(TestEntry):
"""
Provides an efficient means to add many test case instances without
having to actually instantiate a TestEntry at suite build time.
"""
def __init__(self, testinstance, N, chooser, filter, args, kwargs):
from pycopia import combinatorics
self.inst = testinstance
self.args = args or ()
self.kwargs = kwargs or {}
self._sig = methods.MethodSignature(testinstance.execute)
self.result = TestResult(constants.INCOMPLETE) # Aggregate of test results
chooser = chooser or PruneEnd
arglist = []
if args:
arglist.extend(args)
if kwargs:
for name, default in self._sig.kwarguments:
try:
val = kwargs[name]
except KeyError:
pass
else:
arglist.append(val)
self._counter = combinatorics.ListCounter( combinatorics.prune(N, arglist, chooser))
if filter:
assert callable(filter)
self._filter = filter
else:
self._filter = lambda *args, **kwargs: True
test_name = property(lambda s: s.inst.test_name)
def match_prerequisite(self, prereq):
"""Does this test match the specified prerequisite?
Returns True if this test name matches the supplied PreReq object.
Only the name is checked for series tests, since the arguments may vary.
"""
return self.inst.test_name == prereq.implementation
def run(self, config=None):
resultset = {constants.PASSED:0, constants.FAILED:0,
constants.EXPECTED_FAIL:0, constants.INCOMPLETE:0}
for argset in self._counter:
kwargs = self._sig.get_keyword_arguments(argset)
# kwargs also contains non-keyword args, but python maps them to
# positional args anyway.
if self._filter(**kwargs):
entry = TestEntry(self.inst, (), kwargs)
entryresult = entry.run()
resultset[int(entryresult)] += 1
if resultset[constants.FAILED] > 0:
self.result = TestResult(constants.FAILED)
elif resultset[constants.INCOMPLETE] > 0:
self.result = TestResult(constants.INCOMPLETE)
elif resultset[constants.PASSED] > 0:
self.result = TestResult(constants.PASSED)
return self.result
def repr_test(name, args, kwargs):
"""Produce repr form of test case signature.
Returns a Test instantiation plus arguments as text (repr).
"""
return "%s()(%s)" % (name, repr_args(args, kwargs))
def repr_args(args, kwargs):
"""Stringify a set of arguments.
Arguments:
args: tuple of arguments as a function would see it.
kwargs: dictionary of keyword arguments as a function would see it.
Returns:
String as you would write it in a script.
"""
args_s = (("%s, " if kwargs else "%s") % ", ".join(map(repr, args))) if args else ""
kws = ", ".join(map(lambda it: "%s=%r" % (it[0], it[1]), kwargs.items()))
return "%s%s" % (args_s, kws)
def parse_args(arguments):
"""Take a string of arguments and keyword arguments and convert back to
objects.
"""
# Try a possibly icky method of constructing a temporary function string
# and exec it (leverage Python parser and argument handling).
ANY = None # To allow "ANY" keyword in prereq spec.
def _ArgGetter(*args, **kwargs):
return args, kwargs
funcstr = "args, kwargs = _ArgGetter(%s)\n" % arguments
exec funcstr in locals()
return args, kwargs # set by exec call
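# parse_args is effectively the inverse of repr_args; for example (illustrative):
#   parse_args("1, 'a', flag=True")  ->  ((1, 'a'), {'flag': True})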
def timestamp(t):
"""standard timesstamp string creator."""
return timelib.strftime("%a, %d %b %Y %H:%M:%S %Z", timelib.localtime(t))
class TestSuite(object):
"""A Test holder and runner.
A TestSuite contains a set of test cases (subclasses of Test class) that
are run sequentially, in the order added. It monitors abort status of
each test, and aborts the suite if required.
To run it, create a TestSuite object (or a subclass with some methods
overridden), add tests with the `add_test()` method, and then call the
instance. The 'initialize()' method will be run with the arguments given
when called.
"""
def __init__(self, cf, nested=0, name=None):
self.config = cf
self.report = cf.report
self._debug = cf.flags.DEBUG
self._tests = []
self._testset = set()
self._multitestset = set()
self._nested = nested
cl = self.__class__
self.test_name | |
IOD': ['Patient'],
'SPECTACLE PRESCIPTION REPORT IOD': ['Patient'],
'BASIC TEXT SR IOD': ['Patient'],
'NM IMAGE IOD': ['Patient'],
'BLENDING SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'LENSOMETRY MEASUREMENTS IOD': ['Patient'],
'MR SPECTROSCOPY IOD': ['Patient'],
'ENCAPSULATED PDF IOD': ['Patient'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Patient'],
'CHEST CAD SR IOD': ['Patient'],
'HEMODYNAMIC IOD': ['Patient'],
'OPHTHALMIC AXIAL MEASUREMENTS IOD': ['Patient'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Patient'],
'VIDEO MICROSCOPIC IMAGE IOD': ['Patient'],
'ENHANCED MR COLOR IMAGE IOD': ['Patient'],
'ENHANCED CT IMAGE IOD': ['Patient'],
'X-RAY RADIATION DOSE SR IOD': ['Patient'],
'AUTOREFRACTION MEASUREMENTS IOD': ['Patient'],
'PROCEDURE LOG IOD': ['Patient'],
'IMPLANTATION PLAN SR DOCUMENT IOD': ['Patient'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Patient'],
'ENHANCED X-RAY RF IMAGE IOD': ['Patient'],
'INTRAOCULAR LENS CALCULATIONS IOD': ['Patient'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Patient'],
'VL ENDOSCOPIC IMAGE IOD': ['Patient'],
'KERATOMETRY MEASUREMENTS IOD': ['Patient'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Patient'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Patient'],
'COMPREHENSIVE SR IOD': ['Patient'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Patient'],
'KEY OBJECT SELECTION DOCUMENT IOD': ['Patient'],
'SPATIAL FIDUCIALS IOD': ['Patient'],
'RT ION PLAN IOD': ['Patient'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Patient'],
'CT IMAGE IOD': ['Patient'],
'RT CONVENTIONAL MACHINE VERIFICATION IOD': ['Rt Conventional Machine Verification'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Patient'],
'RT ION BEAMS TREATMENT RECORD IOD': ['Patient'],
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Patient'],
'XA/XRF GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'RT DOSE IOD': ['Patient'],
'GENERAL PURPOSE PERFORMED PROCEDURE STEP IOD': ['General Purpose Performed Procedure Step'],
'AMBULATORY ECG IOD': ['Patient'],
'SURFACE SEGMENTATION IOD': ['Patient'],
'MAMMOGRAPHY CAD SR IOD': ['Patient'],
'VL MICROSCOPIC IMAGE IOD': ['Patient'],
'RT BEAMS TREATMENT RECORD IOD': ['Patient'],
'DEFORMABLE SPATIAL REGISTRATION IOD': ['Patient'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Patient'],
'RT IMAGE IOD': ['Patient'],
'SC IMAGE IOD': ['Patient'],
None: ['Patient', 'Rt Ion Machine Verification', 'Unified Procedure Step', 'Modality Performed Procedure Step', 'Rt Conventional Machine Verification', 'General Purpose Performed Procedure Step', 'General Purpose Scheduled Procedure Step'],
'SEGMENTATION IOD': ['Patient'],
'PET IMAGE IOD': ['Patient'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'DIGITAL X-RAY IMAGE IOD': ['Patient'],
'REAL WORLD VALUE MAPPING IOD': ['Patient'],
'SPATIAL REGISTRATION IOD': ['Patient'],
'COLON CAD SR IOD': ['Patient'],
'INTRAVASCULAR OCT IMAGE IOD': ['Patient'],
'COLOR SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'ENHANCED PET IMAGE IOD': ['Patient'],
'VISUAL ACUITY MEASUREMENTS IOD': ['Patient'],
'US MULTI-FRAME IMAGE IOD': ['Patient'],
'STEREOMETRIC RELATIONSHIP IOD': ['Patient'],
'GENERAL PURPOSE SCHEDULED PROCEDURE STEP IOD': ['General Purpose Scheduled Procedure Step'],
'RT BEAMS DELIVERY INSTRUCTION IOD': ['Patient'],
'SUBJECTIVE REFRACTION MEASUREMENTS IOD': ['Patient'],
'US IMAGE IOD': ['Patient'],
'GENERAL ECG IOD': ['Patient'],
'XRF IMAGE IOD': ['Patient'],
'ENCAPSULATED CDA IOD': ['Patient'],
'ENHANCED SR IOD': ['Patient'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Patient'],
'GENERAL AUDIO WAVEFORM IOD': ['Patient'],
'MR IMAGE IOD': ['Patient'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Patient'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Patient'],
'ARTERIAL PULSE WAVEFORM IOD': ['Patient'],
},
# ImplantSize
0x00686210L: {
'GENERIC IMPLANT TEMPLATE IOD': ['Implant Template'],
None: ['Implant Template'],
},
# RTImageSID
0x30020026L: {
'RT IMAGE IOD': ['Image'],
None: ['Image'],
},
# ViewName
0x00082127L: {
'US MULTI-FRAME IMAGE IOD': ['Image'],
'US IMAGE IOD': ['Image'],
None: ['Image'],
},
# Date
0x0040A121L: {
'SPECTACLE PRESCIPTION REPORT IOD': ['Document'],
'KEY OBJECT SELECTION DOCUMENT IOD': ['Document'],
'MAMMOGRAPHY CAD SR IOD': ['Document'],
'BASIC TEXT SR IOD': ['Document'],
'X-RAY RADIATION DOSE SR IOD': ['Document'],
'PROCEDURE LOG IOD': ['Document'],
'ENHANCED SR IOD': ['Document'],
'CHEST CAD SR IOD': ['Document'],
'MACULAR GRID THIICKNESS AND VOLUME REPORT IOD': ['Document'],
None: ['Document'],
'IMPLANTATION PLAN SR DOCUMENT IOD': ['Document'],
'COMPREHENSIVE SR IOD': ['Document'],
'COLON CAD SR IOD': ['Document'],
},
# BrachyTreatmentTechnique
0x300A0200L: {
'RT PLAN IOD': ['Plan'],
'RT BRACHY TREATMENT RECORD IOD': ['Treatment Record'],
None: ['Treatment Record', 'Plan'],
},
# TableTopVerticalPosition
0x300A0128L: {
'RT IMAGE IOD': ['Image'],
None: ['Image'],
},
# TableTopLongitudinalPosition
0x300A0129L: {
'RT IMAGE IOD': ['Image'],
None: ['Image'],
},
# NumberOfAverages
0x00180083L: {
'MR IMAGE IOD': ['Image'],
None: ['Image'],
},
# DoseSummationType
0x3004000AL: {
'RT DOSE IOD': ['Dose'],
None: ['Dose'],
},
# PhaseVector
0x00540030L: {
'NM IMAGE IOD': ['Image'],
None: ['Image'],
},
# BlindSpotYCoordinate
0x00240108L: {
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Measurements'],
None: ['Measurements'],
},
# ImplantAssemblyTemplateTargetAnatomySequence
0x00760010L: {
'IMPLANT ASSEMBLY TEMPLATE IOD': ['Implant Assembly'],
None: ['Implant Assembly'],
},
# CollimatorShape
0x00181700L: {
None: ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
},
# DecayCorrectionDateTime
0x00189701L: {
'ENHANCED PET IMAGE IOD': ['Image'],
None: ['Image'],
},
# CollimatorLeftVerticalEdge
0x00181702L: {
None: ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
},
# CollimatorRightVerticalEdge
0x00181704L: {
None: ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
},
# ExposureControlModeDescription
0x00187062L: {
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
None: ['Image'],
},
# CollimatorUpperHorizontalEdge
0x00181706L: {
None: ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
},
# GraphicLayerSequence
0x00700060L: {
'COLOR SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'XA/XRF GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
None: ['Presentation State'],
'BLENDING SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
},
# CollimatorLowerHorizontalEdge
0x00181708L: {
None: ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
},
# RequestedResolutionID
0x20200050L: {
'BASIC FILM BOX IOD': ['Basic Film Box'],
None: ['Basic Film Box'],
},
# RTROIObservationsSequence
0x30060080L: {
'RT STRUCTURE SET IOD': ['Structure Set'],
None: ['Structure Set'],
},
# ReferencedWaveformChannels
0x0040A0B0L: {
'SPECTACLE PRESCIPTION REPORT IOD': ['Document'],
'KEY OBJECT SELECTION DOCUMENT IOD': ['Document'],
'MAMMOGRAPHY CAD SR IOD': ['Document'],
'BASIC TEXT SR IOD': ['Document'],
'X-RAY RADIATION DOSE SR IOD': ['Document'],
'PROCEDURE LOG IOD': ['Document'],
'ENHANCED SR IOD': ['Document'],
'CHEST CAD SR IOD': ['Document'],
'MACULAR GRID THIICKNESS AND VOLUME REPORT IOD': ['Document'],
None: ['Document'],
'IMPLANTATION PLAN SR DOCUMENT IOD': ['Document'],
'COMPREHENSIVE SR IOD': ['Document'],
'COLON CAD SR IOD': ['Document'],
},
# CenterOfCircularCollimator
0x00181710L: {
None: ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
},
# IntraOcularPressure
0x0022000BL: {
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Image'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Image'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Image'],
None: ['Image'],
},
# RadiusOfCircularCollimator
0x00181712L: {
None: ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
},
# WaveformDataDisplayScale
0x003A0230L: {
'RESPIRATORY WAVEFORM IOD': ['Waveform'],
None: ['Waveform'],
'GENERAL AUDIO WAVEFORM IOD': ['Waveform'],
'BASIC VOICE AUDIO IOD': ['Waveform'],
'HEMODYNAMIC IOD': ['Waveform'],
'GENERAL ECG IOD': ['Waveform'],
'BASIC CARDIAC EP IOD': ['Waveform'],
'AMBULATORY ECG IOD': ['Waveform'],
'ARTERIAL PULSE WAVEFORM IOD': ['Waveform'],
'12-LEAD ECG IOD': ['Waveform'],
},
# NumberOfEnergyWindows
0x00540011L: {
'NM IMAGE IOD': ['Image'],
None: ['Image'],
},
# StartRelativeDensityDifferenceThreshold
0x00189716L: {
'ENHANCED PET IMAGE IOD': ['Image'],
None: ['Image'],
},
# StartCardiacTriggerCountThreshold
0x00189717L: {
'ENHANCED PET IMAGE IOD': ['Image'],
None: ['Image'],
},
# HistogramSequence
0x00603000L: {
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
None: ['Image'],
},
# TerminationCountsThreshold
0x00189719L: {
'ENHANCED PET IMAGE IOD': ['Image'],
None: ['Image'],
},
# ImageCenterPointCoordinatesSequence
0x0040071AL: {
'VL SLIDE-COORDINATES MICROSCOPIC IMAGE IOD': ['Image'],
None: ['Image'],
},
# MeasuredValueSequence
0x0040A300L: {
'SPECTACLE PRESCIPTION REPORT IOD': ['Document'],
'KEY OBJECT SELECTION DOCUMENT IOD': ['Document'],
'MAMMOGRAPHY CAD SR IOD': ['Document'],
'BASIC TEXT SR IOD': ['Document'],
'X-RAY RADIATION DOSE SR IOD': ['Document'],
'PROCEDURE LOG IOD': ['Document'],
'ENHANCED SR IOD': ['Document'],
'CHEST CAD SR IOD': ['Document'],
'MACULAR GRID THIICKNESS AND VOLUME REPORT IOD': ['Document'],
None: ['Document'],
'IMPLANTATION PLAN SR DOCUMENT IOD': ['Document'],
'COMPREHENSIVE SR IOD': ['Document'],
'COLON CAD SR IOD': ['Document'],
},
# PresentedDataFlag
0x00240037L: {
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Measurements'],
None: ['Measurements'],
},
# PatientOrientationCodeSequence
0x00540410L: {
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'NM IMAGE IOD': ['Series'],
None: ['Image', 'Series'],
'PET IMAGE IOD': ['Series'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Image'],
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Image'],
},
# FrameOfInterestType
0x00286023L: {
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Image'],
None: ['Image'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Image'],
'US MULTI-FRAME IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'XRF | |
################################################################################
# Copyright (C) 2015 Surfacingx #
# #
# This Program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2, or (at your option) #
# any later version. #
# #
# This Program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with XBMC; see the file COPYING. If not, write to #
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. #
# http://www.gnu.org/copyleft/gpl.html #
################################################################################
import xbmc, xbmcaddon, xbmcgui, xbmcplugin, os, sys, xbmcvfs, HTMLParser, glob, zipfile, json, base64
import shutil
import errno
import string
import random
import urllib2,urllib
import re
import downloader
import extract
import uservar
import skinSwitch
import time
import pyqrcode
from datetime import date, datetime, timedelta
try: from sqlite3 import dbapi2 as database
except: from pysqlite2 import dbapi2 as database
from string import digits
ADDON_ID = uservar.ADDON_ID
ADDONTITLE = uservar.ADDONTITLE
ADDON = xbmcaddon.Addon(ADDON_ID)
VERSION = ADDON.getAddonInfo('version')
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36 SE 2.X MetaSr 1.0'
DIALOG = xbmcgui.Dialog()
DP = xbmcgui.DialogProgress()
HOME = xbmc.translatePath('special://home/')
XBMC = xbmc.translatePath('special://xbmc/')
LOG = xbmc.translatePath('special://logpath/')
PROFILE = xbmc.translatePath('special://profile/')
SOURCE = xbmc.translatePath('source://')
ADDONS = os.path.join(HOME, 'addons')
USERDATA = os.path.join(HOME, 'userdata')
PLUGIN = os.path.join(ADDONS, ADDON_ID)
PACKAGES = os.path.join(ADDONS, 'packages')
ADDOND = os.path.join(USERDATA, 'addon_data')
ADDONDATA = os.path.join(USERDATA, 'addon_data', ADDON_ID)
ADVANCED = os.path.join(USERDATA, 'advancedsettings.xml')
SOURCES = os.path.join(USERDATA, 'sources.xml')
GUISETTINGS = os.path.join(USERDATA, 'guisettings.xml')
FAVOURITES = os.path.join(USERDATA, 'favourites.xml')
PROFILES = os.path.join(USERDATA, 'profiles.xml')
THUMBS = os.path.join(USERDATA, 'Thumbnails')
DATABASE = os.path.join(USERDATA, 'Database')
FANART = os.path.join(PLUGIN, 'fanart.jpg')
ICON = os.path.join(PLUGIN, 'icon.png')
ART = os.path.join(PLUGIN, 'resources', 'art')
WIZLOG = os.path.join(ADDONDATA, 'wizard.log')
WHITELIST = os.path.join(ADDONDATA, 'whitelist.txt')
QRCODES = os.path.join(ADDONDATA, 'QRCodes')
SKIN = xbmc.getSkinDir()
TODAY = date.today()
TOMORROW = TODAY + timedelta(days=1)
TWODAYS = TODAY + timedelta(days=2)
THREEDAYS = TODAY + timedelta(days=3)
ONEWEEK = TODAY + timedelta(days=7)
KODIV = float(xbmc.getInfoLabel("System.BuildVersion")[:4])
EXCLUDES = uservar.EXCLUDES
BUILDFILE = uservar.BUILDFILE
APKFILE = uservar.APKFILE
YOUTUBEFILE = uservar.YOUTUBEFILE
ADDONFILE = uservar.ADDONFILE
ADVANCEDFILE = uservar.ADVANCEDFILE
AUTOUPDATE = uservar.AUTOUPDATE
WIZARDFILE = uservar.WIZARDFILE
NOTIFICATION = uservar.NOTIFICATION
ENABLE = uservar.ENABLE
AUTOINSTALL = uservar.AUTOINSTALL
REPOADDONXML = uservar.REPOADDONXML
REPOZIPURL = uservar.REPOZIPURL
CONTACT = uservar.CONTACT
COLOR1 = uservar.COLOR1
COLOR2 = uservar.COLOR2
INCLUDEVIDEO = ADDON.getSetting('includevideo')
INCLUDEALL = ADDON.getSetting('includeall')
INCLUDEBOB = ADDON.getSetting('includebob')
INCLUDEPHOENIX = ADDON.getSetting('includephoenix')
INCLUDESPECTO = ADDON.getSetting('includespecto')
INCLUDEGENESIS = ADDON.getSetting('includegenesis')
INCLUDEEXODUS = ADDON.getSetting('includeexodus')
INCLUDEONECHAN = ADDON.getSetting('includeonechan')
INCLUDESALTS = ADDON.getSetting('includesalts')
INCLUDESALTSHD = ADDON.getSetting('includesaltslite')
SHOWADULT = ADDON.getSetting('adult')
WIZDEBUGGING = ADDON.getSetting('addon_debug')
DEBUGLEVEL = ADDON.getSetting('debuglevel')
ENABLEWIZLOG = ADDON.getSetting('wizardlog')
CLEANWIZLOG = ADDON.getSetting('autocleanwiz')
CLEANWIZLOGBY = ADDON.getSetting('wizlogcleanby')
CLEANDAYS = ADDON.getSetting('wizlogcleandays')
CLEANSIZE = ADDON.getSetting('wizlogcleansize')
CLEANLINES = ADDON.getSetting('wizlogcleanlines')
INSTALLMETHOD = ADDON.getSetting('installmethod')
DEVELOPER = ADDON.getSetting('developer')
THIRDPARTY = ADDON.getSetting('enable3rd')
THIRD1NAME = ADDON.getSetting('wizard1name')
THIRD1URL = ADDON.getSetting('wizard1url')
THIRD2NAME = ADDON.getSetting('wizard2name')
THIRD2URL = ADDON.getSetting('wizard2url')
THIRD3NAME = ADDON.getSetting('wizard3name')
THIRD3URL = ADDON.getSetting('wizard3url')
BACKUPLOCATION = ADDON.getSetting('path') if not ADDON.getSetting('path') == '' else 'special://home/'
MYBUILDS = os.path.join(BACKUPLOCATION, 'My_Builds', '')
LOGFILES = ['log', 'xbmc.old.log', 'kodi.log', 'kodi.old.log', 'spmc.log', 'spmc.old.log', 'tvmc.log', 'tvmc.old.log']
DEFAULTPLUGINS = ['metadata.album.universal', 'metadata.artists.universal', 'metadata.common.fanart.tv', 'metadata.common.imdb.com', 'metadata.common.musicbrainz.org', 'metadata.themoviedb.org', 'metadata.tvdb.com', 'service.xbmc.versioncheck']
MAXWIZSIZE = [100, 200, 300, 400, 500, 1000]
MAXWIZLINES = [100, 200, 300, 400, 500]
MAXWIZDATES = [1, 2, 3, 7]
###########################
###### Settings Items #####
###########################
def getS(name):
try: return ADDON.getSetting(name)
except: return False
def setS(name, value):
try: ADDON.setSetting(name, value)
except: return False
def openS(name=""):
ADDON.openSettings()
def clearS(type):
build = {'buildname':'', 'buildversion':'', 'buildtheme':'', 'latestversion':'', 'lastbuildcheck':'2016-01-01'}
install = {'installed':'false', 'extract':'', 'errors':''}
default = {'defaultskinignore':'false', 'defaultskin':'', 'defaultskinname':''}
lookfeel = ['default.enablerssfeeds', 'default.font', 'default.rssedit', 'default.skincolors', 'default.skintheme', 'default.skinzoom', 'default.soundskin', 'default.startupwindow', 'default.stereostrength']
if type == 'build':
for set in build:
setS(set, build[set])
for set in install:
setS(set, install[set])
for set in default:
setS(set, default[set])
for set in lookfeel:
setS(set, '')
elif type == 'default':
for set in default:
setS(set, default[set])
for set in lookfeel:
setS(set, '')
elif type == 'install':
for set in install:
setS(set, install[set])
elif type == 'lookfeel':
for set in lookfeel:
setS(set, '')
###########################
###### Display Items ######
###########################
# def TextBoxes(heading,announce):
# class TextBox():
# WINDOW=10147
# CONTROL_LABEL=1
# CONTROL_TEXTBOX=5
# def __init__(self,*args,**kwargs):
# ebi("ActivateWindow(%d)" % (self.WINDOW, )) # activate the text viewer window
# self.win=xbmcgui.Window(self.WINDOW) # get window
# xbmc.sleep(500) # give window time to initialize
# self.setControls()
# def setControls(self):
# self.win.getControl(self.CONTROL_LABEL).setLabel(heading) # set heading
# try: f=open(announce); text=f.read()
# except: text=announce
# self.win.getControl(self.CONTROL_TEXTBOX).setText(str(text))
# return
# TextBox()
# while xbmc.getCondVisibility('Window.IsVisible(10147)'):
# xbmc.sleep(500)
ACTION_PREVIOUS_MENU = 10 ## ESC action
ACTION_NAV_BACK = 92 ## Backspace action
ACTION_MOVE_LEFT = 1 ## Left arrow key
ACTION_MOVE_RIGHT = 2 ## Right arrow key
ACTION_MOVE_UP = 3 ## Up arrow key
ACTION_MOVE_DOWN = 4 ## Down arrow key
ACTION_MOUSE_WHEEL_UP = 104 ## Mouse wheel up
ACTION_MOUSE_WHEEL_DOWN = 105 ## Mouse wheel down
ACTION_MOVE_MOUSE = 107 ## Mouse move
ACTION_SELECT_ITEM = 7 ## Number Pad Enter
ACTION_BACKSPACE = 110 ## ?
ACTION_MOUSE_LEFT_CLICK = 100
ACTION_MOUSE_LONG_CLICK = 108
def TextBox(title, msg):
class TextBoxes(xbmcgui.WindowXMLDialog):
def onInit(self):
self.title = 101
self.msg = 102
self.scrollbar = 103
self.okbutton = 201
self.showdialog()
def showdialog(self):
self.getControl(self.title).setLabel(title)
self.getControl(self.msg).setText(msg)
self.setFocusId(self.scrollbar)
def onClick(self, controlId):
if (controlId == self.okbutton):
self.close()
def onAction(self, action):
if action == ACTION_PREVIOUS_MENU: self.close()
elif action == ACTION_NAV_BACK: self.close()
tb = TextBoxes( "Textbox.xml" , ADDON.getAddonInfo('path'), 'DefaultSkin', title=title, msg=msg)
tb.doModal()
del tb
def highlightText(msg):
msg = msg.replace('\n', '[NL]')
matches = re.compile("-->Python callback/script returned the following error<--(.+?)-->End of Python script error report<--").findall(msg)
for item in matches:
string = '-->Python callback/script returned the following error<--%s-->End of Python script error report<--' % item
msg = msg.replace(string, '[COLOR red]%s[/COLOR]' % string)
msg = msg.replace('WARNING', '[COLOR yellow]WARNING[/COLOR]').replace('ERROR', '[COLOR red]ERROR[/COLOR]').replace('[NL]', '\n').replace(': EXCEPTION Thrown (PythonToCppException) :', '[COLOR red]: EXCEPTION Thrown (PythonToCppException) :[/COLOR]')
msg = msg.replace('\\\\', '\\').replace(HOME, '')
return msg
def LogNotify(title, message, times=2000, icon=ICON,sound=False):
DIALOG.notification(title, message, icon, int(times), sound)
#ebi('XBMC.Notification(%s, %s, %s, %s)' % (title, message, times, icon))
def percentage(part, whole):
return 100 * float(part)/float(whole)
def addonUpdates(do=None):
setting = '"general.addonupdates"'
if do == 'set':
query = '{"jsonrpc":"2.0", "method":"Settings.GetSettingValue","params":{"setting":%s}, "id":1}' % (setting)
response = xbmc.executeJSONRPC(query)
match = re.compile('{"value":(.+?)}').findall(response)
if len(match) > 0: default = match[0]
else: default = 0
setS('default.addonupdate', str(default))
query = '{"jsonrpc":"2.0", "method":"Settings.SetSettingValue","params":{"setting":%s,"value":%s}, "id":1}' % (setting, '2')
response = xbmc.executeJSONRPC(query)
elif do == 'reset':
try:
value = int(float(getS('default.addonupdate')))
except:
value = 0
if not value in [0, 1, 2]: value = 0
query = '{"jsonrpc":"2.0", "method":"Settings.SetSettingValue","params":{"setting":%s,"value":%s}, "id":1}' % (setting, value)
response = xbmc.executeJSONRPC(query)
###########################
###### Build Info #########
###########################
def checkBuild(name, ret):
if not workingURL(BUILDFILE) == True: return False
link = openURL(BUILDFILE).replace('\n','').replace('\r','').replace('\t','').replace('gui=""', 'gui="http://"').replace('theme=""', 'theme="http://"')
match = re.compile('name="%s".+?ersion="(.+?)".+?rl="(.+?)".+?ui="(.+?)".+?odi="(.+?)".+?heme="(.+?)".+?con="(.+?)".+?anart="(.+?)".+?review="(.+?)".+?dult="(.+?)".+?escription="(.+?)"' % name).findall(link)
if len(match) > 0:
for version, url, gui, kodi, theme, icon, fanart, preview, adult, description in match:
if ret == 'version': return version
elif ret == 'url': return url
elif ret == 'gui': return gui
elif ret == 'kodi': return kodi
elif ret == 'theme': return theme
elif ret == 'icon': return icon
elif ret == 'fanart': return fanart
elif ret == 'preview': return preview
elif ret == 'adult': return adult
elif ret == 'description': return description
elif ret == 'all': return name, version, url, gui, kodi, theme, icon, fanart, preview, adult, description
else: return False
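# The regex above assumes one entry per build in the hosted text file, roughly
# of the form below (attribute capitalisation is an assumption, since only the
# tail of each attribute name is matched):
#   name="SomeBuild" version="1.0" url="..." gui="..." kodi="17.0" theme="..."
#   icon="..." fanart="..." preview="..." adult="no" description="..."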
def checkTheme(name, theme, ret):
themeurl = checkBuild(name, 'theme')
if not workingURL(themeurl) == True: return False
link = openURL(themeurl).replace('\n','').replace('\r','').replace('\t','')
match = re.compile('name="%s".+?rl="(.+?)".+?con="(.+?)".+?anart="(.+?)".+?dult=(.+?).+?escription="(.+?)"' % theme).findall(link)
if len(match) > 0:
for url, icon, fanart, adult, description in match:
if ret == 'url': return url
elif ret == 'icon': return icon
elif ret == 'fanart': return fanart
elif ret == 'adult': return adult
elif ret == 'description': return description
elif ret == 'all': return name, theme, url, icon, fanart, adult, description
else: return False
def checkWizard(ret):
if not workingURL(WIZARDFILE) == True: return False
link = openURL(WIZARDFILE).replace('\n','').replace('\r','').replace('\t','')
match = re.compile('id="%s".+?ersion="(.+?)".+?ip="(.+?)"' % ADDON_ID).findall(link)
if len(match) > 0:
for version, zip in match:
if ret == 'version': return version
elif ret == 'zip': return zip
elif ret == 'all': return ADDON_ID, version, zip
else: return False
def buildCount(ver=None):
link = openURL(BUILDFILE).replace('\n','').replace('\r','').replace('\t','')
match = re.compile('name="(.+?)".+?odi="(.+?)".+?dult="(.+?)"').findall(link)
total = 0; count15 = 0; count16 = 0; count17 = 0; count18 = 0; hidden = 0; adultcount = 0
if len(match) > 0:
for name, kodi, adult in match:
if not SHOWADULT == 'true' and adult.lower() == 'yes': hidden += 1; adultcount +=1; continue
if not DEVELOPER == 'true' and strTest(name): hidden += 1; continue
kodi = int(float(kodi))
total += 1
if kodi == 18: count18 += 1
elif kodi == 17: count17 += 1
elif kodi == 16: count16 += 1
elif kodi <= 15: count15 += 1
return total, count15, count16, count17, count18, adultcount, hidden
def strTest(string):
a = (string.lower()).split(' ')
if 'test' in a: return True
else: return False
def themeCount(name, count=True):
themefile = checkBuild(name, 'theme')
if themefile == 'http://': return False
link = openURL(themefile).replace('\n','').replace('\r','').replace('\t','')
match = re.compile('name="(.+?)".+?dult="(.+?)"').findall(link)
if len(match) == 0: return False
themes = []
for item, adult in match:
if not SHOWADULT == 'true' and adult.lower() == 'yes': continue
themes.append(item)
if len(themes) > 0:
if count == True: return len(themes)
else: return themes
else: return False
def thirdParty(url=None):
if url == None: return
link = openURL(url).replace('\n','').replace('\r','').replace('\t','')
match = re.compile('name="(.+?)".+?ersion="(.+?)".+?rl="(.+?)".+?odi="(.+?)".+?con="(.+?)".+?anart="(.+?)".+?dult="(.+?)".+?escription="(.+?)"').findall(link)
match2 = re.compile('name="(.+?)".+?rl="(.+?)".+?mg="(.+?)".+?anart="(.+?)".+?escription="(.+?)"').findall(link)
if len(match) > 0:
return True, match
elif len(match2) > 0:
return False, match2
else:
return False, []
###########################
###### URL Checks #########
###########################
def workingURL(url):
if url in ['http://', 'https://', '']: return False
check = 0; status = ''
while check < 3:
check += 1
try:
req = urllib2.Request(url)
req.add_header('User-Agent', USER_AGENT)
response = urllib2.urlopen(req)
response.close()
status = True
break
except Exception, e:
status = str(e)
log("Working Url Error: %s [%s]" % (e, url))
xbmc.sleep(500)
return status
def openURL(url):
req = urllib2.Request(url)
req.add_header('User-Agent', USER_AGENT)
response = urllib2.urlopen(req)
link=response.read()
response.close()
return link
###########################
###### Misc Functions #####
###########################
def getKeyboard( default="", heading="", hidden=False ):
keyboard = xbmc.Keyboard( default, heading, hidden )
keyboard.doModal()
if keyboard.isConfirmed():
return unicode( keyboard.getText(), "utf-8" )
return default
def getSize(path, total=0):
for dirpath, dirnames, filenames in os.walk(path):
for f in filenames:
fp = os.path.join(dirpath, f)
total += os.path.getsize(fp)
return total
def convertSize(num, suffix='B'):
for unit in ['', 'K', 'M', 'G']:
if abs(num) < 1024.0:
return "%3.02f %s%s" % (num, unit, suffix)
num /= 1024.0
return "%.02f %s%s" | |
<reponame>samskivert/farbot<filename>farb/sysinstall.py
# sysinstall.py vi:ts=4:sw=4:expandtab:
#
# Copyright (c) 2006-2008 Three Rings Design, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright owner nor the names of contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import copy
import os
import string
import farb
class ConfigSection(object):
"""
Abstract class implementing re-usable functions for install.cfg(8)
configuration sections.
"""
def _serializeOptions(self, output):
"""
Serialize all install.cfg options for this section
to an output file.
Concrete subclasses MUST provide a sectionOptions list as a class
attribute. This list must contain all valid install.cfg options for
the section, in the order required by sysinstall(8).
Given the sectionOptions list, this implementation will introspect
'self' for attributes with names that match the sectionOptions.
Any available attributes will be used, and any missing attributes
will be ignored.
@param output: Open, writable file handle
"""
for option in self.sectionOptions:
if hasattr(self, option):
output.write('%s=%s\n' % (option, getattr(self, option)))
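    # Hedged sketch of the output (attribute values are invented): with
    # sectionOptions = ('hostname', 'domainname') and only self.hostname set to
    # 'buildhost', the missing attribute is skipped and a single line is written:
    #   hostname=buildhost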
def _serializeCommands(self, output, commands=None):
"""
Write out all commands listed in the sectionCommands class
attribute.
@param output: Open, writable file handle
@param commands: Commands to output. Defaults to sectionCommands.
"""
if (not commands):
commands = self.sectionCommands
for command in commands:
output.write('%s\n' % (command))
class NetworkConfig(ConfigSection):
"""
install.cfg(8) network configuration section.
"""
# Section option names
sectionOptions = (
'hostname', # New Server's Host Name
'domainname', # New Server's Domain Name
'netDev', # Network Interface
'nfs', # NFS Installation Media
'tryDHCP' # DHCP an address
)
# Default option values
tryDHCP = 'YES'
# Section commands
sectionCommands = (
'mediaSetNFS',
)
def __init__(self, section, config):
"""
Initialize network configuration for a given
installation.
@param section: ZConfig Installation section
@param config: ZConfig Farbot Config
"""
# Install-specific Options
self.hostname = section.hostname
self.domainname = section.domain
self.netDev = section.networkdevice
# FarBot-wide Options
self.nfshost = config.Releases.nfshost
self.nfspath = os.path.join(config.Releases.installroot, section.release.lower())
self.nfs = self.nfshost + ':' + self.nfspath
def serialize(self, output):
self._serializeOptions(output)
self._serializeCommands(output)
class DistSetConfig(ConfigSection):
"""
install.cfg(8) distribution set configuration section.
"""
# Section option names
sectionOptions = (
'dists', # Install these distribution sets
)
# Section commands
sectionCommands = (
'distSetCustom',
)
def __init__(self, release, config):
"""
Initialize distribution set configuration for a given
installation.
@param release: ZConfig Release section
@param config: ZConfig Farbot Config
"""
# Flatten lists of dists, source dists, and kernel dists, inserting the
# sub lists after src or kernels. Not sure if it is really necessary to have
# those sub lists in that exact location, but let's be safe.
self.dists = copy.copy(release.dists)
if self.dists.count('src') > 0:
self.dists.insert(self.dists.index('src') + 1, string.join(release.sourcedists))
if self.dists.count('kernels') > 0:
self.dists.insert(self.dists.index('kernels') + 1, string.join(release.kerneldists))
self.dists = string.join(self.dists)
def serialize(self, output):
self._serializeOptions(output)
self._serializeCommands(output)
class DiskLabelConfig(ConfigSection):
"""
install.cfg(8) FreeBSD labels (partition) configuration section.
"""
# Section option names are generated
# Section commands
sectionCommands = (
'diskLabelEditor',
)
def __init__(self, section, diskDevice):
"""
Initialize a disk label configuration for a given
partition map and device.
@param section: ZConfig PartitionMap section
@param diskDevice: Device to label (eg ad0s1)
"""
# Section option names are generated
self.sectionOptions = []
self.diskDevice = diskDevice
# Grab our partition map
for part in section.Partition:
# Build device + slice + partition number, and append it to
# sectionOptions
slice = self.diskDevice + '-' + part.getSectionName()
self.sectionOptions.append(slice)
# Partition settings
if (part.softupdates):
setattr(self, slice, "%s %d %s 1" % (part.type, part.size, part.mount))
else:
setattr(self, slice, "%s %d %s" % (part.type, part.size, part.mount))
# Ensure that partitions are in order (1 ... 9)
self.sectionOptions.sort()
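    # Illustrative sketch (size, type and mount point are invented): a PartitionMap
    # section named "2" of type 'ufs', size 4096, mounted at /var with softupdates
    # enabled on device 'ad0s1' generates the option
    #   ad0s1-2=ufs 4096 /var 1
    # which the diskLabelEditor command consumes when this section is serialized.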
def serialize(self, output):
self._serializeOptions(output)
self._serializeCommands(output)
class DiskPartitionConfig(ConfigSection):
"""
install.cfg(8) BIOS partition configuration section.
"""
# Section option names
sectionOptions = (
'disk', # Disk to partition
'partition', # Partitioning method
'bootManager', # Boot manager to install
)
# We hardcode the use of the entire disk
partition = 'all'
# Hardcode the use of the boot manager, too
bootManager = 'standard'
# Section commands
sectionCommands = (
'diskPartitionEditor',
)
def __init__(self, section, config):
"""
Initialize a disk partition configuration for a given
disk section.
@param section: ZConfig Disk section
@param config: ZConfig Farbot Config
"""
self.disk = section.getSectionName()
# Grab our partition map
# If it doesn't exist, complain loudly
self.diskLabelConfig = None
for map in config.Partitions.PartitionMap:
if (section.partitionmap.lower() == map.getSectionName()):
# Set up the disk labels. Always s1!
self.diskLabelConfig = DiskLabelConfig(map, self.disk + 's1')
break
def serialize(self, output):
self._serializeOptions(output)
self._serializeCommands(output)
self.diskLabelConfig.serialize(output)
class SystemCommandConfig(ConfigSection):
"""
install.cfg(8) system command configuration section.
"""
# Section option names
sectionOptions = (
'command', # Command name and arguments
)
# Section commands
sectionCommands = (
'system',
)
def __init__(self, cmd):
"""
Initialize system command configuration for a given
installation.
@param cmd: ZConfig command key value
"""
# Build command + options
self.cmd = cmd
setattr(self, 'command', "%s" % (cmd))
def serialize(self, output):
self._serializeOptions(output)
self._serializeCommands(output)
class PackageConfig(SystemCommandConfig):
"""
install.cfg(8) package install configuration section.
Sysinstall's dependency handling is seriously broken,
relying on an INDEX that doesn't necessarily reflect reality.
We skip the sysinstall package installation code entirely and
use a SystemCommand to call pkg_add(8) ourselves post-install.
"""
installPackageScript = os.path.join('/dist', os.path.basename(farb.INSTALL_PACKAGE_SH))
def __init__(self, section):
"""
Initialize package install configuration for a given
installation.
@param section: ZConfig Package section
"""
# /dist/install_package.sh <package name>
self.package = section.package
cmd = "%s %s" % (self.installPackageScript, self.package)
super(PackageConfig, self).__init__(cmd)
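    # Sketch of the generated install.cfg fragment (the package name is invented):
    # PackageConfig for a package 'bash' serializes the two lines below, which
    # sysinstall executes post-install through its "system" command:
    #   command=/dist/install_package.sh bash
    #   system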
class InstallationConfig(ConfigSection):
"""
InstallationConfig instances represent a
complete install.cfg file for sysinstall(8)
"""
# Section option names
sectionOptions = (
'debug',
'nonInteractive',
'noWarn'
)
# Defaults
debug = 'YES'
nonInteractive = 'YES'
noWarn = 'YES'
# Commands needed to start up the interactive partitioner
interactivePartitionCommands = (
'diskInteractive="YES"', # Partition and label disks interactively
'diskPartitionEditor', # Run disk partition (MBR) editor
'diskLabelEditor' # Run disk label editor
)
# Pre-package commands
prePackageCommands = (
'diskLabelCommit', # Write disk labels to disk
'installCommit' # Write install distribution to disk
)
# Section commands
sectionCommands = (
'shutdown',
)
def __init__(self, section, config):
"""
Initialize a new installation configuration.
@param section: ZConfig Installation section
@param config: ZConfig Farbot Config
"""
self.name = section.getSectionName()
# Network configuration
self.networkConfig = NetworkConfig(section, config)
# Distribution sets
for release in config.Releases.Release:
if release.getSectionName() == section.release.lower():
self.distSetConfig = DistSetConfig(release, config)
break
# Disks (Partitions and Labels)
self.diskPartitionConfigs = []
for disk in section.Disk:
diskPartitionConfig = DiskPartitionConfig(disk, config)
self.diskPartitionConfigs.append(diskPartitionConfig)
# Packages
self.packageConfigs = []
for psetName in section.packageset:
foundPset = False
for pset in config.PackageSets.PackageSet:
if (psetName.lower() == pset.getSectionName()):
foundPset = True
break
for package in pset.Package:
pkgc = PackageConfig(package)
self.packageConfigs.append(pkgc)
# System Commands
self.systemCommandConfigs = []
if (section.PostInstall):
for cmd in section.PostInstall.command:
systemCommandConfig = SystemCommandConfig(cmd)
self.systemCommandConfigs.append(systemCommandConfig)
def serialize(self, output):
# Global configuration options
self._serializeOptions(output)
# Network configuration
self.networkConfig.serialize(output)
# Select distribution sets
self.distSetConfig.serialize(output)
# Disk formatting
for disk in self.diskPartitionConfigs:
disk.serialize(output)
# If we have no diskPartitionConfigs, partition interactively
| |
the moves that friendly pieces can perform to save the king
calculate_white_check_moves()
else:
if all_white_moves & 1 << black_king_eightx_y > 0:
# If the king is in check, discover the type and location of the checking pieces
for move in white_move_list:
if move[1] == black_king_eightx_y:
checkers.append(move[0])
# Calculate the moves that friendly pieces can perform to save the king
calculate_black_check_moves()
def calculate_white_check_moves():
global white_move_list
defence_moves = 0
number_of_checkers = len(checkers)
# If there is one piece checking the king, then friendly pieces can save the king
# If there is more than one piece checking the king, then only the king can save himself.
if number_of_checkers == 1:
# If there's only one checker, capturing it is always an option
defence_moves += 1 << checkers[0]
# If the single checking piece is not a pawn or a knight, then the checked side can also defend by interposing a piece
if board[checkers[0]] in ['b', 'r', 'q']:
if ((1 << white_king_eightx_y) + (1 << checkers[0])) in intervening_squares_bitboards:
defence_moves += intervening_squares_bitboards[(1 << white_king_eightx_y) + (1 << checkers[0])]
# Unpack the defense moves bitboard.
defence_move_list = []
while defence_moves > 0:
defence_move_list.append(bit_significance_mapping[defence_moves & -defence_moves])
defence_moves -= (defence_moves & -defence_moves)
# Get a list of moves to remove, and populate it with castles
moves_to_remove = set([(32, 16), (32, 48)])
for move in white_move_list:
if move[0] != white_king_eightx_y and move[1] not in defence_move_list:
moves_to_remove.add(move)
# If the king is being checked by a ray piece, he may not move away from the checking piece along the ray on which he is being checked.
for checker in checkers:
if board[checker] in ['b', 'r', 'q']:
if (checker, white_king_eightx_y) in ray_check_forbidden_move_dict:
moves_to_remove.add((white_king_eightx_y, ray_check_forbidden_move_dict[(checker, white_king_eightx_y)]))
white_move_list = white_move_list - moves_to_remove
def calculate_black_check_moves():
global black_move_list
defence_moves = 0
number_of_checkers = len(checkers)
# If there is one piece checking the king, then friendly pieces can save the king
# If there is more than one piece checking the king, then only the king can save himself.
if number_of_checkers == 1:
# If there's only one checker, capturing it is always an option
defence_moves += 1 << checkers[0]
# If the single checking piece is not a pawn or a knight, then the checked side can also defend by interposing a piece
if board[checkers[0]] in ['B', 'R', 'Q']:
if ((1 << black_king_eightx_y) + (1 << checkers[0])) in intervening_squares_bitboards:
defence_moves += intervening_squares_bitboards[(1 << black_king_eightx_y) + (1 << checkers[0])]
# Unpack the defense moves bitboard.
defence_move_list = []
while defence_moves > 0:
defence_move_list.append(bit_significance_mapping[defence_moves & -defence_moves])
defence_moves -= (defence_moves & -defence_moves)
# Get a list of moves to remove
moves_to_remove = set([(39, 23), (39, 55)])
for move in black_move_list:
if move[0] != black_king_eightx_y and move[1] not in defence_move_list:
moves_to_remove.add(move)
# If the king is being checked by a ray piece, he may not move away from the checking piece along the ray on which he is being checked.
for checker in checkers:
if board[checker] in ['B', 'R', 'Q']:
if (checker, black_king_eightx_y) in ray_check_forbidden_move_dict:
moves_to_remove.add((black_king_eightx_y, ray_check_forbidden_move_dict[(checker, black_king_eightx_y)]))
black_move_list = black_move_list - moves_to_remove
# ****************************************************************************
# ***************************** EVALUATION ***********************************
# ****************************************************************************
def evaluate_position():
position_value = 0
for i in range(64):
position_value += piece_values[board[i]]
return position_value
def evaluate_position_and_count():
global nodes
position_value = 0
for i in range(64):
position_value += piece_values[board[i]]
nodes += 1
return position_value
# ****************************************************************************
# ******************************* SEARCH *************************************
# ****************************************************************************
def computer_move(computer_plays):
number_of_pieces = 0
for element in board:
if element != '-':
number_of_pieces += 1
if number_of_pieces <= 8:
end_game_bump = 3
elif number_of_pieces <= 12:
end_game_bump = 2
elif number_of_pieces <= 17:
end_game_bump = 1
else:
end_game_bump = 0
depth = 4 + end_game_bump
if computer_plays == 'white':
#Initialize transposition table
initialize_transposition_table(depth)
calculation_result = calculate_white_move(depth, depth, -20000, 20000)
# Calculate_white_move will return either -1 or a dictionary containing instructions for moving. -1 means checkmate.
if calculation_result == -1: # Checkmate
print("Checkmate has occurred. Black wins.")
return
# Unpack the return dictionary
best_move_origin = calculation_result.get("best_move_origin")
best_move_destination = calculation_result.get("best_move_destination")
# Move the piece, with graphics.
board_display.selected = best_move_origin
move_selected_piece(best_move_destination)
elif computer_plays == 'black':
initialize_transposition_table(depth)
calculation_result = calculate_black_move(depth, depth, -20000, 20000)
# Calculate_black_move will return either -1 or a dictionary containing instructions for moving. -1 means checkmate.
if calculation_result == -1: # Checkmate
print("Checkmate has occurred. White wins.")
return
# Unpack the return dictionary
best_move_origin = calculation_result.get("best_move_origin")
best_move_destination = calculation_result.get("best_move_destination")
# Move the piece, with graphics.
board_display.selected = best_move_origin
move_selected_piece(best_move_destination)
def calculate_white_move(depth, current_depth, alpha, beta):
global white_to_move
global transposition_table
# store the current position, as well as the last move variables (for en passant detection)
position_memento = PositionMemento()
generate_moves()
# Initialize the best_move_origin as an impossible value, to allow detection of checkmate
best_move_origin = -1
# Find all moves for this node.
move_list = list(white_move_list)[:]
# Rank the moves by their position values
ranked_move_list = []
for move in move_list:
make_white_move(move[0], move[1])
position_value = evaluate_position()
position_memento.restore_current_position()
ranked_move_list.append((move, position_value))
for move in sorted(ranked_move_list, key = lambda move_and_value: move_and_value[1], reverse = True):
# Make move and return value of board (without graphics)
make_white_move(move[0][0], move[0][1])
# Recurse. If this calculation is the leaf of the search tree, find the position of the board.
# Otherwise, call the move function of the opposing side.
position_string = ''.join(board)
if position_string in transposition_table[castles][en_passant_square][current_depth]:
position_value = transposition_table[castles][en_passant_square][current_depth][position_string]
elif current_depth == 0:
position_value = evaluate_position_and_count()
transposition_table[castles][en_passant_square][current_depth][position_string] = position_value
else:
white_to_move = not white_to_move
position_value = calculate_black_move(depth, current_depth - 1, alpha, beta)
transposition_table[castles][en_passant_square][current_depth][position_string] = position_value
position_memento.restore_current_position()
# Alpha beta logic
if position_value > alpha:
alpha = position_value
best_move_origin = move[0][0]
best_move_destination = move[0][1]
if beta <= alpha:
if depth != current_depth: # Not root node
return alpha
else: # root node
if best_move_origin == -1: # Checkmate has occurred.
return -1
else:
return {"best_move_origin": best_move_origin, "best_move_destination": best_move_destination}
# For any node other than the root of the tree, the function returns the calculated value of the position.
# For the root node, the function returns a dictionary holding the origin and destination of the best move.
# If checkmate has occurred in a branch (no legal moves), the current alpha bound is returned as the value of the position.
# If checkmate has occurred at the root node, -1 is returned instead.
if depth != current_depth: # Not root node
return alpha
else: # root node
if best_move_origin == -1: # Checkmate has occurred.
return -1
else:
return {"best_move_origin": best_move_origin, "best_move_destination": best_move_destination}
def calculate_black_move(depth, current_depth, alpha, beta):
global white_to_move
global transposition_table
# store the current position, as well as the last move variables (for en passant detection)
position_memento = PositionMemento()
generate_moves()
# Initialize the best_move_origin as an impossible value, to allow detection of checkmate
best_move_origin = -1
# Find all moves for this node.
move_list = list(black_move_list)[:]
# Rank the moves by their position values
ranked_move_list = []
for move in move_list:
make_black_move(move[0], move[1])
position_value = evaluate_position()
position_memento.restore_current_position()
ranked_move_list.append((move, position_value))
for move in sorted(ranked_move_list, key = lambda move_and_value: move_and_value[1]):
# Make move and return value of board (without graphics)
make_black_move(move[0][0], move[0][1])
        # Recurse. If this calculation is at a leaf of the search tree, evaluate the board position.
# Otherwise, call the move function of the opposing side.
position_string = ''.join(board)
if position_string in transposition_table[castles][en_passant_square][current_depth]:
position_value = transposition_table[castles][en_passant_square][current_depth][position_string]
elif current_depth == 0:
position_value = evaluate_position_and_count()
transposition_table[castles][en_passant_square][current_depth][position_string] = position_value
else:
white_to_move = not white_to_move
position_value = calculate_white_move(depth, current_depth - 1, alpha, beta)
transposition_table[castles][en_passant_square][current_depth][position_string] = position_value
position_memento.restore_current_position()
# Alpha beta logic
if position_value < beta:
beta = position_value
best_move_origin = move[0][0]
best_move_destination = move[0][1]
if beta <= alpha:
if depth != current_depth: # Not root node
return beta
else: # root node
if best_move_origin == -1: # Checkmate has occurred.
return -1
else:
return {"best_move_origin": best_move_origin, "best_move_destination": best_move_destination}
    # For any node but the root of the tree, the function returns the calculated value of the position.
    # For the root node of the tree, it returns a dictionary holding the origin and destination of the best move.
    # If checkmate has occurred in a branch, an arbitrarily large number serves as the value of the position.
    # If checkmate has occurred at the root node, return -1 instead of a move dictionary.
if depth != current_depth: # Not root node
return beta
else: # root node
if best_move_origin == -1: # Checkmate has occurred.
return -1
else:
return {"best_move_origin": best_move_origin, "best_move_destination": best_move_destination}
def calculate_white_perft(depth, current_depth):
global nodes
global white_to_move
if current_depth == 1:
generate_moves()
nodes += len(white_move_list)
else:
position_memento = PositionMemento()
generate_moves()
move_list = list(white_move_list)[:]
for move in move_list:
make_white_move(move[0], move[1])
white_to_move = not white_to_move
calculate_black_perft(depth, current_depth - 1)
position_memento.restore_current_position()
def calculate_black_perft(depth, current_depth):
global nodes
global white_to_move
if current_depth == 1:
generate_moves()
nodes += len(black_move_list)
else:
position_memento = PositionMemento()
generate_moves()
move_list = list(black_move_list)[:]
for move in move_list:
make_black_move(move[0], move[1])
white_to_move = not white_to_move
calculate_white_perft(depth, current_depth - 1)
position_memento.restore_current_position()
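# The two perft helpers above count leaf nodes of the move-generation tree.
# Below is a hedged sketch (not part of the original program) of how they
# could be driven to sanity-check move generation; `run_perft_report` is a
# hypothetical name, and it assumes the module-level `nodes` counter and an
# already initialized position (for example via initialize_with_start_position()).
def run_perft_report(max_depth):
    global nodes
    for test_depth in range(1, max_depth + 1):
        # reset the shared node counter before each search depth
        nodes = 0
        calculate_white_perft(test_depth, test_depth)
        print("perft(" + str(test_depth) + ") = " + str(nodes) + " nodes")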
# ****************************************************************************
# *********************** INITIALIZATION FUNCTIONS ***************************
# ****************************************************************************
# Bind an event handler to the left click event
def initialize_board_display():
board_display.bind("<Button-1>", handle_click)
board_display.pack()
# Sets up the board in the initial position, and displays it.
def initialize_with_start_position():
global all_white_positions
global all_black_positions
# Manually set up a couple bitboards
all_white_positions = 0b0000001100000011000000110000001100000011000000110000001100000011
all_black_positions = 0b1100000011000000110000001100000011000000110000001100000011000000
# Generate the moves
generate_moves()
# Display the board
board_display.render_position(board, square_display)
def initialize_with_fen_position(fen_string):
global white_to_move
global castles
global en_passant_square
global halfmove_clock
global fullmove_number
global all_white_positions
global all_black_positions
global board
# Split it into separate parts
fen_array = fen_string.split(' ')
# Set up a blank board.
board = ['-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-','-']
# 1) PIECE PLACEMENT DATA
# Split the position string into an array, and reverse it so that we deal with rank 1 first
position_array = list(reversed(fen_array[0].split('/')))
    # Loop through the array, parse each
<gh_stars>1-10
# -*- coding: utf-8 -*-
# @Author: yulidong
# @Date: 2018-07-17 10:44:43
# @Last Modified by: yulidong
# @Last Modified time: 2018-11-05 16:04:44
# -*- coding: utf-8 -*-
# @Author: lidong
# @Date: 2018-03-20 18:01:52
# @Last Modified by: yulidong
# @Last Modified time: 2018-07-16 22:16:14
import time
import torch
import numpy as np
import torch.nn as nn
import math
from math import ceil
from torch.autograd import Variable
from torch.nn.functional import cosine_similarity as cosine_s
import torch.nn.functional as F
from cmf import caffe_pb2
from cmf.models.utils import *
rsn_specs = {
'scene':
{
'n_classes': 9,
'input_size': (540, 960),
'block_config': [3, 4, 23, 3],
},
}
group_dim=32
pramid_dim=8
group_norm_group_num = 32
def convbn(in_planes, out_planes, kernel_size, stride, pad, dilation):
return nn.Sequential(
nn.Conv2d(
in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
padding=dilation if dilation > 1 else pad,
dilation=dilation,
bias=False), nn.GroupNorm(group_norm_group_num, out_planes))
def convbn_3d(in_planes, out_planes, kernel_size, stride, pad):
return nn.Sequential(
nn.Conv3d(
in_planes,
out_planes,
kernel_size=kernel_size,
padding=pad,
stride=stride,
bias=False), nn.GroupNorm(group_norm_group_num, out_planes))
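# Hedged usage sketch (not part of the original network): convbn and
# convbn_3d return Conv + GroupNorm stacks without an activation, so callers
# wrap them together with a ReLU, as the blocks below do. The tensor sizes
# here are arbitrary assumptions chosen only to illustrate the shapes.
def _convbn_shape_demo():
    block_2d = nn.Sequential(convbn(3, 32, 3, 1, 1, 1), nn.ReLU(inplace=True))
    y_2d = block_2d(torch.randn(1, 3, 16, 16))         # -> [1, 32, 16, 16]
    block_3d = nn.Sequential(convbn_3d(32, 32, 3, 1, 1), nn.ReLU(inplace=True))
    y_3d = block_3d(torch.randn(1, 32, 4, 16, 16))     # -> [1, 32, 4, 16, 16]
    return y_2d.shape, y_3d.shape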
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride, downsample, pad, dilation):
super(BasicBlock, self).__init__()
self.conv1 = nn.Sequential(
convbn(inplanes, planes, 3, stride, pad, dilation),
nn.ReLU(inplace=True))
self.conv2 = convbn(planes, planes, 3, 1, pad, dilation)
self.downsample = downsample
self.stride = stride
def forward(self, x):
out = self.conv1(x)
out = self.conv2(out)
if self.downsample is not None:
x = self.downsample(x)
out += x
return out
class matchshifted(nn.Module):
def __init__(self):
super(matchshifted, self).__init__()
def forward(self, left, right, shift):
batch, filters, height, width = left.size()
shifted_left = F.pad(
torch.index_select(
left, 3,
Variable(torch.LongTensor(
[i for i in range(shift, width)])).cuda()),
(shift, 0, 0, 0))
shifted_right = F.pad(
torch.index_select(
right, 3,
Variable(torch.LongTensor(
[i for i in range(width - shift)])).cuda()),
(shift, 0, 0, 0))
out = torch.cat((shifted_left, shifted_right), 1).view(
batch, filters * 2, 1, height, width)
return out
class disparityregression(nn.Module):
def __init__(self, maxdisp):
super().__init__()
self.disp = Variable(
torch.Tensor(
np.reshape(np.array(range(maxdisp)),
[1, maxdisp, 1, 1])).cuda(),
requires_grad=False)
def forward(self, x):
disp = self.disp.repeat(x.size()[0], 1, x.size()[2], x.size()[3])
out = torch.sum(x * disp, 1)
return out
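# Hedged sketch (illustrative only, not from the original training code):
# disparityregression expects a probability volume over the disparity
# dimension, e.g. a softmax over a cost volume, and returns its expectation,
# i.e. a soft arg-min disparity map. Sizes and the CUDA placement mirror the
# hard-coded .cuda() calls used elsewhere in this file.
def _soft_argmin_demo(maxdisp=4):
    cost = torch.randn(2, maxdisp, 8, 8).cuda()
    prob = torch.softmax(cost, dim=1)                  # probabilities over disparity
    return disparityregression(maxdisp)(prob)          # -> [2, 8, 8]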
class feature_extraction(nn.Module):
def __init__(self):
super(feature_extraction, self).__init__()
self.inplanes = 32
self.firstconv = nn.Sequential(
convbn(3, 32, 3, 1, 1, 1),
# nn.GroupNorm(group_dim, 32),
nn.ReLU(inplace=True),
convbn(32, 32, 3, 1, 1, 1),
nn.ReLU(inplace=True),
convbn(32, 32, 3, 1, 1, 1),
nn.ReLU(inplace=True),
convbn(32, 32, 3, 1, 1, 1),
nn.ReLU(inplace=True))
self.secondconv = nn.Sequential(
convbn(32, 32, 3, 2, 1, 1),
nn.ReLU(inplace=True),
convbn(32, 32, 3, 1, 1, 1),
nn.ReLU(inplace=True))
self.layer1 = self._make_layer(BasicBlock, 32, 3, 2, 1, 1)
self.layer2 = self._make_layer(BasicBlock, 64, 16, 2, 1, 1)
self.layer3 = self._make_layer(BasicBlock, 128, 3, 1, 1, 2)
self.layer4 = self._make_layer(BasicBlock, 128, 3, 1, 1, 4)
self.branch1 = nn.Sequential(
nn.AvgPool2d((4, 4), stride=(4, 4)),
convbn(128, 32, 1, 1, 0, 1),
nn.ReLU(inplace=True))
self.branch2 = nn.Sequential(
nn.AvgPool2d((32, 32), stride=(32, 32)),
convbn(128, 32, 1, 1, 0, 1),
nn.ReLU(inplace=True))
self.branch3 = nn.Sequential(
nn.AvgPool2d((16, 16), stride=(16, 16)),
convbn(128, 32, 1, 1, 0, 1),
nn.ReLU(inplace=True))
self.branch4 = nn.Sequential(
nn.AvgPool2d((8, 8), stride=(8, 8)),
convbn(128, 32, 1, 1, 0, 1),
nn.ReLU(inplace=True))
self.lastconv = nn.Sequential(
convbn(320, 128, 3, 1, 1, 1),
nn.ReLU(inplace=True),
nn.Conv2d(128, 32, kernel_size=1, padding=0, stride=1, bias=False))
def _make_layer(self, block, planes, blocks, stride, pad, dilation):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False),
nn.GroupNorm(group_norm_group_num, planes * block.expansion),
)
layers = []
layers.append(
block(self.inplanes, planes, stride, downsample, pad, dilation))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, 1, None, pad, dilation))
return nn.Sequential(*layers)
def forward(self, x):
output_all = self.firstconv(x)
#print(output_all.shape)
output=self.secondconv(output_all)
#print(output.shape)
output_rt = self.layer1(output)
output_raw = self.layer2(output_rt)
output = self.layer3(output_raw)
output_skip = self.layer4(output)
#print(output_skip.shape)
output_branch1 = self.branch1(output_skip)
output_branch1 = F.interpolate(
output_branch1, (output_skip.size()[2], output_skip.size()[3]),
mode='bilinear',
align_corners=False)
output_branch2 = self.branch2(output_skip)
output_branch2 = F.interpolate(
output_branch2, (output_skip.size()[2], output_skip.size()[3]),
mode='bilinear',
align_corners=False)
output_branch3 = self.branch3(output_skip)
output_branch3 = F.interpolate(
output_branch3, (output_skip.size()[2], output_skip.size()[3]),
mode='bilinear',
align_corners=False)
output_branch4 = self.branch4(output_skip)
output_branch4 = F.interpolate(
output_branch4, (output_skip.size()[2], output_skip.size()[3]),
mode='bilinear',
align_corners=False)
output_feature = torch.cat(
(output_raw, output_skip, output_branch4, output_branch3,
output_branch2, output_branch1), 1)
output_feature = self.lastconv(output_feature)
return output_feature, output_rt,output_all
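# Hedged sketch (not part of the original pipeline) of how feature_extraction
# is called. The 256x512 input size is an arbitrary assumption that is large
# enough for the 32x32 average-pooling branch; with it, the three returned
# tensors come out at 1/8, 1/4 and full resolution with 32 channels each.
def _feature_extraction_demo():
    net = feature_extraction().cuda()
    left = torch.randn(1, 3, 256, 512).cuda()
    feat, feat_rt, feat_full = net(left)
    return feat.shape, feat_rt.shape, feat_full.shape  # [1,32,32,64], [1,32,64,128], [1,32,256,512]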
class hourglass(nn.Module):
def __init__(self, inplanes):
super().__init__()
self.conv1 = nn.Sequential(
convbn_3d(inplanes, inplanes * 2, kernel_size=3, stride=2, pad=1),
nn.ReLU(inplace=True))
self.conv2 = convbn_3d(
inplanes * 2, inplanes * 2, kernel_size=3, stride=1, pad=1)
self.conv3 = nn.Sequential(
convbn_3d(
inplanes * 2, inplanes * 2, kernel_size=3, stride=2, pad=1),
nn.ReLU(inplace=True))
self.conv4 = nn.Sequential(
convbn_3d(
inplanes * 2, inplanes * 2, kernel_size=3, stride=1, pad=1),
nn.ReLU(inplace=True))
self.conv5 = nn.Sequential(
nn.ConvTranspose3d(
inplanes * 2,
inplanes * 2,
kernel_size=3,
padding=1,
output_padding=1,
stride=2,
bias=False), nn.GroupNorm(group_norm_group_num,
inplanes * 2)) # +conv2
self.conv6 = nn.Sequential(
nn.ConvTranspose3d(
inplanes * 2,
inplanes,
kernel_size=3,
padding=1,
output_padding=(1,1,1),
stride=2,
bias=False), nn.GroupNorm(group_norm_group_num,
inplanes)) # +x
def forward(self, x, presqu, postsqu):
out = self.conv1(x) # in:1/4 out:1/8
pre = self.conv2(out) # in:1/8 out:1/8
if postsqu is not None:
pre = F.relu(pre + postsqu, inplace=True)
else:
pre = F.relu(pre, inplace=True)
out = self.conv3(pre) # in:1/8 out:1/16
out = self.conv4(out) # in:1/16 out:1/16
if presqu is not None:
post = F.relu(
self.conv5(out) + presqu, inplace=True) # in:1/16 out:1/8
else:
post = F.relu(self.conv5(out) + pre, inplace=True)
out = self.conv6(post) # in:1/8 out:1/4
return out, pre, post
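# Hedged sketch (illustrative assumption, not original code): the hourglass
# consumes a 5-D cost volume [B, C, D, H, W], downsamples it twice and
# transposes back up, so `out` matches the input resolution while `pre` and
# `post` are the half-resolution intermediates that later hourglasses reuse.
def _hourglass_demo():
    volume = torch.randn(1, 32, 8, 16, 16).cuda()
    out, pre, post = hourglass(32).cuda()(volume, None, None)
    return out.shape, pre.shape, post.shape            # [1,32,8,16,16], [1,64,4,8,8], [1,64,4,8,8]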
class similarity_measure1(nn.Module):
def __init__(self):
super(similarity_measure1, self).__init__()
self.inplanes = 32
self.conv0 = nn.Conv2d(66, 32, kernel_size=1, stride=1, padding=0,
bias=False,dilation=1)
self.relu0 = nn.LeakyReLU(inplace=True)
self.conv1 = nn.Conv2d(32, 16, kernel_size=1, stride=1, padding=0,
bias=False,dilation=1)
self.relu1 = nn.LeakyReLU(inplace=True)
self.conv2 = nn.Conv2d(16, 8, kernel_size=1, stride=1, padding=0,
bias=False,dilation=1)
self.relu2 = nn.LeakyReLU(inplace=True)
self.conv3 = nn.Conv2d(8, 1, kernel_size=1, stride=1, padding=0,
bias=False,dilation=1)
self.relu3 = nn.LeakyReLU(inplace=True)
# self.conv4 = nn.Conv2d(16, 8, kernel_size=1, stride=1, padding=0,
# bias=False,dilation=1)
# self.relu4 = nn.LeakyReLU(inplace=True)
# self.conv5 = nn.Conv2d(8, 1, kernel_size=1, stride=1, padding=0,
# bias=False,dilation=1)
# self.relu5 = nn.ReLU(inplace=True)
#self.s1=nn.Parameter(torch.ones(1)).float()*0.5
for m in self.modules():
if isinstance(m,nn.Conv2d):
nn.init.kaiming_normal_(m.weight,mode='fan_out',nonlinearity='relu')
elif isinstance(m, nn.GroupNorm):
nn.init.constant_(m.weight,1)
nn.init.constant_(m.bias,0)
def forward(self, x):
output = self.conv0(x)
output = self.relu0(output)
output = self.conv1(output)
output = self.relu1(output)
output = self.conv2(output)
output = self.relu2(output)
output = self.conv3(output)
output = self.relu3(output)
# output = self.conv4(output)
# output = self.relu4(output)
# output = self.conv5(output)
# #output = torch.abs(output)
# output = self.relu5(output)
# print(output.shape)
# print(torch.mean(output).item(),torch.max(output).item(),torch.min(output).item())
# output = output/torch.max(output)
# output = output-torch.min(output)
# output = 1-output
# output = torch.exp(-output)
#print(torch.mean(output).item(),torch.max(output).item(),torch.min(output).item())
return output
class similarity_measure2(nn.Module):
def __init__(self):
super(similarity_measure2, self).__init__()
self.inplanes = 32
self.conv0 = nn.Conv2d(3, 3, kernel_size=1, stride=1, padding=0,
bias=False,dilation=1)
self.relu0 = nn.LeakyReLU(inplace=True)
self.conv1 = nn.Conv2d(3, 2, kernel_size=1, stride=1, padding=0,
bias=False,dilation=1)
self.relu1 = nn.LeakyReLU(inplace=True)
self.conv2 = nn.Conv2d(2, 1, kernel_size=1, stride=1, padding=0,
bias=False,dilation=1)
self.relu2 = nn.LeakyReLU(inplace=True)
#self.s2=nn.Parameter(torch.ones(1)).float()*0.5
for m in self.modules():
if isinstance(m,nn.Conv2d):
nn.init.kaiming_normal_(m.weight,mode='fan_out',nonlinearity='relu')
elif isinstance(m, nn.GroupNorm):
nn.init.constant_(m.weight,1)
nn.init.constant_(m.bias,0)
def forward(self, x):
output = self.conv0(x)
output = self.relu0(output)
output = self.conv1(output)
output = self.relu1(output)
output = self.conv2(output)
output = self.relu2(output)
return output
class context_mapping(nn.Module):
def __init__(self):
super(context_mapping,self).__init__()
self.similarity1=similarity_measure1()
self.similarity2=similarity_measure2()
#self.s1=nn.Parameter(torch.ones(1)).float()*0.5
#self.s2=nn.Parameter(torch.ones(1)).float()*0.1
self.fuse=nn.Sequential(nn.Conv2d(2, 1, kernel_size=1, stride=1, padding=0,
bias=False,dilation=1),nn.LeakyReLU(inplace=True))
# self.s2=nn.Conv2d(1, 1, kernel_size=1, stride=1, padding=0,
# bias=False,dilation=1)
        # the Conv2d is the first module inside the Sequential wrapper
        self.fuse[0].weight.data.fill_(1)
# self.s2.weight.data.fill_(1)
#print(self.s1.weight.data)
#exit()
def forward(self, lr_feature, hr_feature):
# self.s1=self.s1.cuda()
# self.s2=self.s2.cuda()
        self.fuse[0].weight.data = torch.abs(self.fuse[0].weight.data)
scale=hr_feature.shape[-1]//lr_feature.shape[-1]
if scale%2==0:
x=torch.arange(-scale//2,scale//2+1).float()
x=torch.cat([x[:x.shape[0]//2],x[x.shape[0]//2+1:]]).unsqueeze(0)
distance_matrix=x.expand(scale,scale).unsqueeze(0)
distance_matrix=torch.cat([distance_matrix,distance_matrix.transpose(2,1)],0)
distance_matrix=torch.cat([distance_matrix,torch.sqrt(torch.pow(distance_matrix[0],2)+torch.pow(distance_matrix[1],2)).unsqueeze(0)],0)
else:
x=torch.arange(-scale//2,scale//2+1).unsqueeze(0)
distance_matrix=x.expand(scale,scale).unsqueeze(0)
distance_matrix=torch.cat([distance_matrix,distance_matrix.transpose(2,1)],0)
#print(distance_matrix.shape)
distance_matrix=distance_matrix.repeat(1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).unsqueeze(0)
lr_feature=lr_feature.unsqueeze(-1).expand(lr_feature.shape[0],lr_feature.shape[1],lr_feature.shape[2],lr_feature.shape[3],scale) \
.contiguous().view(lr_feature.shape[0],lr_feature.shape[1],lr_feature.shape[2],lr_feature.shape[3]*scale) \
.unsqueeze(-2).expand(lr_feature.shape[0],lr_feature.shape[1],lr_feature.shape[2],scale,lr_feature.shape[3]*scale) \
.contiguous().view(lr_feature.shape[0],lr_feature.shape[1],lr_feature.shape[2]*scale,lr_feature.shape[3]*scale)
#128
representation=torch.cat([lr_feature,hr_feature,lr_feature*hr_feature,torch.pow(lr_feature-hr_feature,2)],1)
#print(representation.shape)
weights1=self.similarity1(representation)
weights2=self.similarity2(distance_matrix.float().cuda())
#self.s1=self.s1.view(1,1,1,1).repeat(weights1.shape[0],weights1.shape[1],weights1.shape[2],weights1.shape[3])
# self.s2=self.s2.view(1,1,1,1).repeat(weights1.shape[0],weights1.shape[1],weights1.shape[2],weights1.shape[3])
# mapping=(self.s1*weights1+self.s2*weights2)/(self.s1+self.s2)
#print(self.s1[0,0,0,0].item(),self.s2[0,0,0,0].item())
fuse=self.fuse(torch.ones(1,2,1,1).cuda())
#s2=self.s2(torch.ones(1,1,1,1).cuda())
        print(self.fuse[0].weight.data.cpu().squeeze().numpy())
#mapping=(self.s1(weights1)+self.s2(weights2))
mapping=self.fuse(torch.cat([weights1,weights2],1))
return mapping
class six_related_context_mapping(nn.Module):
def __init__(self):
super(six_related_context_mapping,self).__init__()
self.similarity1=similarity_measure1()
#self.similarity2=similarity_measure2()
self.fuse=nn.Sequential(nn.Conv2d(2, 1, kernel_size=1, stride=1, padding=0,
bias=False,dilation=1),nn.LeakyReLU(inplace=True))
#self.fuse.weight.data.fill_(1)
def forward(self, lr_feature, hr_feature,lr_feature_r, hr_feature_r):
#self.fuse.weight.data=torch.abs(self.fuse.weight.data)
with torch.no_grad():
scale=hr_feature.shape[-1]//lr_feature.shape[-1]
if scale%2==0:
x=torch.arange(-scale//2,scale//2+1).float()
x=torch.cat([x[:x.shape[0]//2],x[x.shape[0]//2+1:]]).unsqueeze(0)
distance_matrix=x.expand(scale,scale).unsqueeze(0)
distance_matrix=torch.cat([distance_matrix,distance_matrix.transpose(2,1)],0)
#distance_matrix=torch.cat([distance_matrix,torch.sqrt(torch.pow(distance_matrix[0],2)+torch.pow(distance_matrix[1],2)).unsqueeze(0)],0).unsqueeze(0)
distance_matrix=distance_matrix.unsqueeze(0)
padding1=torch.zeros(hr_feature.shape[0],1,hr_feature.shape[2],scale).float().cuda()
padding2=torch.zeros(hr_feature.shape[0],1,scale,hr_feature.shape[3]).float().cuda()
else:
exit()
#center
distance_matrix=distance_matrix.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float().cuda()
distance_matrix2=distance_matrix+0
distance_matrix1=distance_matrix+0
distance_matrix3=distance_matrix+0
distance_matrix4=distance_matrix+0
lr_feature=lr_feature.unsqueeze(-1).expand(lr_feature.shape[0],lr_feature.shape[1],lr_feature.shape[2],lr_feature.shape[3],scale) \
.contiguous().view(lr_feature.shape[0],lr_feature.shape[1],lr_feature.shape[2],lr_feature.shape[3]*scale) \
.unsqueeze(-2).expand(lr_feature.shape[0],lr_feature.shape[1],lr_feature.shape[2],scale,lr_feature.shape[3]*scale) \
.contiguous().view(lr_feature.shape[0],lr_feature.shape[1],lr_feature.shape[2]*scale,lr_feature.shape[3]*scale)
#128
#representation=torch.cat([lr_feature,hr_feature,lr_feature*hr_feature,torch.pow(lr_feature-hr_feature,2)],1)
representation=torch.cat([lr_feature,hr_feature,distance_matrix],1)
weights1=self.similarity1(representation)
#weights2=self.similarity2(distance_matrix)
#mapping=self.fuse(torch.cat([weights1,weights2],1))
mapping=weights1
#target image
lr_feature_r=lr_feature_r.unsqueeze(-1).expand(lr_feature_r.shape[0],lr_feature_r.shape[1],lr_feature_r.shape[2],lr_feature_r.shape[3],scale) \
.contiguous().view(lr_feature_r.shape[0],lr_feature_r.shape[1],lr_feature_r.shape[2],lr_feature_r.shape[3]*scale) \
.unsqueeze(-2).expand(lr_feature_r.shape[0],lr_feature_r.shape[1],lr_feature_r.shape[2],scale,lr_feature_r.shape[3]*scale) \
.contiguous().view(lr_feature_r.shape[0],lr_feature_r.shape[1],lr_feature_r.shape[2]*scale,lr_feature_r.shape[3]*scale)
#representation_target=torch.cat([lr_feature_r,hr_feature_r,lr_feature_r*hr_feature_r,torch.pow(lr_feature_r-hr_feature_r,2)],1)
representation_target=torch.cat([lr_feature_r,hr_feature_r,distance_matrix],1)
weights1_target=self.similarity1(representation_target)
#weights2_target=self.similarity2(distance_matrix)
#mapping_target=self.fuse(torch.cat([weights1_target,weights2_target],1))
mapping_target=weights1_target
#right
x=torch.arange(1,scale+1).float()
x=x.expand(scale,scale).unsqueeze(0)
x=x.repeat(hr_feature.shape[0],hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float().cuda()
distance_matrix1[:,0,:,:]=scale-x+1
#distance_matrix1=distance_matrix1.unsqueeze(0)
#distance_matrix1[:,2,:,:]=torch.sqrt(torch.pow(distance_matrix1[:,0,:,:],2)+torch.pow(distance_matrix1[:,1,:,:],2)).unsqueeze(0)
# representation_r=torch.cat([lr_feature[:,:,:,scale:],hr_feature[:,:,:,:-scale],lr_feature[:,:,:,scale:]*hr_feature[:,:,:,:-scale], \
# torch.pow(lr_feature[:,:,:,scale:]-hr_feature[:,:,:,:-scale],2)],1)
representation_r=torch.cat([lr_feature[:,:,:,scale:],hr_feature[:,:,:,:-scale],distance_matrix1[:,:,:,scale:]],1)
weights1_r=self.similarity1(representation_r)
#weights2_r=self.similarity2(distance_matrix1[:,:,:,scale:])
#print(padding.shape)
#mapping_r=torch.cat([self.fuse(torch.cat([weights1_r,weights2_r],1)),padding1],-1)
mapping_r=torch.cat([weights1_r,padding1],-1)
#target
# representation_target_r=torch.cat([lr_feature_r[:,:,:,scale:],hr_feature_r[:,:,:,:-scale],lr_feature_r[:,:,:,scale:]*hr_feature_r[:,:,:,:-scale], \
# torch.pow(lr_feature_r[:,:,:,scale:]-hr_feature_r[:,:,:,:-scale],2)],1)
representation_target_r=torch.cat([lr_feature_r[:,:,:,scale:],hr_feature_r[:,:,:,:-scale],distance_matrix1[:,:,:,scale:]],1)
weights1_target_r=self.similarity1(representation_target_r)
#weights2_target_r=self.similarity2(distance_matrix1[:,:,:,scale:])
#print(padding.shape)
#mapping_target_r=torch.cat([self.fuse(torch.cat([weights1_target_r,weights2_target_r],1)),padding1],-1)
mapping_target_r=torch.cat([weights1_target_r,padding1],-1)
#left
distance_matrix2[:,0,:,:]=x
#distance_matrix2=distance_matrix2.unsqueeze(0)
#distance_matrix2[:,2,:,:]=torch.sqrt(torch.pow(distance_matrix2[:,0,:,:],2)+torch.pow(distance_matrix2[:,1,:,:],2)).unsqueeze(0)
# representation_l=torch.cat([lr_feature[:,:,:,:-scale],hr_feature[:,:,:,scale:],lr_feature[:,:,:,:-scale]*hr_feature[:,:,:,scale:], \
# torch.pow(lr_feature[:,:,:,:-scale]-hr_feature[:,:,:,scale:],2)],1)
representation_l=torch.cat([lr_feature[:,:,:,:-scale],hr_feature[:,:,:,scale:],distance_matrix2[:,:,:,:-scale]],1)
weights1_l=self.similarity1(representation_l)
#weights2_l=self.similarity2(distance_matrix2[:,:,:,:-scale])
#mapping_l=torch.cat([padding1,self.fuse(torch.cat([weights1_l,weights2_l],1))],-1)
mapping_l=torch.cat([padding1,weights1_l],-1)
#target
# representation_target_l=torch.cat([lr_feature_r[:,:,:,:-scale],hr_feature_r[:,:,:,scale:],lr_feature_r[:,:,:,:-scale]*hr_feature_r[:,:,:,scale:], \
# torch.pow(lr_feature_r[:,:,:,:-scale]-hr_feature_r[:,:,:,scale:],2)],1)
representation_target_l=torch.cat([lr_feature_r[:,:,:,:-scale],hr_feature_r[:,:,:,scale:],distance_matrix2[:,:,:,:-scale]],1)
weights1_target_l=self.similarity1(representation_target_l)
#weights2_target_l=self.similarity2(distance_matrix2[:,:,:,:-scale])
mapping_target_l=torch.cat([padding1,weights1_target_l],-1)
#top
x=torch.arange(1,scale+1).float()
x=x.expand(scale,scale).unsqueeze(0).transpose(2,1)
x=x.repeat(hr_feature.shape[0],hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float().cuda()
distance_matrix3[:,1,:,:]=(scale-x+1)
#distance_matrix3=distance_matrix3.unsqueeze(0)
#distance_matrix3[:,2,:,:]=torch.sqrt(torch.pow(distance_matrix3[:,0,:,:],2)+torch.pow(distance_matrix3[:,1,:,:],2)).unsqueeze(0)
# representation_t=torch.cat([lr_feature[:,:,:-scale,:],hr_feature[:,:,scale:,:],lr_feature[:,:,:-scale,:]*hr_feature[:,:,scale:,:], \
# torch.pow(lr_feature[:,:,:-scale,:]-hr_feature[:,:,scale:,:],2)],1)
representation_t=torch.cat([lr_feature[:,:,:-scale,:],hr_feature[:,:,scale:,:],distance_matrix3[:,:,:-scale,:]],1)
weights1_t=self.similarity1(representation_t)
#weights2_t=self.similarity2(distance_matrix3[:,:,:-scale,:])
#mapping_t=torch.cat([padding2,self.fuse(torch.cat([weights1_t,weights2_t],1))],-2)
mapping_t=torch.cat([padding2,weights1_t],-2)
#bottom
distance_matrix4[:,1,:,:]=x
#distance_matrix4=distance_matrix4.unsqueeze(0)
#distance_matrix4[:,2,:,:]=torch.sqrt(torch.pow(distance_matrix4[:,0,:,:],2)+torch.pow(distance_matrix4[:,1,:,:],2)).unsqueeze(0)
# representation_b=torch.cat([lr_feature[:,:,scale:,:],hr_feature[:,:,:-scale,:],lr_feature[:,:,scale:,:]*hr_feature[:,:,:-scale,:], \
# torch.pow(lr_feature[:,:,scale:,:]-hr_feature[:,:,:-scale,:],2)],1)
representation_b=torch.cat([lr_feature[:,:,scale:,:],hr_feature[:,:,:-scale,:],distance_matrix4[:,:,scale:,:]],1)
weights1_b=self.similarity1(representation_b)
#weights2_b=self.similarity2(distance_matrix4[:,:,scale:,:])
#mapping_b=torch.cat([self.fuse(torch.cat([weights1_b,weights2_b],1)),padding2],-2)
mapping_b=torch.cat([weights1_b,padding2],-2)
mapping_all=torch.cat([mapping,mapping_r,mapping_l,mapping_t,mapping_b],dim=1)
mapping_norm=F.softmax(mapping_all, dim=1)
mapping_all_target=torch.cat([mapping_target,mapping_target_r,mapping_target_l],dim=1)
mapping_norm_target=F.softmax(mapping_all_target, dim=1)
#return mapping,mapping_r,mapping_l,mapping_t,mapping_b
return torch.chunk(mapping_norm*mapping_all,5,dim=1),torch.chunk(mapping_norm_target*mapping_all_target,3,dim=1)
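# Hedged sketch (not part of the original model) of how the mapping above is
# invoked: it expects low- and high-resolution feature maps whose spatial
# ratio is an even integer (scale=2 here) for both the reference and target
# views, and returns 5 reference-view and 3 target-view weight maps at the
# high resolution. All tensor sizes below are illustrative assumptions.
def _six_related_mapping_demo():
    mapper = six_related_context_mapping().cuda()
    lr = torch.randn(1, 32, 8, 8).cuda()
    hr = torch.randn(1, 32, 16, 16).cuda()
    ref_maps, target_maps = mapper(lr, hr, lr.clone(), hr.clone())
    return len(ref_maps), len(target_maps)             # (5, 3)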
class four_related_context_mapping(nn.Module):
def __init__(self):
super(four_related_context_mapping,self).__init__()
self.similarity1=similarity_measure1()
self.similarity2=similarity_measure2()
self.fuse=nn.Sequential(nn.Conv2d(2, 1, kernel_size=1, stride=1, padding=0,
bias=False,dilation=1),nn.LeakyReLU(inplace=True))
#self.fuse.weight.data.fill_(1)
def forward(self, lr_feature, hr_feature):
#self.fuse.weight.data=torch.abs(self.fuse.weight.data)
with torch.no_grad():
scale=hr_feature.shape[-1]//lr_feature.shape[-1]
if scale%2==0:
x=torch.arange(-scale//2,scale//2+1).float()
x=torch.cat([x[:x.shape[0]//2],x[x.shape[0]//2+1:]]).unsqueeze(0)
distance_matrix=x.expand(scale,scale).unsqueeze(0)
distance_matrix=torch.cat([distance_matrix,distance_matrix.transpose(2,1)],0)
distance_matrix=torch.cat([distance_matrix,torch.sqrt(torch.pow(distance_matrix[0],2)+torch.pow(distance_matrix[1],2)).unsqueeze(0)],0).unsqueeze(0)
padding1=torch.zeros(hr_feature.shape[0],1,hr_feature.shape[2],scale).float().cuda()
padding2=torch.zeros(hr_feature.shape[0],1,scale,hr_feature.shape[3]).float().cuda()
else:
exit()
#center
distance_matrix=distance_matrix.repeat(hr_feature.shape[0],1,hr_feature.shape[-2]//scale,hr_feature.shape[-1]//scale).float().cuda()
# list of rooted bridges
bridge_roots.append(bridge)
# iterate over the list of newly created rooted bridges
for bridge in bridge_roots:
# if the vertex is only one non-articulation
# point in the bridge
if len(bridge) == 1:
                # take the lone vertex in the bridge
n = self.adjacencylist[bridge[0]]
# and create a new graph edge from it
new_edge = tuple(sorted([n[0], n[1]]))
# identify the arcs to be removed
e1 = tuple(sorted([bridge[0], n[0]]))
e2 = tuple(sorted([bridge[0], n[1]]))
# remove the network arcs (spatial) from the
# graph-theoretic representation
self.edges.remove(e1)
self.edges.remove(e2)
# remove the former network arc lengths from the
# graph edge lengths lookup
length_e1 = self.edge_lengths[e1]
length_e2 = self.edge_lengths[e2]
self.edge_lengths.pop(e1, None)
self.edge_lengths.pop(e2, None)
# and add the new edge length in their place
self.edge_lengths[new_edge] = length_e1 + length_e2
# update the pointers
self.arcs_to_edges[e1] = new_edge
self.arcs_to_edges[e2] = new_edge
            # if there is more than one vertex in the bridge
else:
cumulative_length = 0
start_end = {}
# initialize a redundant set of bridge edges
redundant = set([])
# iterate over the current bridge
for b in bridge:
# iterate over each node in the bridge
for n in self.adjacencylist[b]:
# start the bridge with this node
if n not in bridge:
start_end[b] = n
# or create a redundant edge with the current
# node and `b`
else:
redundant.add(tuple(sorted([b, n])))
# initialize a new graph edge
new_edge = tuple(sorted(start_end.values()))
# add start_end redundant edge
for k, v in start_end.items():
redundant.add(tuple(sorted([k, v])))
# remove all redundant network arcs while
# adjusting the graph edge lengths lookup
# and the edges_to_arcs lookup
for r in redundant:
self.edges.remove(r)
cumulative_length += self.edge_lengths[r]
self.edge_lengths.pop(r, None)
self.arcs_to_edges[r] = new_edge
# finally, add the new cumulative edge length
self.edge_lengths[new_edge] = cumulative_length
# add the updated graph edge
self.edges.append(new_edge)
        # convert the graph edges into a sorted set to prune out
        # duplicate graph edges created during simplification
self.edges = sorted(set(self.edges))
def _yield_napts(self):
"""Find all nodes with degree 2 that are not in an isolated
island ring (loop) component. These are non-articulation
points on the graph representation.
Returns
-------
napts : list
Non-articulation points on a graph representation.
"""
# non-articulation points
napts = set()
# network vertices remaining to evaluate
        unvisited = set(self.vertices.values())
        while unvisited:
# iterate over each component
for component_id, ring in self.network_component_is_ring.items():
# evaluate for non-articulation points
                napts, unvisited = self._evaluate_napts(
                    napts, unvisited, component_id, ring
)
# convert set of non-articulation points into list
napts = list(napts)
return napts
def _evaluate_napts(self, napts, unvisited, component_id, ring):
"""Evaluate one connected component in a network for
non-articulation points (``napts``) and return an updated set of
        ``napts`` and unvisited vertices.
Parameters
----------
napts : set
Non-articulation points (``napts``) in the network. The
``napts`` here do not include those within an isolated
loop island.
unvisited : set
Vertices left to evaluate in the network.
component_id : int
ID for the network connected component for the
current iteration of the algorithm.
ring : bool
Network component is isolated island loop ``True`` or
not ``False``.
Returns
-------
napts : set
Updated ``napts`` object.
unvisited : set
            Updated ``unvisited`` object.
"""
# iterate over each `edge` of the `component`
for component in self.network_component2arc[component_id]:
# each `component` has two vertices
for vertex in component:
# if `component` is not an isolated island
# and `vertex` has exactly 2 neighbors,
# add `vertex` to `napts`
if not ring:
if len(self.adjacencylist[vertex]) == 2:
napts.add(vertex)
# remove `vertex` from `unvisited` if
# it is still in the set else move along to
# the next iteration
try:
unvisited.remove(vertex)
except KeyError:
pass
return napts, unvisited
def _yieldneighbor(self, vtx, arc_vertices, bridge):
"""Used internally, this method traverses a bridge arc
to find the source and destination nodes.
Parameters
----------
vtx : int
The vertex ID.
arc_vertices : list
All non-articulation points (``napts``) in the network.
These are referred to as degree-2 vertices.
bridge : list
            Initial bridge list containing only ``vtx``.
Returns
-------
nodes : list
Vertices to keep (articulation points). These elements are
referred to as nodes.
"""
        # instantiate an empty list to fill with network articulation
# points (nodes with a degree of 1 [endpoints] or greater
# than 2 [intersections])
nodes = []
# get all nodes adjacent to `vtx` that are not in the
# set of 'bridge' vertices
for i in self.adjacencylist[vtx]:
if i in arc_vertices and i not in bridge:
nodes.append(i)
return nodes
def contiguityweights(
self, graph=True, weightings=None, from_split=False, weights_kws=dict()
):
"""Create a contiguity-based ``libpysal.weights.W`` object.
Parameters
----------
graph : bool
Controls whether the ``libpysal.weights.W`` is generated
using the spatial representation (``False``) or the graph
representation (``True``). Default is ``True``.
weightings : {dict, None}
Dictionary of lists of weightings for each arc/edge. Default is ``None``.
from_split : bool
Flag for whether the method is being called from within
``split_arcs()`` (``True``) or not (``False``). Default is ``False``.
weights_kws : dict
Keyword arguments for ``libpysal.weights.W``.
Returns
-------
W : libpysal.weights.W
A ``W`` representing the binary adjacency of the network.
See also
--------
libpysal.weights.W
Examples
--------
Instantiate a network.
>>> import spaghetti
>>> from libpysal import examples
>>> import numpy
>>> ntw = spaghetti.Network(examples.get_path("streets.shp"))
Snap point observations to the network with attribute information.
>>> ntw.snapobservations(
... examples.get_path("crimes.shp"), "crimes", attribute=True
... )
Find counts per network arc.
>>> counts = ntw.count_per_link(
... ntw.pointpatterns["crimes"].obs_to_arc, graph=False
... )
>>> counts[(50, 165)]
4
Create a contiguity-based ``W`` object.
>>> w = ntw.contiguityweights(graph=False)
>>> w.n, w.n_components
(303, 1)
Notes
-----
See :cite:`pysal2007` for more details.
"""
# instantiate OrderedDict to record network link
# adjacency which will be keyed by the link ID (a tuple)
# with values being lists of tuples (contiguous links)
neighbors = OrderedDict()
# flag network (arcs) or graph (edges)
if graph:
links = self.edges
else:
links = self.arcs
# if weightings are desired instantiate a dictionary
        # otherwise ignore weightings
if weightings:
_weights = {}
else:
_weights = None
# iterate over all links until all possibilities
# for network link adjacency are exhausted
working = True
while working:
# for each network link (1)
for key in links:
# instantiate a slot in the OrderedDict
neighbors[key] = []
if weightings:
_weights[key] = []
# for each network link (2)
for neigh in links:
# skip if comparing link to itself
if key == neigh:
continue
# if link(1) and link(2) share any vertex
# update neighbors adjacency
if (
key[0] == neigh[0]
or key[0] == neigh[1]
or key[1] == neigh[0]
or key[1] == neigh[1]
):
neighbors[key].append(neigh)
# and add weights if desired
if weightings:
_weights[key].append(weightings[neigh])
# break condition
# -- everything is sorted, so we know when we have
# stepped beyond a possible neighbor
if key[1] > neigh[1]:
working = False
if len(links) == 1 or from_split:
working = False
# call libpysal for `W` instance
weights_kws["weights"] = _weights
w = weights.W(neighbors, **weights_kws)
return w
def distancebandweights(self, threshold, n_processes=1, gen_tree=False):
"""Create distance-based weights.
Parameters
----------
threshold : float
Distance threshold value.
n_processes : {int, str}
Specify the number of cores to utilize. Default is 1 core.
Use ``"all"`` to request all available cores.
Specify the exact number of cores with an integer.
gen_tree : bool
Rebuild shortest path with ``True``, or skip with ``False``.
Default is ``False``.
Returns
-------
w : libpysal.weights.W
A ``W`` object representing the binary adjacency of
the network.
Notes
-----
See :cite:`AnselinRey2014` and :cite:`rey_open_2015` for more details
regarding spatial weights.
See also
--------
libpysal.weights.W
Examples
--------
Instantiate an instance of a network.
>>> import spaghetti
>>> from libpysal import examples
>>> streets_file = examples.get_path("streets.shp")
>>> ntw = spaghetti.Network(in_data=streets_file)
Create a contiguity-based ``W`` object based on network distance, ``500``
`US feet in this case <https://github.com/pysal/libpysal/blob/master/libpysal/examples/geodanet/streets.prj>`_.
>>> w = ntw.distancebandweights(threshold=500)
Show the number of units in the ``W`` object.
>>> w.n
230
        There are ``8`` units with ``3`` neighbors in the ``W``
<gh_stars>0
#! /usr/bin/env python
# -*- coding: utf-8 -*-
FASL_GRAPH_DEF_TYPE = 1
FASL_GRAPH_REF_TYPE = 2
FASL_FALSE_TYPE = 3
FASL_TRUE_TYPE = 4
FASL_NULL_TYPE = 5
FASL_VOID_TYPE = 6
FASL_EOF_TYPE = 7
FASL_INTEGER_TYPE = 8
FASL_FLONUM_TYPE = 9
FASL_SINGLE_FLONUM_TYPE = 10
FASL_RATIONAL_TYPE = 11
FASL_COMPLEX_TYPE = 12
FASL_CHAR_TYPE = 13
FASL_SYMBOL_TYPE = 14
FASL_UNREADABLE_SYMBOL_TYPE = 15
FASL_UNINTERNED_SYMBOL_TYPE = 16
FASL_KEYWORD_TYPE = 17
FASL_STRING_TYPE = 18
FASL_IMMUTABLE_STRING_TYPE = 19
FASL_BYTES_TYPE = 20
FASL_IMMUTABLE_BYTES_TYPE = 21
FASL_PATH_TYPE = 22
FASL_RELATIVE_PATH_TYPE = 23
FASL_PREGEXP_TYPE = 24
FASL_REGEXP_TYPE = 25
FASL_BYTE_PREGEXP = 26
FASL_BYTE_REGEXP_TYPE = 27
FASL_LIST_TYPE = 28
FASL_LIST_STAR_TYPE = 29
FASL_PAIR_TYPE = 30
FASL_VECTOR_TYPE = 31
FASL_IMMUTABLE_VECTOR_TYPE = 32
FASL_BOX_TYPE = 33
FASL_IMMUTABLE_BOX_TYPE = 34
FASL_PREFAB_TYPE = 35
FASL_HASH_TYPE = 36
FASL_IMMUTABLE_HASH_TYPE = 37
FASL_SRCLOC = 38
FASL_EXTFLONUM_TYPE = 39
# 100 to 255 is used for small integers:
FASL_SMALL_INTEGER_START = 100
FASL_LOWEST_SMALL_INTEGER = -10
FASL_HIGHEST_SMALL_INTEGER = 255 - ((FASL_SMALL_INTEGER_START - FASL_LOWEST_SMALL_INTEGER) - 1)
FASL_PREFIX = "racket/fasl:"
FASL_PREFIX_LENGTH = len(FASL_PREFIX)
FASL_HASH_EQ_VARIANT = 0
FASL_HASH_EQUAL_VARIANT = 1
FASL_HASH_EQV_VARIANT = 2
#################################################
class Fasl(object):
_attrs_ = ["GLOBAL_SHARED_COUNT", "SHARED", "current_relative_dir"]
_immutable_fields_ = ["current_relative_dir"]
def __init__(self, relative_dir=None):
self.GLOBAL_SHARED_COUNT = -1
self.SHARED = []
self.current_relative_dir = relative_dir
def to_sexp_from_file(self, file_name):
from pycket.values_string import W_String
from pycket.prims.input_output import open_infile
port = open_infile(W_String.make(file_name), "rb")
return self.to_sexp_from_w_port(port)
def to_sexp_from_w_port(self, port):
prefix = port.read(FASL_PREFIX_LENGTH)
if prefix != FASL_PREFIX:
raise Exception("unrecognized prefix : %s " % prefix)
shared_count = self.read_fasl_integer_stream(port)
self.GLOBAL_SHARED_COUNT = shared_count
self.SHARED = [None]*shared_count
length = self.read_fasl_integer_stream(port)
fasl_string = port.read(length)
pos = 0
sexp, pos = self.fasl_to_sexp_recursive(fasl_string, pos)
return sexp
def read_multi_double_into_rpython_list(self, fasl_string, pos, length):
keys = [None]*length
vals = [None]*length
for i in range(length):
k, pos = self.fasl_to_sexp_recursive(fasl_string, pos)
v, pos = self.fasl_to_sexp_recursive(fasl_string, pos)
keys[i] = k
vals[i] = v
return keys, vals, pos
def read_multi_into_rpython_list(self, fasl_string, pos, length):
vals = [None]*length
for i in range(length):
element, pos = self.fasl_to_sexp_recursive(fasl_string, pos)
vals[i] = element
return vals, pos
def read_fasl_string(self, fasl_string, pos, length=-1):
if length < 0:
length, pos = self.read_fasl_integer(fasl_string, pos)
return self.read_bytes_exactly(fasl_string, pos, length)
# TODO: check utf-8
def read_fasl_bytes(self, fasl_string, pos):
bytes_len, pos = self.read_fasl_integer(fasl_string, pos)
return self.read_bytes_exactly(fasl_string, pos, bytes_len)
def read_byte_no_eof(self, fasl_string, pos):
return ord(fasl_string[pos]), pos+1
def read_bytes_exactly(self, fasl_string, pos, n):
if pos+n > len(fasl_string):
raise Exception("truncated stream")
return self.get_slice(fasl_string, pos, pos+n), pos+n
def read_fasl_integer(self, fasl_string, pos):
b, pos = self.read_byte_no_eof(fasl_string, pos)
return self.fasl_integer_inner(fasl_string, pos, b)
def fasl_integer_inner(self, fasl_string, pos, b):
from pycket import values as v
from pycket.prims.numeric import _integer_bytes_to_integer
from pycket.prims.string import _str2num
from pycket.values_string import W_String
if b <= 127:
return b, pos
elif b >= 132:
return b-256, pos
elif b == 128:
num_str, pos = self.read_bytes_exactly(fasl_string, pos, 2)
return _integer_bytes_to_integer(list(num_str), v.w_true, v.w_false).toint(), pos
elif b == 129:
num_str, pos = self.read_bytes_exactly(fasl_string, pos, 4)
return _integer_bytes_to_integer(list(num_str), v.w_true, v.w_false).toint(), pos
elif b == 130:
num_str, pos = self.read_bytes_exactly(fasl_string, pos, 8)
return _integer_bytes_to_integer(list(num_str), v.w_true, v.w_false).toint(), pos
elif b == 131:
length, pos = self.read_fasl_integer(fasl_string, pos)
num_str, pos = self.read_fasl_string(fasl_string, pos, length)
if len(num_str) != length:
raise Exception("fasl: truncated stream at number")
return _str2num(W_String.fromstr_utf8(num_str).as_str_utf8(), 16).toint(), pos
else:
raise Exception("fasl: internal error on integer mode")
def read_bytes_exactly_stream(self, stream, n):
bytes = stream.read(n)
if len(bytes) != n:
raise Exception("truncated stream")
return bytes
def read_fasl_integer_stream(self, stream):
from pycket import values as v
from pycket.prims.numeric import _integer_bytes_to_integer
from pycket.prims.string import _str2num
from pycket.values_string import W_String
        _b = stream.read(1)
        if not _b:
            raise Exception("truncated stream - got eof")
        b = ord(_b[0])
if b <= 127:
return b
elif b >= 132:
return b-256
elif b == 128:
num_str = self.read_bytes_exactly_stream(stream, 2)
return _integer_bytes_to_integer(list(num_str), v.w_true, v.w_false).toint()
elif b == 129:
num_str = self.read_bytes_exactly_stream(stream, 4)
return _integer_bytes_to_integer(list(num_str), v.w_true, v.w_false).toint()
elif b == 130:
num_str = self.read_bytes_exactly_stream(stream, 8)
return _integer_bytes_to_integer(list(num_str), v.w_true, v.w_false).toint()
elif b == 131:
length = self.read_fasl_integer_stream(stream)
assert isinstance(length, int)
num_str = self.read_bytes_exactly_stream(stream, length)
if len(num_str) != length:
raise Exception("fasl: truncated stream at number")
return _str2num(W_String.fromstr_utf8(num_str).as_str_utf8(), 16).toint()
else:
raise Exception("fasl: internal error on integer mode")
def get_slice(self, string, start, stop):
assert stop > 0 and start >= 0
return string[start:stop]
    # let's not worry about CPS'ing this right now
# we probably won't have any sexp deeper than the stack anyways
def fasl_to_sexp_recursive(self, fasl_string, pos):
from pycket import values as v
from pycket.values_string import W_String
from pycket.values_regex import W_Regexp, W_PRegexp, W_ByteRegexp, W_BytePRegexp
from pycket.vector import W_Vector
from pycket.values_struct import W_Struct
from pycket.hash import simple as hash_simple
from pycket.hash.equal import W_EqualHashTable
from pycket.prims.numeric import float_bytes_to_real
from pycket.prims.string import _str2num
from rpython.rlib.rbigint import rbigint
from pycket.prims.input_output import build_path, bytes_to_path_element
from pycket.ast_vs_sexp import to_rpython_list
typ, pos = self.read_byte_no_eof(fasl_string, pos)
if typ == FASL_GRAPH_DEF_TYPE:
position, pos = self.read_fasl_integer(fasl_string, pos)
val, pos = self.fasl_to_sexp_recursive(fasl_string, pos)
if position >= self.GLOBAL_SHARED_COUNT:
raise Exception("fasl: bad graph index")
self.SHARED[position] = val
return val, pos
elif typ == FASL_GRAPH_REF_TYPE:
position, pos = self.read_fasl_integer(fasl_string, pos)
if position >= self.GLOBAL_SHARED_COUNT:
raise Exception("fasl: bad graph index")
return self.SHARED[position], pos
elif typ == FASL_FALSE_TYPE:
return v.w_false, pos
elif typ == FASL_TRUE_TYPE:
return v.w_true, pos
elif typ == FASL_NULL_TYPE:
return v.w_null, pos
elif typ == FASL_VOID_TYPE:
return v.w_void, pos
elif typ == FASL_EOF_TYPE:
return v.eof_object, pos
elif typ == FASL_INTEGER_TYPE:
num, pos = self.read_fasl_integer(fasl_string, pos)
if isinstance(num, rbigint):
return v.W_Bignum(num), pos
return v.W_Fixnum(num), pos
elif typ == FASL_FLONUM_TYPE:
num_str, pos = self.read_bytes_exactly(fasl_string, pos, 8)
return float_bytes_to_real(list(num_str), v.w_false), pos
elif typ == FASL_SINGLE_FLONUM_TYPE:
num_str, pos = self.read_bytes_exactly(fasl_string, pos, 4)
real = float_bytes_to_real(list(num_str), v.w_false)
return real.arith_exact_inexact(), pos
elif typ == FASL_EXTFLONUM_TYPE:
bstr_len, pos = self.read_fasl_integer(fasl_string, pos)
num_str, pos = self.read_bytes_exactly(fasl_string, pos, bstr_len)
return _str2num(W_String.fromstr_utf8(num_str).as_str_utf8(), 10), pos
elif typ == FASL_RATIONAL_TYPE:
num, pos = self.fasl_to_sexp_recursive(fasl_string, pos)
den, pos = self.fasl_to_sexp_recursive(fasl_string, pos)
return v.W_Rational.make(num, den), pos
elif typ == FASL_COMPLEX_TYPE:
re, pos = self.fasl_to_sexp_recursive(fasl_string, pos)
im, pos = self.fasl_to_sexp_recursive(fasl_string, pos)
return v.W_Complex.from_real_pair(re, im), pos
elif typ == FASL_CHAR_TYPE:
_chr, pos = self.read_fasl_integer(fasl_string, pos)
return v.W_Character(unichr(_chr)), pos
elif typ == FASL_SYMBOL_TYPE:
sym_str, pos = self.read_fasl_string(fasl_string, pos)
return v.W_Symbol.make(sym_str), pos
elif typ == FASL_UNREADABLE_SYMBOL_TYPE:
sym_str, pos = self.read_fasl_string(fasl_string, pos)
return v.W_Symbol.make_unreadable(sym_str), pos
elif typ == FASL_UNINTERNED_SYMBOL_TYPE:
sym_str, pos = self.read_fasl_string(fasl_string, pos)
return v.W_Symbol(sym_str), pos
elif typ == FASL_KEYWORD_TYPE:
key_str, pos = self.read_fasl_string(fasl_string, pos)
return v.W_Keyword.make(key_str), pos
elif typ == FASL_STRING_TYPE:
str_str, pos = self.read_fasl_string(fasl_string, pos)
return W_String.make(str_str), pos
elif typ == FASL_IMMUTABLE_STRING_TYPE:
str_str, pos = self.read_fasl_string(fasl_string, pos)
return W_String.make(str_str).make_immutable(), pos
elif typ == FASL_BYTES_TYPE:
byts, pos = self.read_fasl_bytes(fasl_string, pos)
return v.W_Bytes.from_string(byts, immutable=False), pos
elif typ == FASL_IMMUTABLE_BYTES_TYPE:
byts, pos = self.read_fasl_bytes(fasl_string, pos)
return v.W_Bytes.from_string(byts), pos
elif typ == FASL_PATH_TYPE:
byts, pos = self.read_fasl_bytes(fasl_string, pos)
return v.W_Path(byts), pos
elif typ == FASL_RELATIVE_PATH_TYPE:
wrt_dir = self.current_relative_dir
p_w_lst, pos = self.fasl_to_sexp_recursive(fasl_string, pos)
p_r_lst, _ = to_rpython_list(p_w_lst)
rel_elems = [bytes_to_path_element(p) if isinstance(p, v.W_Bytes) else p for p in p_r_lst]
if wrt_dir:
return build_path([wrt_dir] + rel_elems), pos
elif rel_elems == []:
return build_path([v.W_Symbol.make("same")]), pos
else:
return build_path(rel_elems), pos
elif typ == FASL_PREGEXP_TYPE:
str_str, pos = self.read_fasl_string(fasl_string, pos)
return W_PRegexp(str_str), pos
elif typ == FASL_REGEXP_TYPE:
str_str, pos = self.read_fasl_string(fasl_string, pos)
return W_Regexp(str_str), pos
elif typ == FASL_BYTE_PREGEXP:
str_str, pos = self.read_fasl_string(fasl_string, pos)
return W_BytePRegexp(str_str), pos
elif typ == FASL_BYTE_REGEXP_TYPE:
str_str, pos = self.read_fasl_string(fasl_string, pos)
return W_ByteRegexp(str_str), pos
elif typ == FASL_LIST_TYPE:
list_len, pos = self.read_fasl_integer(fasl_string, pos)
lst, pos = self.read_multi_into_rpython_list(fasl_string, pos, list_len)
return v.to_list(lst), pos
elif typ == FASL_PAIR_TYPE:
car, pos = self.fasl_to_sexp_recursive(fasl_string, pos)
cdr, pos = self.fasl_to_sexp_recursive(fasl_string, pos)
return v.W_Cons.make(car, cdr), pos
elif typ == FASL_LIST_STAR_TYPE:
list_len, pos = self.read_fasl_integer(fasl_string, pos)
# list_len is the length of the proper part
lst, pos = self.read_multi_into_rpython_list(fasl_string, pos, list_len)
# read the last element
return_list, pos = self.fasl_to_sexp_recursive(fasl_string, pos)
for i in range(list_len-1, -1, -1):
return_list = v.W_Cons.make(lst[i], return_list)
return return_list, pos
elif typ == FASL_VECTOR_TYPE or typ == FASL_IMMUTABLE_VECTOR_TYPE:
vec_len, pos = self.read_fasl_integer(fasl_string, pos)
storage, pos = self.read_multi_into_rpython_list(fasl_string, pos, vec_len)
if typ == FASL_IMMUTABLE_VECTOR_TYPE:
return W_Vector.fromelements(storage, immutable=True), pos
return W_Vector.fromelements(storage), pos
elif typ == FASL_BOX_TYPE:
element, pos = self.fasl_to_sexp_recursive(fasl_string, pos)
return v.W_MBox(element), pos
elif typ == FASL_IMMUTABLE_BOX_TYPE:
element, pos = self.fasl_to_sexp_recursive(fasl_string, pos)
return v.W_IBox(element), pos
elif typ == FASL_PREFAB_TYPE:
key, pos = self.fasl_to_sexp_recursive(fasl_string, pos)
length, pos = self.read_fasl_integer(fasl_string, pos)
vals, pos = self.read_multi_into_rpython_list(fasl_string, pos, length)
return W_Struct.make_prefab(key, vals), pos
elif typ == FASL_HASH_TYPE:
variant, pos = self.read_byte_no_eof(fasl_string, pos)
length, pos = self.read_fasl_integer(fasl_string, pos)
keys, vals, pos = self.read_multi_double_into_rpython_list(fasl_string, pos, length)
if variant == FASL_HASH_EQ_VARIANT:
return hash_simple.make_simple_mutable_table(hash_simple.W_EqMutableHashTable, keys, vals), pos
            elif variant ==
[make_simple_split('split1', 3, True, False, 'off', 'user', True)]
}
split_changes[3] = {'since': 3, 'till': 3, 'splits': []}
sse_server.publish(make_split_change_event(3))
time.sleep(1)
assert factory.client().get_treatment('maldo', 'split1') == 'on'
assert not task.running()
# Validate the SSE requests
sse_request = sse_requests.get()
assert sse_request.method == 'GET'
path, qs = sse_request.path.split('?', 1)
assert path == '/event-stream'
qs = parse_qs(qs)
assert qs['accessToken'][0] == (
'<KEY>'
'US45QnJtR0EiLCJ0eXAiOiJKV1QifQ.eyJ4LWFibHktY2FwYWJpbGl0eSI6IntcIk1UW'
'XlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zZWdtZW50c1wiOltcInN1YnNjc'
'mliZVwiXSxcIk1UWXlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcI'
'jpbXCJzdWJzY3JpYmVcIl0sXCJjb250cm9sX3ByaVwiOltcInN1YnNjcmliZVwiLFwiY'
'2hhbm5lbC1tZXRhZGF0YTpwdWJsaXNoZXJzXCJdLFwiY29udHJvbF9zZWNcIjpbXCJzd'
'WJzY3JpYmVcIixcImNoYW5uZWwtbWV0YWRhdGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFib'
'<KEY>'
'Dk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5EvJh17WlOlAKhcD0'
)
assert set(qs['channels'][0].split(',')) == set(['MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_splits',
'MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_segments',
'[?occupancy=metrics.publishers]control_pri',
'[?occupancy=metrics.publishers]control_sec'])
assert qs['v'][0] == '1.1'
sse_request = sse_requests.get()
assert sse_request.method == 'GET'
path, qs = sse_request.path.split('?', 1)
assert path == '/event-stream'
qs = parse_qs(qs)
assert qs['accessToken'][0] == (
'<KEY>'
'US45QnJtR<KEY>'
'XlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zZWdtZW50c1wiOltcInN1YnNjc'
'mliZVwiXSxcIk1UWXlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcI'
'<KEY>WJ<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'Dk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5EvJh17WlOlAKhcD0'
)
assert set(qs['channels'][0].split(',')) == set(['MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_splits',
'MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_segments',
'[?occupancy=metrics.publishers]control_pri',
'[?occupancy=metrics.publishers]control_sec'])
assert qs['v'][0] == '1.1'
# Initial apikey validation
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/segmentChanges/__SOME_INVALID_SEGMENT__?since=-1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Initial splits fetch
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=-1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Auth
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/auth'
assert req.headers['authorization'] == 'Bearer some_apikey'
# SyncAll after streaming connected
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Fetch after first notification
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=2'
assert req.headers['authorization'] == 'Bearer some_apikey'
# SyncAll on retryable error handling
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=2'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Auth after connection breaks
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/auth'
assert req.headers['authorization'] == 'Bearer some_apikey'
# SyncAll after streaming connected again
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=2'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Fetch after new notification
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=2'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=3'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Cleanup
destroy_event = threading.Event()
factory.destroy(destroy_event)
destroy_event.wait()
sse_server.publish(sse_server.GRACEFUL_REQUEST_END)
sse_server.stop()
split_backend.stop()
def test_ably_errors_handling(self):
"""Test incoming ably errors and validate its handling."""
import logging
logger = logging.getLogger('splitio')
handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
auth_server_response = {
'pushEnabled': True,
            'token': ('<KEY>'
'<KEY>'
'<KEY>'
'T1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcIjpbXCJzdWJzY3JpYmVcIl0sXCJjb250cm'
'9sX3ByaVwiOltcInN1YnNjcmliZVwiLFwiY2hhbm5lbC1tZXRhZGF0YTpwdWJsaXNoZXJ'
'zXCJdLFwiY29udHJvbF9zZWNcIjpbXCJzdWJzY3JpYmVcIixcImNoYW5uZWwtbWV0YWRh'
'dGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFibHktY2xpZW50SWQiOiJjbGllbnRJZCIsImV4c'
'CI6MTYwNDEwMDU5MSwiaWF0IjoxNjA0MDk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5E'
'<KEY>')
}
split_changes = {
-1: {
'since': -1,
'till': 1,
'splits': [make_simple_split('split1', 1, True, False, 'off', 'user', True)]
},
1: {'since': 1, 'till': 1, 'splits': []}
}
segment_changes = {}
split_backend_requests = Queue()
split_backend = SplitMockServer(split_changes, segment_changes, split_backend_requests,
auth_server_response)
sse_requests = Queue()
sse_server = SSEMockServer(sse_requests)
split_backend.start()
sse_server.start()
sse_server.publish(make_initial_event())
sse_server.publish(make_occupancy('control_pri', 2))
sse_server.publish(make_occupancy('control_sec', 2))
kwargs = {
'sdk_api_base_url': 'http://localhost:%d/api' % split_backend.port(),
'events_api_base_url': 'http://localhost:%d/api' % split_backend.port(),
'auth_api_base_url': 'http://localhost:%d/api' % split_backend.port(),
'streaming_api_base_url': 'http://localhost:%d' % sse_server.port(),
'config': {'connectTimeout': 10000, 'featuresRefreshRate': 10}
}
factory = get_factory('some_apikey', **kwargs)
factory.block_until_ready(1)
assert factory.ready
time.sleep(2)
# Get a hook of the task so we can query its status
task = factory._sync_manager._synchronizer._split_tasks.split_task._task # pylint:disable=protected-access
assert not task.running()
assert factory.client().get_treatment('maldo', 'split1') == 'on'
# Make a change in the BE but don't send the event.
# We'll send an ignorable error and check it has nothing happened
split_changes[1] = {
'since': 1,
'till': 2,
'splits': [make_simple_split('split1', 2, True, False, 'off', 'user', False)]
}
split_changes[2] = {'since': 2, 'till': 2, 'splits': []}
sse_server.publish(make_ably_error_event(60000, 600))
time.sleep(1)
assert factory.client().get_treatment('maldo', 'split1') == 'on'
assert not task.running()
sse_server.publish(make_ably_error_event(40145, 401))
sse_server.publish(sse_server.GRACEFUL_REQUEST_END)
time.sleep(3)
assert task.running()
assert factory.client().get_treatment('maldo', 'split1') == 'off'
# Re-publish initial events so that the retry succeeds
sse_server.publish(make_initial_event())
sse_server.publish(make_occupancy('control_pri', 2))
sse_server.publish(make_occupancy('control_sec', 2))
time.sleep(3)
assert not task.running()
# Assert streaming is working properly
split_changes[2] = {
'since': 2,
'till': 3,
'splits': [make_simple_split('split1', 3, True, False, 'off', 'user', True)]
}
split_changes[3] = {'since': 3, 'till': 3, 'splits': []}
sse_server.publish(make_split_change_event(3))
time.sleep(2)
assert factory.client().get_treatment('maldo', 'split1') == 'on'
assert not task.running()
# Send a non-retryable ably error
sse_server.publish(make_ably_error_event(40200, 402))
sse_server.publish(sse_server.GRACEFUL_REQUEST_END)
time.sleep(3)
# Assert sync-task is running and the streaming status handler thread is over
assert task.running()
assert 'PushStatusHandler' not in [t.name for t in threading.enumerate()]
# Validate the SSE requests
sse_request = sse_requests.get()
assert sse_request.method == 'GET'
path, qs = sse_request.path.split('?', 1)
assert path == '/event-stream'
qs = parse_qs(qs)
assert qs['accessToken'][0] == (
'<KEY>'
'<KEY>'
'XlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zZWdtZW50c1wiOltcInN1YnNjc'
'mliZVwiXSxcIk1UWXlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcI'
'jpbXCJzdWJzY3JpYmVcIl0sXCJjb250cm9sX3ByaVwiOltcInN1YnNjcmliZVwiLFwiY'
'2hhbm5lbC1tZXRhZGF0YTpwdWJsaXNoZXJzXCJdLFwiY29udHJvbF9zZWNcIjpbXCJzd'
'WJzY3JpYmVcIixcImNoYW5uZWwtbWV0YWRhdGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFib'
'HktY2xpZW50SWQiOiJjbG<KEY>'
'Dk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5EvJh17WlOlAKhcD0'
)
assert set(qs['channels'][0].split(',')) == set(['MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_splits',
'MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_segments',
'[?occupancy=metrics.publishers]control_pri',
'[?occupancy=metrics.publishers]control_sec'])
assert qs['v'][0] == '1.1'
assert sse_request.method == 'GET'
path, qs = sse_request.path.split('?', 1)
assert path == '/event-stream'
qs = parse_qs(qs)
assert qs['accessToken'][0] == (
'<KEY>'
'<KEY>'
'XlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zZWdtZW50c1wiOltcInN1YnNjc'
'mliZVwiXSxcIk1UWXlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcI'
'jpbXCJzdWJzY3JpYmVcIl0sXCJjb250cm9sX3ByaVwiOltcInN1YnNjcmliZVwiLFwiY'
'2hhbm5lbC1tZXRhZGF0YTpwdWJsaXNoZXJzXCJdLFwiY29udHJvbF9zZWNcIjpbXCJzd'
'WJzY3JpYmVcIixcImNoYW5uZWwtbWV0YWRhdGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFib'
'<KEY>'
'Dk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5EvJh17WlOlAKhcD0'
)
assert set(qs['channels'][0].split(',')) == set(['MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_splits',
'MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_segments',
'[?occupancy=metrics.publishers]control_pri',
'[?occupancy=metrics.publishers]control_sec'])
assert qs['v'][0] == '1.1'
# Initial apikey validation
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/segmentChanges/__SOME_INVALID_SEGMENT__?since=-1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Initial splits fetch
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=-1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Auth
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/auth'
assert req.headers['authorization'] == 'Bearer some_apikey'
# SyncAll after streaming connected
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# SyncAll retriable error
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=2'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Auth again
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/auth'
assert req.headers['authorization'] == 'Bearer some_apikey'
# SyncAll after push is up
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=2'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Fetch after notification
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=2'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=3'
assert req.headers['authorization'] == 'Bearer some_apikey'
# SyncAll after non recoverable ably error
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=3'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Cleanup
destroy_event = threading.Event()
factory.destroy(destroy_event)
destroy_event.wait()
sse_server.publish(sse_server.GRACEFUL_REQUEST_END)
sse_server.stop()
split_backend.stop()
def make_split_change_event(change_number):
"""Make a split change event."""
return {
'event': 'message',
'data': json.dumps({
'id':'TVUsxaabHs:0:0',
'clientId':'pri:MzM0ODI1MTkxMw==',
'timestamp': change_number-1,
'encoding':'json',
'channel':'MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_splits',
'data': json.dumps({
'type': 'SPLIT_UPDATE',
'changeNumber': change_number
})
})
}
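# Note on the shape of these events (derived from the make_* helpers in this
# file): the outer 'data' field is a JSON-encoded string whose inner 'data'
# field is itself a JSON-encoded string, i.e. the payload is double-encoded,
# matching the SSE message envelope the SDK parses in these tests.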
def make_split_kill_event(name, default_treatment, change_number):
"""Make a split change event."""
return {
'event': 'message',
'data': json.dumps({
'id':'TVUsxaabHs:0:0',
'clientId':'pri:MzM0ODI1MTkxMw==',
'timestamp': change_number-1,
'encoding':'json',
'channel':'MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_splits',
'data': json.dumps({
'type': 'SPLIT_KILL',
'splitName': name,
'defaultTreatment': default_treatment,
'changeNumber': change_number
})
})
}
def make_initial_event():
"""Make a split change event."""
return {'id':'TVUsxaabHs:0:0'}
def make_occupancy(channel, publishers):
"""Make an occupancy event."""
return {
'event': 'message',
'data': json.dumps({
'id':'aP6EuhrcUm:0:0',
'timestamp':1604325712734,
'encoding': 'json',
'channel': "[?occupancy=metrics.publishers]%s" % channel,
'data': json.dumps({'metrics': {'publishers': publishers}}),
'name':'[meta]occupancy'
})
}
def make_segment_change_event(name, change_number):
"""Make a split change event."""
return {
'event': 'message',
'data': json.dumps({
'id':'TVUsxaabHs:0:0',
'clientId':'pri:MzM0ODI1MTkxMw==',
'timestamp': change_number-1,
'encoding':'json',
'channel':'MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_segments',
'data': json.dumps({
'type': 'SEGMENT_UPDATE',
'segmentName': name,
'changeNumber': change_number
})
})
}
def make_control_event(control_type, timestamp):
"""Make a control event."""
return {
'event': 'message',
'data': json.dumps({
'id':'TVUsxaabHs:0:0',
'clientId':'pri:MzM0ODI1MTkxMw==',
'timestamp': timestamp,
'encoding':'json',
'channel':'[?occupancy=metrics.publishers]control_pri',
'data': json.dumps({
'type': 'CONTROL',
'controlType': control_type,
})
})
}
def make_ably_error_event(code, status):
"""Make a control event."""
return {
'event': 'error',
'data': json.dumps({
'message':'Invalid accessToken in request: sarasa',
'code': code,
'statusCode': status,
'href':"https://help.ably.io/error/%d" % code
})
}
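# Hedged note on the error codes exercised above (semantics inferred from how the
# tests use them, not from Ably documentation): code 60000 / status 600 is treated
# as ignorable, 401xx codes such as 40145 force a token refresh and an SSE
# reconnect, and 40200 / status 402 is non-retryable, so the SDK falls back to
# polling. For example:
#   sse_server.publish(make_ably_error_event(40145, 401))  # retryable -> re-auth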
def make_simple_split(name, cn, active, killed, default_treatment, tt, on):
"""Make a simple split."""
return {
'trafficTypeName': tt,
'name': name,
'seed': 1699838640,
'status': 'ACTIVE' if active else 'ARCHIVED',
'changeNumber': cn,
'killed': killed,
'defaultTreatment': default_treatment,
'conditions': [
{
'matcherGroup': {
'combiner': 'AND',
'matchers': [
{
'matcherType': 'ALL_KEYS',
'negate': False,
'userDefinedSegmentMatcherData': None,
'whitelistMatcherData': None
}
]
},
'partitions': [
{'treatment': 'on' if | |
volume = db.volume_get(self.context, volume['id'])
self.assertEqual('deleting', volume['status'])
db.volume_destroy(self.context, volume['id'])
self.assertRaises(exception.NotFound,
db.volume_get,
self.context,
volume['id'])
def test_extra_capabilities(self):
# Test valid extra_capabilities.
fake_capabilities = {'key1': 1, 'key2': 2}
with mock.patch.object(jsonutils, 'loads') as mock_loads:
mock_loads.return_value = fake_capabilities
manager = vol_manager.VolumeManager()
manager.stats = {'pools': {}}
manager.driver.set_initialized()
manager.publish_service_capabilities(self.context)
self.assertTrue(mock_loads.called)
volume_stats = manager.last_capabilities
self.assertEqual(fake_capabilities['key1'],
volume_stats['key1'])
self.assertEqual(fake_capabilities['key2'],
volume_stats['key2'])
def test_extra_capabilities_fail(self):
with mock.patch.object(jsonutils, 'loads') as mock_loads:
mock_loads.side_effect = exception.CinderException('test')
self.assertRaises(exception.CinderException,
vol_manager.VolumeManager)
def test_delete_busy_volume(self):
"""Test volume survives deletion if driver reports it as busy."""
volume = tests_utils.create_volume(self.context, **self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id, volume=volume)
self.mox.StubOutWithMock(self.volume.driver, 'delete_volume')
self.volume.driver.delete_volume(
mox.IgnoreArg()).AndRaise(exception.VolumeIsBusy(
volume_name='fake'))
self.mox.ReplayAll()
self.volume.delete_volume(self.context, volume_id, volume=volume)
volume_ref = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(volume_id, volume_ref.id)
self.assertEqual("available", volume_ref.status)
def test_get_volume_different_tenant(self):
"""Test can't get volume of another tenant when viewable_admin_meta."""
volume = tests_utils.create_volume(self.context,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id, volume=volume)
another_context = context.RequestContext('another_user_id',
'another_project_id',
is_admin=False)
self.assertNotEqual(another_context.project_id,
self.context.project_id)
volume_api = cinder.volume.api.API()
self.assertRaises(exception.VolumeNotFound, volume_api.get,
another_context, volume_id, viewable_admin_meta=True)
self.assertEqual(volume_id,
volume_api.get(self.context, volume_id)['id'])
self.volume.delete_volume(self.context, volume_id, volume=volume)
def test_get_all_limit_bad_value(self):
"""Test value of 'limit' is numeric and >= 0"""
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidInput,
volume_api.get_all,
self.context,
limit="A")
self.assertRaises(exception.InvalidInput,
volume_api.get_all,
self.context,
limit="-1")
def test_get_all_tenants_volume_list(self):
"""Validate when the volume list for all tenants is returned"""
volume_api = cinder.volume.api.API()
with mock.patch.object(volume_api.db,
'volume_get_all_by_project') as by_project:
with mock.patch.object(volume_api.db,
'volume_get_all') as get_all:
db_volume = {'volume_type_id': fake.VOLUME_TYPE_ID,
'name': 'fake_name',
'host': 'fake_host',
'id': fake.VOLUME_ID}
volume = fake_volume.fake_db_volume(**db_volume)
by_project.return_value = [volume]
get_all.return_value = [volume]
volume_api.get_all(self.context, filters={'all_tenants': '0'})
self.assertTrue(by_project.called)
by_project.called = False
self.context.is_admin = False
volume_api.get_all(self.context, filters={'all_tenants': '1'})
self.assertTrue(by_project.called)
# check for volume list of all tenants
self.context.is_admin = True
volume_api.get_all(self.context, filters={'all_tenants': '1'})
self.assertTrue(get_all.called)
def test_delete_volume_in_error_extending(self):
"""Test volume can be deleted in error_extending stats."""
# create a volume
volume = tests_utils.create_volume(self.context, **self.volume_params)
self.volume.create_volume(self.context, volume.id, volume=volume)
# delete 'error_extending' volume
db.volume_update(self.context, volume['id'],
{'status': 'error_extending'})
self.volume.delete_volume(self.context, volume.id, volume=volume)
self.assertRaises(exception.NotFound, db.volume_get,
self.context, volume['id'])
@mock.patch.object(db.sqlalchemy.api, 'volume_get',
side_effect=exception.VolumeNotFound(
volume_id='12345678-1234-5678-1234-567812345678'))
def test_delete_volume_not_found(self, mock_get_volume):
"""Test delete volume moves on if the volume does not exist."""
volume_id = '12345678-1234-5678-1234-567812345678'
volume = objects.Volume(self.context, id=volume_id)
self.volume.delete_volume(self.context, volume_id, volume=volume)
self.assertTrue(mock_get_volume.called)
@mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.'
'create_volume_from_snapshot')
def test_create_volume_from_snapshot(self, mock_create_from_snap):
"""Test volume can be created from a snapshot."""
volume_src = tests_utils.create_volume(self.context,
**self.volume_params)
self.volume.create_volume(self.context, volume_src.id,
volume=volume_src)
snapshot_id = create_snapshot(volume_src['id'],
size=volume_src['size'])['id']
snapshot_obj = objects.Snapshot.get_by_id(self.context, snapshot_id)
self.volume.create_snapshot(self.context, volume_src['id'],
snapshot_obj)
volume_dst = tests_utils.create_volume(self.context,
snapshot_id=snapshot_id,
**self.volume_params)
self.volume.create_volume(self.context, volume_dst.id,
volume=volume_dst)
self.assertEqual(volume_dst['id'],
db.volume_get(
context.get_admin_context(),
volume_dst['id']).id)
self.assertEqual(snapshot_id,
db.volume_get(context.get_admin_context(),
volume_dst['id']).snapshot_id)
self.volume.delete_volume(self.context, volume_dst.id,
volume=volume_dst)
self.volume.delete_snapshot(self.context, snapshot_obj)
self.volume.delete_volume(self.context, volume_src.id,
volume=volume_src)
@mock.patch('cinder.volume.flows.api.create_volume.get_flow')
def test_create_volume_from_snapshot_with_types(self, _get_flow):
"""Test volume create from snapshot with types including mistmatch."""
volume_api = cinder.volume.api.API()
db.volume_type_create(
context.get_admin_context(),
{'name': 'foo',
'extra_specs': {'volume_backend_name': 'dev_1'}})
db.volume_type_create(
context.get_admin_context(),
{'name': 'biz', 'extra_specs': {'volume_backend_name': 'dev_2'}})
foo_type = db.volume_type_get_by_name(context.get_admin_context(),
'foo')
biz_type = db.volume_type_get_by_name(context.get_admin_context(),
'biz')
snapshot = {'id': fake.SNAPSHOT_ID,
'status': fields.SnapshotStatus.AVAILABLE,
'volume_size': 10,
'volume_type_id': biz_type['id']}
snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context,
**snapshot)
# Make sure the case of specifying a type that
# doesn't match the snapshot's type fails
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context,
size=1,
name='fake_name',
description='fake_desc',
volume_type=foo_type,
snapshot=snapshot_obj)
# Make sure that trying to specify a type
# when the snapshot's type is None fails
snapshot_obj.volume_type_id = None
self.assertRaises(exception.InvalidVolumeType,
volume_api.create,
self.context,
size=1,
name='fake_name',
description='fake_desc',
volume_type=foo_type,
snapshot=snapshot_obj)
with mock.patch.object(cinder.volume.volume_types,
'get_volume_type') as mock_get_type:
mock_get_type.return_value = biz_type
snapshot_obj.volume_type_id = foo_type['id']
volume_api.create(self.context, size=1, name='fake_name',
description='fake_desc', volume_type=foo_type,
snapshot=snapshot_obj)
db.volume_type_destroy(context.get_admin_context(),
foo_type['id'])
db.volume_type_destroy(context.get_admin_context(),
biz_type['id'])
@mock.patch('cinder.volume.flows.api.create_volume.get_flow')
def test_create_volume_from_source_with_types(self, _get_flow):
"""Test volume create from source with types including mistmatch."""
volume_api = cinder.volume.api.API()
db.volume_type_create(
context.get_admin_context(),
{'name': 'foo',
'extra_specs': {'volume_backend_name': 'dev_1'}})
db.volume_type_create(
context.get_admin_context(),
{'name': 'biz', 'extra_specs': {'volume_backend_name': 'dev_2'}})
foo_type = db.volume_type_get_by_name(context.get_admin_context(),
'foo')
biz_type = db.volume_type_get_by_name(context.get_admin_context(),
'biz')
source_vol = {'id': fake.VOLUME_ID,
'status': 'available',
'volume_size': 10,
'volume_type': biz_type,
'volume_type_id': biz_type['id']}
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context,
size=1,
name='fake_name',
description='fake_desc',
volume_type=foo_type,
source_volume=source_vol)
# Make sure that trying to specify a type
# when the source type is None fails
source_vol['volume_type_id'] = None
source_vol['volume_type'] = None
self.assertRaises(exception.InvalidVolumeType,
volume_api.create,
self.context,
size=1,
name='fake_name',
description='fake_desc',
volume_type=foo_type,
source_volume=source_vol)
with mock.patch.object(cinder.volume.volume_types,
'get_volume_type') as mock_get_type:
mock_get_type.return_value = biz_type
source_vol['volume_type_id'] = biz_type['id']
source_vol['volume_type'] = biz_type
volume_api.create(self.context, size=1, name='fake_name',
description='fake_desc', volume_type=biz_type,
source_volume=source_vol)
db.volume_type_destroy(context.get_admin_context(),
foo_type['id'])
db.volume_type_destroy(context.get_admin_context(),
biz_type['id'])
@mock.patch('cinder.volume.flows.api.create_volume.get_flow')
def test_create_volume_from_source_with_same_backend(self, _get_flow):
"""Test volume create from source with type mismatch same backend."""
volume_api = cinder.volume.api.API()
foo_type = {
'name': 'foo',
'qos_specs_id': None,
'deleted': False,
'created_at': datetime.datetime(2015, 5, 8, 0, 40, 5, 408232),
'updated_at': None,
'extra_specs': {'volume_backend_name': 'dev_1'},
'is_public': True,
'deleted_at': None,
'id': '29e43b50-2cd7-4d0c-8ddd-2119daab3a38',
'description': None}
biz_type = {
'name': 'biz',
'qos_specs_id': None,
'deleted': False,
'created_at': datetime.datetime(2015, 5, 8, 0, 20, 5, 408232),
'updated_at': None,
'extra_specs': {'volume_backend_name': 'dev_1'},
'is_public': True,
'deleted_at': None,
'id': '34e54c31-3bc8-5c1d-9fff-2225bcce4b59',
'description': None}
source_vol = {'id': fake.VOLUME_ID,
'status': 'available',
'volume_size': 10,
'volume_type': biz_type,
'volume_type_id': biz_type['id']}
with mock.patch.object(cinder.volume.volume_types,
'get_volume_type') as mock_get_type:
mock_get_type.return_value = biz_type
volume_api.create(self.context,
size=1,
name='fake_name',
description='fake_desc',
volume_type=foo_type,
source_volume=source_vol)
@mock.patch('cinder.volume.flows.api.create_volume.get_flow')
def test_create_from_source_and_snap_only_one_backend(self, _get_flow):
"""Test create from source and snap with type mismatch one backend."""
volume_api = cinder.volume.api.API()
foo_type = {
'name': 'foo',
'qos_specs_id': None,
'deleted': False,
'created_at': datetime.datetime(2015, 5, 8, 0, 40, 5, 408232),
'updated_at': None,
'extra_specs': {'some_key': 3},
'is_public': True,
'deleted_at': None,
'id': '29e43b50-2cd7-4d0c-8ddd-2119daab3a38',
'description': None}
biz_type = {
'name': 'biz',
'qos_specs_id': None,
'deleted': False,
'created_at': datetime.datetime(2015, 5, 8, 0, 20, 5, 408232),
'updated_at': None,
'extra_specs': {'some_other_key': 4},
'is_public': True,
'deleted_at': None,
'id': '34e54c31-3bc8-5c1d-9fff-2225bcce4b59',
'description': None}
source_vol = {'id': fake.VOLUME_ID,
'status': 'available',
'volume_size': 10,
'volume_type': biz_type,
'volume_type_id': biz_type['id']}
snapshot = {'id': fake.SNAPSHOT_ID,
'status': fields.SnapshotStatus.AVAILABLE,
'volume_size': 10,
'volume_type_id': biz_type['id']}
snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context,
**snapshot)
with mock.patch('cinder.db.service_get_all') as mock_get_service, \
mock.patch.object(volume_api,
'list_availability_zones') as mock_get_azs:
mock_get_service.return_value = [{'host': 'foo'}]
mock_get_azs.return_value = {}
volume_api.create(self.context,
size=1,
name='fake_name',
description='fake_desc',
volume_type=foo_type,
source_volume=source_vol)
volume_api.create(self.context,
size=1,
name='fake_name',
description='fake_desc',
volume_type=foo_type,
snapshot=snapshot_obj)
def test_create_snapshot_driver_not_initialized(self):
volume_src = tests_utils.create_volume(self.context,
**self.volume_params)
self.volume.create_volume(self.context, volume_src.id,
volume=volume_src)
snapshot_id = create_snapshot(volume_src['id'],
size=volume_src['size'])['id']
snapshot_obj = objects.Snapshot.get_by_id(self.context, snapshot_id)
self.volume.driver._initialized = False
self.assertRaises(exception.DriverNotInitialized,
self.volume.create_snapshot,
self.context, volume_src['id'], snapshot_obj)
# NOTE(flaper87): The volume status should be error.
self.assertEqual(fields.SnapshotStatus.ERROR, snapshot_obj.status)
# lets cleanup the mess
self.volume.driver._initialized = True
self.volume.delete_snapshot(self.context, snapshot_obj)
self.volume.delete_volume(self.context, volume_src.id,
volume=volume_src)
def _mock_synchronized(self, name, *s_args, **s_kwargs):
def inner_sync1(f):
def inner_sync2(*args, **kwargs):
self.called.append('lock-%s' % (name))
ret = f(*args, **kwargs)
self.called.append('unlock-%s' % (name))
return ret
return inner_sync2
return inner_sync1
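# With this decorator in place, a function synchronized on 'foo' appends
# 'lock-foo' to self.called before the call and 'unlock-foo' after it, so a
# single invocation leaves self.called == ['lock-foo', 'unlock-foo'].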
def _fake_execute(self, *cmd, **kwargs):
pass
@mock.patch.object(coordination.Coordinator, 'get_lock')
@mock.patch.object(cinder.volume.drivers.lvm.LVMVolumeDriver,
'create_volume_from_snapshot')
def test_create_volume_from_snapshot_check_locks(
self, mock_lvm_create, mock_lock):
orig_flow = engine.ActionEngine.run
def mock_flow_run(*args, **kwargs):
# ensure the lock has been taken
mock_lock.assert_called_with('%s-delete_snapshot' % snap_id)
# now proceed with the flow.
ret = orig_flow(*args, **kwargs)
return ret
# create source volume
src_vol = tests_utils.create_volume(self.context, **self.volume_params)
src_vol_id = src_vol['id']
# no lock
self.volume.create_volume(self.context, src_vol_id, volume=src_vol)
snap_id = create_snapshot(src_vol_id,
size=src_vol['size'])['id']
snapshot_obj = objects.Snapshot.get_by_id(self.context, snap_id)
# no lock
self.volume.create_snapshot(self.context, src_vol_id, snapshot_obj)
dst_vol = tests_utils.create_volume(self.context,
snapshot_id=snap_id,
**self.volume_params)
dst_vol_id = dst_vol['id']
admin_ctxt = context.get_admin_context()
# mock the flow runner so we can do some checks
self.stubs.Set(engine.ActionEngine, 'run', mock_flow_run)
# locked
self.volume.create_volume(self.context, volume_id=dst_vol_id,
request_spec={'snapshot_id': snap_id},
volume=dst_vol)
mock_lock.assert_called_with('%s-delete_snapshot' % snap_id)
self.assertEqual(dst_vol_id, db.volume_get(admin_ctxt, dst_vol_id).id)
self.assertEqual(snap_id,
db.volume_get(admin_ctxt, dst_vol_id).snapshot_id)
# locked
self.volume.delete_volume(self.context, dst_vol_id, volume=dst_vol)
mock_lock.assert_called_with('%s-delete_volume' % dst_vol_id)
# locked
self.volume.delete_snapshot(self.context, snapshot_obj)
mock_lock.assert_called_with('%s-delete_snapshot' % snap_id)
# locked
self.volume.delete_volume(self.context, src_vol_id, volume=src_vol)
mock_lock.assert_called_with('%s-delete_volume' % src_vol_id)
self.assertTrue(mock_lvm_create.called)
@mock.patch.object(coordination.Coordinator, 'get_lock')
def test_create_volume_from_volume_check_locks(self, mock_lock):
# stub out command execution so no real commands are run
self.stubs.Set(utils, 'execute', self._fake_execute)
orig_flow = engine.ActionEngine.run
def mock_flow_run(*args, **kwargs):
# ensure the lock has been taken
mock_lock.assert_called_with('%s-delete_volume' % src_vol_id)
# now proceed with the flow.
ret = orig_flow(*args, **kwargs)
return ret
# create source volume
src_vol = tests_utils.create_volume(self.context, **self.volume_params)
src_vol_id = src_vol['id']
# no lock
self.volume.create_volume(self.context, src_vol_id, volume=src_vol)
self.assertEqual(0, mock_lock.call_count)
dst_vol = tests_utils.create_volume(self.context,
source_volid=src_vol_id,
**self.volume_params)
dst_vol_id = dst_vol['id']
admin_ctxt = context.get_admin_context()
# mock the flow runner so we can do some checks
self.stubs.Set(engine.ActionEngine, 'run', mock_flow_run)
# locked
self.volume.create_volume(self.context, volume_id=dst_vol_id,
request_spec={'source_volid': src_vol_id},
volume=dst_vol)
mock_lock.assert_called_with('%s-delete_volume' % src_vol_id)
self.assertEqual(dst_vol_id, db.volume_get(admin_ctxt, dst_vol_id).id)
self.assertEqual(src_vol_id,
db.volume_get(admin_ctxt, dst_vol_id).source_volid)
# locked
self.volume.delete_volume(self.context, dst_vol_id, volume=dst_vol)
mock_lock.assert_called_with('%s-delete_volume' % dst_vol_id)
# locked
self.volume.delete_volume(self.context, src_vol_id, volume=src_vol)
mock_lock.assert_called_with('%s-delete_volume' % src_vol_id)
def test_create_volume_from_volume_delete_lock_taken(self):
# create source volume
src_vol = tests_utils.create_volume(self.context, **self.volume_params)
src_vol_id = src_vol['id']
# no lock
self.volume.create_volume(self.context, src_vol_id, volume=src_vol)
dst_vol = tests_utils.create_volume(self.context,
source_volid=src_vol_id,
**self.volume_params)
orig_elevated = self.context.elevated
gthreads = []
def mock_elevated(*args, **kwargs):
# unset mock so it is only called once
self.stubs.Set(self.context, 'elevated', orig_elevated)
# we expect this to block and then fail
t = eventlet.spawn(self.volume.create_volume,
self.context,
volume_id=dst_vol.id,
request_spec={'source_volid': src_vol_id},
volume=dst_vol)
gthreads.append(t)
return orig_elevated(*args, **kwargs)
# mock something from early on in the delete operation and within the
# lock so that when we do the create we expect it to block.
self.stubs.Set(self.context, 'elevated', mock_elevated)
# locked
self.volume.delete_volume(self.context, src_vol_id, volume=src_vol)
# we expect the volume create to fail with the following err since the
# source | |
#!/usr/bin/python3
# Copyright © 2021 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0 OR MIT
import argparse
import os
import sys
import pathlib
import shutil
import subprocess
import prctl
import signal
import toml
import pexpect
import plumbum
import re
import errno
from time import sleep
import tempfile
from plumbum import colors, local, SshMachine
from plumbum.commands import ProcessExecutionError
from plumbum.cmd import whoami, python3, cat, getent
try:
from plumbum.cmd import xargo
except ImportError as e:
print("Unable to find the `xargo` binary in your $PATH")
print("")
print("Make sure to invoke `setup.sh` to install it.")
print("If you did that already, make sure the rust toolchain is on your path:")
print("Invoke `source $HOME/.cargo/env`")
sys.exit(errno.ENOENT)
def exception_handler(exception_type, exception, traceback):
print("%s: %s" % (exception_type.__name__, exception))
#
# run.py script settings
#
SCRIPT_PATH = pathlib.Path(os.path.dirname(os.path.realpath(__file__)))
CARGO_DEFAULT_ARGS = ["--color", "always"]
ARCH = "x86_64"
# TODO: should be generated for enabling parallel builds
QEMU_TAP_NAME = 'tap0'
QEMU_TAP_ZONE = '172.31.0.20/24'
#
# Important globals
#
BOOTLOADER_PATH = (SCRIPT_PATH / '..').resolve() / 'bootloader'
TARGET_PATH = (SCRIPT_PATH / '..').resolve() / 'target'
KERNEL_PATH = SCRIPT_PATH
LIBS_PATH = (SCRIPT_PATH / '..').resolve() / 'lib'
USR_PATH = (SCRIPT_PATH / '..').resolve() / 'usr'
UEFI_TARGET = "{}-uefi".format(ARCH)
KERNEL_TARGET = "{}-nrk".format(ARCH)
USER_TARGET = "{}-nrk-none".format(ARCH)
USER_RUSTFLAGS = "-Clink-arg=-zmax-page-size=0x200000"
#
# Command line argument parser
#
parser = argparse.ArgumentParser()
# General build arguments
parser.add_argument("-v", "--verbose", action="store_true",
help="increase output verbosity")
parser.add_argument("-n", "--norun", action="store_true",
help="Only build, don't run")
parser.add_argument("-r", "--release", action="store_true",
help="Do a release build.")
parser.add_argument("--kfeatures", type=str, nargs='+', default=[],
help="Cargo features to enable (in the kernel).")
parser.add_argument("--no-kfeatures", action="store_true", default=False,
help="Disable default Cargo features (in the kernel).", required=False)
parser.add_argument("--ufeatures", type=str, nargs='+', default=[],
help="Cargo features to enable (in user-space, use module_name:feature_name syntax to specify module specific features, e.g. init:print-test).")
parser.add_argument('-m', '--mods', nargs='+', default=['init'],
help='User-space modules to be included in build & deployment', required=False)
parser.add_argument("--cmd", type=str,
help="Command line arguments passed to the kernel.")
parser.add_argument("--machine",
help='Which machine to run on (defaults to qemu)', required=False, default='qemu')
# QEMU related arguments
parser.add_argument("--qemu-nodes", type=int,
help="How many NUMA nodes and sockets (for qemu).", required=False, default=None)
parser.add_argument("--qemu-cores", type=int,
help="How many cores (will get evenly divided among nodes).", default=1)
parser.add_argument("--qemu-memory", type=int,
help="How much total memory in MiB (will get evenly divided among nodes).", default=1024)
parser.add_argument("--qemu-pmem", type=int,
help="How much total peristent memory in MiB (will get evenly divided among nodes).", required=False, default=0)
parser.add_argument("--qemu-affinity", action="store_true", default=False,
help="Pin QEMU instance to dedicated host cores.")
parser.add_argument("--qemu-prealloc", action="store_true", default=False,
help="Pre-alloc memory for the guest", required=False)
parser.add_argument("--qemu-large-pages", action="store_true", default=False,
help="Use large-pages on the host for guest memory", required=False)
parser.add_argument("--qemu-settings", type=str,
help="Pass additional generic QEMU arguments.")
parser.add_argument("--qemu-monitor", action="store_true",
help="Launch the QEMU monitor (for qemu)")
parser.add_argument("--pvrdma", action="store_true",
help="Add para-virtual RDMA device (for qemu)", default=False)
parser.add_argument("-d", "--qemu-debug-cpu", action="store_true",
help="Debug CPU reset (for qemu)")
parser.add_argument('--nic', default='e1000', choices=["e1000", "virtio", "vmxnet3"],
help='What NIC model to use for emulation', required=False)
parser.add_argument('--kgdb', action="store_true",
help="Use the GDB remote debugger to connect to the kernel")
parser.add_argument('--qemu-ivshmem',
type=int,
help="Enable the ivshmem device with the size in MiB.",
required=False,
default=0)
parser.add_argument('--qemu-shmem-path',
type=str,
help="Provide shared memory file path.",
required=False,
default="")
# Baremetal argument
parser.add_argument('--configure-ipxe', action="store_true", default=False,
help='Execute pre-boot setup for bare-metal booting.', required=False)
parser.add_argument('--no-reboot', action="store_true", default=False,
help='Do not initiate a machine reboot.', required=False)
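# Example invocation (illustrative only; module names and values are placeholders):
#   python3 run.py --release --mods init --qemu-nodes 2 --qemu-cores 4 --qemu-memory 2048
# builds a release kernel plus the `init` user module and boots it in QEMU with
# two NUMA nodes and four cores divided evenly across them.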
NRK_EXIT_CODES = {
0: "[SUCCESS]",
1: "[FAIL] ReturnFromMain: main() function returned to arch_indepdendent part.",
2: "[FAIL] Encountered kernel panic.",
3: "[FAIL] Encountered OOM.",
4: "[FAIL] Encountered unexpected Interrupt.",
5: "[FAIL] General Protection Fault.",
6: "[FAIL] Unexpected Page Fault.",
7: "[FAIL] Unexpected process exit code when running a user-space test.",
8: "[FAIL] Unexpected exception during kernel initialization.",
9: "[FAIL] Got unrecoverable error (machine check, double fault)."
}
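# Hedged sketch (not part of the original script): how a QEMU process exit status
# could be mapped back to one of the NRK exit codes above. QEMU's isa-debug-exit
# device (added in run_qemu below) exits with (guest_value << 1) | 1, so the
# guest-side code is recovered by shifting right once.
def decode_nrk_exit(qemu_status):
    """Translate a QEMU process exit status into an NRK_EXIT_CODES message."""
    return NRK_EXIT_CODES.get(qemu_status >> 1,
                              "[UNKNOWN] Unexpected exit status {}".format(qemu_status))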
def log(msg):
print(colors.bold | ">>>", end=" "),
print(colors.bold.reset & colors.info | msg)
def build_bootloader(args):
"Builds the bootloader, copies the binary in the target UEFI directory"
log("Build bootloader")
uefi_build_args = ['build', '--target', UEFI_TARGET]
uefi_build_args += ['--package', 'bootloader']
uefi_build_args += CARGO_DEFAULT_ARGS
with local.cwd(BOOTLOADER_PATH):
with local.env(RUST_TARGET_PATH=BOOTLOADER_PATH.absolute()):
if args.verbose:
print("cd {}".format(BOOTLOADER_PATH))
print("RUST_TARGET_PATH={} xargo ".format(
BOOTLOADER_PATH.absolute()) + " ".join(uefi_build_args))
xargo(*uefi_build_args)
def build_kernel(args):
"Builds the kernel binary"
log("Build kernel")
with local.cwd(KERNEL_PATH):
with local.env(RUST_TARGET_PATH=(KERNEL_PATH / 'src' / 'arch' / ARCH).absolute()):
# TODO(cross-compilation): in case we use a cross compiler/linker
# also set: CARGO_TARGET_X86_64_NRK_LINKER=x86_64-elf-ld
build_args = ['build', '--target', KERNEL_TARGET]
if args.no_kfeatures:
build_args += ["--no-default-features"]
for feature in args.kfeatures:
build_args += ['--features', feature]
build_args += CARGO_DEFAULT_ARGS
if args.verbose:
print("cd {}".format(KERNEL_PATH))
print("RUST_TARGET_PATH={} xargo ".format(
KERNEL_PATH / 'src' / 'arch' / ARCH) + " ".join(build_args))
xargo(*build_args)
def build_user_libraries(args):
"Builds nrk vibrio lib to provide runtime support for other rump based apps"
log("Build user-space lib vibrio")
build_args = ['build', '--target', USER_TARGET]
build_args += ["--features", "rumprt"]
if args.nic == "virtio":
build_args += ["--features", "virtio"]
# else: use e1000 / wm0
build_args += CARGO_DEFAULT_ARGS
# Make sure we build a static (.a) vibrio library
# For linking with rumpkernel
with local.cwd(LIBS_PATH / "vibrio"):
with local.env(RUSTFLAGS=USER_RUSTFLAGS):
with local.env(RUST_TARGET_PATH=USR_PATH.absolute()):
if args.verbose:
print("cd {}".format(LIBS_PATH / "vibrio"))
print("RUSTFLAGS={} RUST_TARGET_PATH={} xargo ".format(USER_RUSTFLAGS,
USR_PATH.absolute()) + " ".join(build_args))
xargo(*build_args)
def build_userspace(args):
"Builds user-space programs"
build_args_default = ['build', '--target', USER_TARGET]
build_args_default += CARGO_DEFAULT_ARGS
for module in args.mods:
if not (USR_PATH / module).exists():
log("User module {} not found, skipping.".format(module))
continue
with local.cwd(USR_PATH / module):
with local.env(RUSTFLAGS=USER_RUSTFLAGS):
with local.env(RUST_TARGET_PATH=USR_PATH.absolute()):
build_args = build_args_default.copy()
for feature in args.ufeatures:
if ':' in feature:
mod_part, feature_part = feature.split(':')
if module == mod_part:
build_args += ['--features', feature_part]
else:
build_args += ['--features', feature]
log("Build user-module {}".format(module))
if args.verbose:
print("cd {}".format(USR_PATH / module))
print("RUSTFLAGS={} RUST_TARGET_PATH={} xargo ".format(
USER_RUSTFLAGS, USR_PATH.absolute()) + " ".join(build_args))
xargo(*build_args)
def deploy(args):
"""
Deploys everything that got built to the UEFI ESP directory
Also builds a disk image (.img file)
"""
log("Deploy binaries")
# Clean up / create ESP dir structure
debug_release = 'release' if args.release else 'debug'
uefi_build_path = TARGET_PATH / UEFI_TARGET / debug_release
user_build_path = TARGET_PATH / USER_TARGET / debug_release
kernel_build_path = TARGET_PATH / KERNEL_TARGET / debug_release
# Clean and create ESP dir:
esp_path = uefi_build_path / 'esp'
if esp_path.exists() and esp_path.is_dir():
shutil.rmtree(esp_path, ignore_errors=False)
esp_boot_path = esp_path / "EFI" / "Boot"
esp_boot_path.mkdir(parents=True, exist_ok=True)
# Deploy kernel
shutil.copy2(kernel_build_path / 'nrk', os.getcwd())
shutil.copy2(kernel_build_path / 'nrk', esp_path / 'kernel')
# Deploy bootloader
shutil.copy2(uefi_build_path / 'bootloader.efi',
esp_boot_path / 'BootX64.efi')
# Write kernel cmd-line file in ESP dir
with open(esp_path / 'cmdline.in', 'w') as cmdfile:
if args.cmd:
cmdfile.write('./kernel {}'.format(args.cmd))
else:
cmdfile.write('./kernel')
deployed = []
# Deploy user-modules
for module in args.mods:
if not (user_build_path / module).is_file():
log("[WARN] Module not found: {}".format(module))
continue
if module != "rkapps":
shutil.copy2(user_build_path / module, esp_path)
deployed.append(module)
else:
# TODO(ugly): Special handling of the rkapps module
# (they end up being built as multiple .bin binaries)
to_copy = [app for app in user_build_path.glob(
"*.bin") if app.is_file()]
deployed.extend([f.name for f in to_copy])
for app in to_copy:
shutil.copy2(app, esp_path)
# Write the iPXE boot script in ESP dir
with open(esp_path / 'boot.php', 'w') as boot_file:
ipxe_script = """#!ipxe
imgfetch EFI/Boot/BootX64.efi
imgfetch kernel
imgfetch cmdline.in
{}
boot EFI/Boot/BootX64.efi
""".format('\n'.join(['imgfetch {}'.format(m) for m in deployed]))
boot_file.write(ipxe_script)
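# For reference, with the default `--mods init` the generated boot.php comes out
# roughly as (modulo the indentation of the triple-quoted template):
#   #!ipxe
#   imgfetch EFI/Boot/BootX64.efi
#   imgfetch kernel
#   imgfetch cmdline.in
#   imgfetch init
#   boot EFI/Boot/BootX64.efi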
def run_qemu(args):
"""
Run the kernel on a QEMU instance.
"""
from plumbum.cmd import sudo, tunctl, ifconfig, corealloc
from plumbum.machines import LocalCommand
from packaging import version
if args.qemu_pmem:
required_version = version.parse("6.0.0")
version_check = ['/usr/bin/env'] + \
['qemu-system-x86_64'] + ['-version']
# TODO: Ad-hoc approach to find version number. Can we improve it?
ver = str(subprocess.check_output(version_check)
).split(' ')[3].split('\\n')[0]
if version.parse(ver) < required_version:
print("Update Qemu to version {} or higher".format(required_version))
sys.exit(errno.EACCES)
log("Starting QEMU")
debug_release = 'release' if args.release else 'debug'
esp_path = TARGET_PATH / UEFI_TARGET / debug_release / 'esp'
qemu_default_args = ['-no-reboot']
# Setup KVM and required guest hardware features
qemu_default_args += ['-enable-kvm']
qemu_default_args += ['-cpu',
'host,migratable=no,+invtsc,+tsc,+x2apic,+fsgsbase']
# Use serial communication
# '-nographic',
qemu_default_args += ['-display', 'none', '-serial', 'stdio']
if args.kgdb:
# Add a second serial line (VM I/O port 0x2f8 <-> localhost:1234) that
# we use to connect with gdb
qemu_default_args += ['-serial', 'tcp:127.0.0.1:1234,server,nowait']
# Add UEFI bootloader support
qemu_default_args += ['-drive',
'if=pflash,format=raw,file={}/OVMF_CODE.fd,readonly=on'.format(BOOTLOADER_PATH)]
qemu_default_args += ['-drive',
'if=pflash,format=raw,file={}/OVMF_VARS.fd,readonly=on'.format(BOOTLOADER_PATH)]
qemu_default_args += ['-device', 'ahci,id=ahci,multifunction=on']
qemu_default_args += ['-drive',
'if=none,format=raw,file=fat:rw:{},id=esp'.format(esp_path)]
qemu_default_args += ['-device', 'ide-hd,bus=ahci.0,drive=esp']
# Debug port to exit qemu and communicate back exit-code for tests
qemu_default_args += ['-device',
'isa-debug-exit,iobase=0xf4,iosize=0x04']
if args.qemu_ivshmem:
if not args.qemu_shmem_path:
print("Provide path to the shared memory file.")
sys.exit(errno.EINVAL)
qemu_default_args += [
'-object',
'memory-backend-file,size={}M,mem-path={},share=on,id=HMB'.format(
args.qemu_ivshmem, args.qemu_shmem_path)
]
qemu_default_args += ['-device', 'ivshmem-plain,memdev=HMB']
# Enable networking with outside world
if args.nic != "vmxnet3":
qemu_default_args += ['-net',
'nic,model={},netdev=n0'.format(args.nic)]
qemu_default_args += ['-netdev',
'tap,id=n0,script=no,ifname={}'.format(QEMU_TAP_NAME)]
else:
qemu_default_args += ['-device',
'vmxnet3,netdev=n1,mac=56:b4:44:e9:62:dc,addr=10.0']
qemu_default_args += ['-netdev',
'tap,id=n1,script=no,ifname={}'.format(QEMU_TAP_NAME)]
# qemu_default_args += ['-net', 'none']
def numa_nodes_to_list(file):
nodes = []
good_nodes = cat[file]().split(',')
for node_range in good_nodes:
if "-" in node_range:
nlow, nmax = node_range.split('-')
for i in range(int(nlow), int(nmax)+1):
nodes.append(i)
else:
nodes.append(int(node_range.strip()))
return nodes
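# Example (hypothetical file contents): a has_cpu/has_memory file containing
# "0-1,3" yields [0, 1, 3]; a plain "0" yields [0]. Dash ranges are inclusive.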
def query_host_numa():
mem_nodes = numa_nodes_to_list(
"/sys/devices/system/node/has_memory")
cpu_nodes = numa_nodes_to_list("/sys/devices/system/node/has_cpu")
# Now return | |
7 words)
assert encoded_test_sentences[0, encoder.word_to_index("is")] == approx(1/7., rel=1e-3)
# a occurs once (out of 7 words)
assert encoded_test_sentences[0, encoder.word_to_index("a")] == approx(1/7., rel=1e-3)
# programming occurs once (out of 7 words)
assert encoded_test_sentences[0, encoder.word_to_index("programming")] == approx(1/7., rel=1e-3)
# language occurs once (out of 7 words)
assert encoded_test_sentences[0, encoder.word_to_index("language")] == approx(1/7., rel=1e-3)
# decode
bow_dict = encoder.decode(encoded_test_sentences, 0, show_progress=False, ignore_zero_freq=True)
assert bow_dict == approx({
"<OOV>": 2/7.,
"python": 1/7.,
"is": 1/7.,
"a": 1/7.,
"programming": 1/7.,
"language": 1/7.,
}, rel=1e-3)
def test_freq_encoder_limit_vocab():
# build a vocab of size 8 including:
# - reserved token <UNUSED> and <OOV>
# - plus the top 6 words in the corpus: and(8), python(7), a(4), programming(3), has(3), and the(3)
encoder = BowEncoder(limit_vocabulary=8, mode="freq")
encoder.prepare(corpus, show_progress=False)
# encode test sentence
encoded_test_sentences = encoder.encode([test_sentence], show_progress=False)
# vocabulary consists of overall 8 words and the test set contains only one text
assert encoded_test_sentences.shape == (1, 8)
# test sentence has a relative size of 1 (all 7 words)
assert np.sum(encoded_test_sentences) == approx(1, rel=1e-3)
# four of them are OOV (is, multi, paradigm, language) ...
assert encoded_test_sentences[0, 1] == approx(4/7., rel=1e-3)
# python occurs once (out of 7 words)
assert encoded_test_sentences[0, encoder.word_to_index("python")] == approx(1/7., rel=1e-3)
# a occurs once (out of 7 words)
assert encoded_test_sentences[0, encoder.word_to_index("a")] == approx(1/7., rel=1e-3)
# programming occurs once (out of 7 words)
assert encoded_test_sentences[0, encoder.word_to_index("programming")] == approx(1/7., rel=1e-3)
# decode
bow_dict = encoder.decode(encoded_test_sentences, 0, show_progress=False, ignore_zero_freq=True)
assert bow_dict == approx({
"<OOV>": 4/7.,
"python": 1/7.,
"a": 1/7.,
"programming": 1/7.,
}, rel=1e-3)
def test_freq_encoder_limit_vocab_and_top_words():
# build a vocab of size 20 including:
# - reserved token <UNUSED> and <OOV>
# - plus the next 18 words in the corpus (after skipping the top 3):
# programming(3), has(3), the(3), is(2), language(2), by(2), van(2), rossum(2), in(2), that(2), it(2),
# large(2), community(2), as(2), are(2), cpython(2), of(2), software(2)
# - ignored words (top 3): and(8), python(7), a(4)
encoder = BowEncoder(skip_top_words=3, limit_vocabulary=20, mode="freq")
encoder.prepare(corpus, show_progress=False)
# encode test sentence
encoded_test_sentences = encoder.encode([test_sentence], show_progress=False)
# vocabulary consists of overall 20 words and the test set contains only one text
assert encoded_test_sentences.shape == (1, 20)
# test sentence has a relative size of 1 (all 7 words)
assert np.sum(encoded_test_sentences) == approx(1, rel=1e-3)
# four of them are OOV (multi, paradigm, python, and a)
assert encoded_test_sentences[0, 1] == approx(4/7., rel=1e-3)
# is occurs once
assert encoded_test_sentences[0, encoder.word_to_index("is")] == approx(1/7., rel=1e-3)
# programming occurs once
assert encoded_test_sentences[0, encoder.word_to_index("programming")] == approx(1/7., rel=1e-3)
# language occurs once
assert encoded_test_sentences[0, encoder.word_to_index("language")] == approx(1/7., rel=1e-3)
# decode
bow_dict = encoder.decode(encoded_test_sentences, 0, show_progress=False, ignore_zero_freq=True)
assert bow_dict == approx({
"<OOV>": 4/7.,
"is": 1/7.,
"programming": 1/7.,
"language": 1/7.,
}, rel=1e-3)
def test_tfidf_encoder_default():
encoder = BowEncoder(mode="tfidf")
encoder.prepare(corpus, show_progress=False)
# encode test sentence
encoded_test_sentences = encoder.encode([test_sentence], show_progress=False)
# vocabulary consists of overall 100 words and the test set contains only one text
assert encoded_test_sentences.shape == (1, 100)
# test sentence tfidf sum (over all 7 words)
assert np.sum(encoded_test_sentences) == approx(9.706, rel=1e-3)
# two of them are OOV (multi and paradigm)
assert encoded_test_sentences[0, 1] == approx(3.898, rel=1e-3)
# python occurs once
assert encoded_test_sentences[0, encoder.word_to_index("python")] == approx(0.826, rel=1e-3)
# is occurs once
assert encoded_test_sentences[0, encoder.word_to_index("is")] == approx(1.386, rel=1e-3)
# a occurs once
assert encoded_test_sentences[0, encoder.word_to_index("a")] == approx(1.029, rel=1e-3)
# programming occurs once
assert encoded_test_sentences[0, encoder.word_to_index("programming")] == approx(1.178, rel=1e-3)
# language occurs once
assert encoded_test_sentences[0, encoder.word_to_index("language")] == approx(1.386, rel=1e-3)
# decode
bow_dict = encoder.decode(encoded_test_sentences, 0, show_progress=False, ignore_zero_freq=True)
assert bow_dict == approx({
"<OOV>": 3.898,
"python": 0.826,
"is": 1.386,
"a": 1.029,
"programming": 1.178,
"language": 1.386,
}, rel=1e-3)
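# Note on the expected tfidf values above: they come from BowEncoder's own
# weighting, which is not shown in this file. As a rough, explicitly hypothetical
# illustration, a log-scaled scheme of the form
#   idf(w)   = log(1 + n_docs / (1 + doc_freq(w)))
#   tfidf(w) = count(w) * idf(w)
# produces values of this magnitude, but the exact formula is owned by the
# encoder and should be read from its implementation, not from this comment.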
def test_tfidf_encoder_limit_vocab():
# build a vocab of size 8 including:
# - reserved token <UNUSED> and <OOV>
# - plus the top 6 words in the corpus: and(8), python(7), a(4), programming(3), has(3), and the(3)
encoder = BowEncoder(limit_vocabulary=8, mode="tfidf")
encoder.prepare(corpus, show_progress=False)
# encode test sentence
encoded_test_sentences = encoder.encode([test_sentence], show_progress=False)
# vocabulary consists of overall 8 words and the test set contains only one text
assert encoded_test_sentences.shape == (1, 8)
# test sentence tfidf sum (over all 7 words)
assert np.sum(encoded_test_sentences) == approx(8.529, rel=1e-3)
# four of them are OOV (is, multi, paradigm, language) ...
assert encoded_test_sentences[0, 1] == approx(5.494, rel=1e-3)
# python occurs once (out of 7 words)
assert encoded_test_sentences[0, encoder.word_to_index("python")] == approx(0.826, rel=1e-3)
# a occurs once (out of 7 words)
assert encoded_test_sentences[0, encoder.word_to_index("a")] == approx(1.029, rel=1e-3)
# programming occurs once (out of 7 words)
assert encoded_test_sentences[0, encoder.word_to_index("programming")] == approx(1.178, rel=1e-3)
# decode
bow_dict = encoder.decode(encoded_test_sentences, 0, show_progress=False, ignore_zero_freq=True)
assert bow_dict == approx({
"<OOV>": 5.494,
"python": 0.826,
"a": 1.029,
"programming": 1.178,
}, rel=1e-3)
def test_tfidf_encoder_limit_vocab_and_top_words():
# build a vocab of size 20 including:
# - reserved token <UNUSED> and <OOV>
# - plus the next 18 words in the corpus (after skipping the top 3):
# programming(3), has(3), the(3), is(2), language(2), by(2), van(2), rossum(2), in(2), that(2), it(2),
# large(2), community(2), as(2), are(2), cpython(2), of(2), software(2)
# - ignored words (top 3): and(8), python(7), a(4)
encoder = BowEncoder(skip_top_words=3, limit_vocabulary=20, mode="tfidf")
encoder.prepare(corpus, show_progress=False)
# encode test sentence
encoded_test_sentences = encoder.encode([test_sentence], show_progress=False)
# vocabulary consists of overall 20 words and the test set contains only one text
assert encoded_test_sentences.shape == (1, 20)
# test sentence has a relative size of 1 (all 7 words)
assert np.sum(encoded_test_sentences) == approx(9.706, rel=1e-3)
# four of them are OOV (multi, paradigm, python, and a); however, current tfidf aggregation for oov is broken
# TODO fix oov aggregation for top k (currently only implemented as: tfidf(OOV)+tfidf(top1)+tfidf(top2)+...)
assert encoded_test_sentences[0, 1] > 3.898
# is occurs once
assert encoded_test_sentences[0, encoder.word_to_index("is")] == approx(1.386, rel=1e-3)
# programming occurs once
assert encoded_test_sentences[0, encoder.word_to_index("programming")] == approx(1.178, rel=1e-3)
# language occurs once
assert encoded_test_sentences[0, encoder.word_to_index("language")] == approx(1.386, rel=1e-3)
# decode
bow_dict = encoder.decode(encoded_test_sentences, 0, show_progress=False, ignore_zero_freq=True)
assert bow_dict == approx({
"<OOV>": encoded_test_sentences[0, 1],
"is": 1.386,
"programming": 1.178,
"language": 1.386,
}, rel=1e-3)
def test_sequence_encoder():
encoder = TokenSequenceEncoder()
encoder.prepare(corpus, show_progress=False)
# encode test sentence
encoded_test_sentences = encoder.encode([test_sentence, "and"], show_progress=False)
# sentence consists of 7 words + <START> + <END> token
assert encoded_test_sentences.shape == (2, 9)
# first word is '<START>'
assert encoded_test_sentences[0, 0] == encoder.start_token_index
# second word is 'Python' (2nd most common + 4 reserved token)
assert encoded_test_sentences[0, 1] == 5
# third word is 'is' (7th most common + 4 reserved token)
assert encoded_test_sentences[0, 2] == 10
# fourth word is 'a' (3rd most common + 4 reserved token)
assert encoded_test_sentences[0, 3] == 6
# fifth word is 'multi' (unknown -> OOV)
assert encoded_test_sentences[0, 4] == encoder.oov_token_index
# sixth word is 'paradigm' (unknown -> OOV)
assert encoded_test_sentences[0, 5] == encoder.oov_token_index
# seventh word is 'programming' (4th most common + 4 reserved token)
assert encoded_test_sentences[0, 6] == 7
# eighth word is 'language' (8th most common + 4 reserved token)
assert encoded_test_sentences[0, 7] == 11
# last word is '<END>'
assert encoded_test_sentences[0, 8] == encoder.end_token_index
# padding with '<PAD>' (6 chars)
np.testing.assert_array_equal(
encoded_test_sentences[1, :6],
np.array([encoder.padding_token_index]*6))
# first word after is '<START>'
assert encoded_test_sentences[1, 6] == encoder.start_token_index
# second word is 'and' (most common + 4 reserved token)
assert encoded_test_sentences[1, 7] == 4
# last word is '<END>'
assert encoded_test_sentences[1, 8] == encoder.end_token_index
# decode
sequence_list = encoder.decode(encoded_test_sentences, 0, show_progress=False, show_start_end=False)
assert sequence_list == ["python", "is", "a", "<OOV>", "<OOV>", "programming", "language"]
sequence_list = encoder.decode(encoded_test_sentences, 1, show_progress=False, show_start_end=False)
assert sequence_list == ["and"]
# decode w/ control chars
sequence_list = encoder.decode(encoded_test_sentences, 0, show_progress=False, show_padding=True)
assert sequence_list == ["<START>", "python", "is", "a", "<OOV>", "<OOV>", "programming", "language", "<END>"]
sequence_list = encoder.decode(encoded_test_sentences, 1, show_progress=False, show_padding=True)
assert sequence_list == ["<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<START>", "and", "<END>"]
def test_sequence_encoder_limit_vocab():
# build a vocab of size 10 including:
# - reserved token <PAD>, <OOV>, <START>, and <END>
# - plus the top 6 words in the corpus: and(8), python(7), a(4), programming(3), has(3), and the(3)
encoder = TokenSequenceEncoder(limit_vocabulary=10)
encoder.prepare(corpus, show_progress=False)
# encode test sentence
encoded_test_sentences = encoder.encode([test_sentence, "and"], show_progress=False)
# sentence consists of 7 words + <START> token + <END> token
assert encoded_test_sentences.shape == (2, 9)
# first word is '<START>'
assert encoded_test_sentences[0, 0] == encoder.start_token_index
# second word is 'Python' (2nd most common + 4 reserved | |
import gym
from gym.spaces import Discrete, MultiDiscrete, Tuple
import numpy as np
from mujoco_worldgen.util.rotation import mat2quat
from mae_envs.wrappers.util import update_obs_space
from mae_envs.util.geometry import dist_pt_to_cuboid
from copy import deepcopy
from itertools import compress
class GrabObjWrapper(gym.Wrapper):
'''
Allows agents to grab an object using a weld constraint.
Args:
body_names (list): list of body names that the agent can grab
radius_multiplier (float): How far away can this be activated (multiplier on box size)
grab_dist (float): If set, the object is held at a specific distance during
grabbing (default: None).
Note: This does not work well with oblong objects
grab_exclusive (bool): If set true, each object can only be grabbed by
a single agent. If several agents attempt to
grab the same object, only the closest agent succeeds.
obj_in_game_metadata_keys (list of string): keys in metadata with boolean array saying
which objects are currently in the game. This is used in the event we are randomizing
number of objects
'''
def __init__(self, env, body_names, radius_multiplier=1.7,
grab_dist=None, grab_exclusive=False,
obj_in_game_metadata_keys=None):
super().__init__(env)
self.n_agents = self.unwrapped.n_agents
self.body_names = body_names
self.n_obj = len(body_names)
self.obj_in_game_metadata_keys = obj_in_game_metadata_keys
self.action_space.spaces['action_pull'] = (
Tuple([MultiDiscrete([2] * self.n_obj) for _ in range(self.n_agents)]))
self.observation_space = update_obs_space(
env, {'obj_pull': (self.n_obj, 1),
'you_pull': (self.n_obj, self.n_agents)})
self.grab_radius = radius_multiplier * self.metadata['box_size']
self.grab_dist = grab_dist
self.grab_exclusive = grab_exclusive
def observation(self, obs):
obs['you_pull'] = self.obj_grabbed.T
obs['obj_pull'] = np.any(obs['you_pull'], axis=-1, keepdims=True)
return obs
def reset(self):
obs = self.env.reset()
sim = self.unwrapped.sim
if self.obj_in_game_metadata_keys is not None:
self.actual_body_slice = np.concatenate([self.metadata[k] for k in self.obj_in_game_metadata_keys])
else:
self.actual_body_slice = np.ones((len(self.body_names))).astype(bool)
actual_body_names = list(compress(self.body_names, self.actual_body_slice))
self.n_obj = len(actual_body_names)
# Cache body ids
self.obj_body_idxs = np.array([sim.model.body_name2id(body_name) for body_name in actual_body_names])
self.agent_body_idxs = np.array([sim.model.body_name2id(f"agent{i}:particle") for i in range(self.n_agents)])
# Cache geom ids
self.obj_geom_ids = np.array([sim.model.geom_name2id(body_name) for body_name in actual_body_names])
self.agent_geom_ids = np.array([sim.model.geom_name2id(f'agent{i}:agent') for i in range(self.n_agents)])
# Cache constraint ids
self.agent_eq_ids = np.array(
[i for i, obj1 in enumerate(sim.model.eq_obj1id)
if sim.model.body_names[obj1] == f"agent{i}:particle"])
assert len(self.agent_eq_ids) == self.n_agents
# turn off equality constraints
sim.model.eq_active[self.agent_eq_ids] = 0
self.obj_grabbed = np.zeros((self.n_agents, self.n_obj), dtype=bool)
self.last_obj_grabbed = np.zeros((self.n_agents, self.n_obj), dtype=bool)
return self.observation(obs)
def grab_obj(self, action):
'''
Implements object grabbing for all agents
Args:
action: Action dictionary
'''
action_pull = action['action_pull'][:, self.actual_body_slice]
sim = self.unwrapped.sim
agent_pos = sim.data.body_xpos[self.agent_body_idxs]
obj_pos = sim.data.body_xpos[self.obj_body_idxs]
obj_width = sim.model.geom_size[self.obj_geom_ids]
obj_quat = sim.data.body_xquat[self.obj_body_idxs]
assert len(obj_width) == len(obj_quat), (
"Number of object widths must be equal to number of quaternions for direct distance calculation method. " +
"This might be caused by a body that contains several geoms.")
obj_dist = dist_pt_to_cuboid(agent_pos, obj_pos, obj_width, obj_quat)
allowed_and_desired = np.logical_and(action_pull, obj_dist <= self.grab_radius)
obj_dist_masked = obj_dist.copy() # Mask the obj dists to find a valid argmin
obj_dist_masked[~allowed_and_desired] = np.inf
if self.grab_exclusive:
closest_obj = np.zeros((self.n_agents,), dtype=int)
while np.any(obj_dist_masked < np.inf):
# find agent and object of closest object distance
agent_idx, obj_idx = np.unravel_index(np.argmin(obj_dist_masked), obj_dist_masked.shape)
# set closest object for this agent
closest_obj[agent_idx] = obj_idx
# ensure exclusivity of grabbing
obj_dist_masked[:, obj_idx] = np.inf
obj_dist_masked[agent_idx, :] = np.inf
# mark same object as undesired for all other agents
allowed_and_desired[:agent_idx, obj_idx] = False
allowed_and_desired[(agent_idx + 1):, obj_idx] = False
else:
closest_obj = np.argmin(obj_dist_masked, axis=-1)
valid_grabs = np.any(allowed_and_desired, axis=-1) # (n_agent,) which agents have valid grabs
# Turn on/off agents with valid grabs
sim.model.eq_active[self.agent_eq_ids] = valid_grabs
sim.model.eq_obj2id[self.agent_eq_ids] = self.obj_body_idxs[closest_obj]
# keep track of which object is being grabbed
self.obj_grabbed = np.zeros((self.n_agents, self.n_obj), dtype=bool)
agent_with_valid_grab = np.argwhere(valid_grabs)[:, 0]
self.obj_grabbed[agent_with_valid_grab, closest_obj[agent_with_valid_grab]] = 1
# If there are new grabs, then setup the weld constraint parameters
new_grabs = np.logical_and(
valid_grabs, np.any(self.obj_grabbed != self.last_obj_grabbed, axis=-1))
for agent_idx in np.argwhere(new_grabs)[:, 0]:
agent_rot = sim.data.body_xmat[self.agent_body_idxs[agent_idx]].reshape((3, 3))
obj_rot = sim.data.body_xmat[self.obj_body_idxs[closest_obj[agent_idx]]].reshape((3, 3))
# Need to use the geom xpos rather than the qpos
obj_pos = sim.data.body_xpos[self.obj_body_idxs[closest_obj[agent_idx]]]
agent_pos = sim.data.body_xpos[self.agent_body_idxs[agent_idx]]
grab_vec = agent_pos - obj_pos
if self.grab_dist is not None:
grab_vec = self.grab_dist / (1e-3 + np.linalg.norm(grab_vec)) * grab_vec
# The distance constraint needs to be rotated into the frame of reference of the agent
sim.model.eq_data[self.agent_eq_ids[agent_idx], :3] = np.matmul(agent_rot.T, grab_vec)
# The angle constraint is the difference between the agent's frame and the object's frame
sim.model.eq_data[self.agent_eq_ids[agent_idx], 3:] = mat2quat(np.matmul(agent_rot.T, obj_rot))
self.last_obj_grabbed = self.obj_grabbed
def step(self, action):
self.grab_obj(action)
obs, rew, done, info = self.env.step(action)
return self.observation(obs), rew, done, info
class GrabClosestWrapper(gym.ActionWrapper):
'''
    Convert the action_pull (either grab or pull) to a single binary action rather than
    having one dimension per box. Since the grab wrapper only grabs the closest box, the
    per-agent binary action is simply repeated across all boxes (an all 1's action).
'''
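    # Usage sketch (hypothetical wrapper stack; the constructor arguments shown for
    # GrabObjWrapper are illustrative assumptions, not taken from this file):
    #     env = GrabObjWrapper(base_env, body_names=[f'moveable_box{i}' for i in range(n_boxes)])
    #     env = GrabClosestWrapper(env)
    # After wrapping, action['action_pull'] holds a single binary value per agent, which
    # action() below repeats back into one value per box for the underlying grab wrapper.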
def __init__(self, env):
super().__init__(env)
self.action_space = deepcopy(self.action_space)
self.n_obj = len(self.action_space.spaces['action_pull'].spaces[0].nvec)
self.action_space.spaces['action_pull'] = (
Tuple([Discrete(2) for _ in range(self.unwrapped.n_agents)]))
def action(self, action):
action = deepcopy(action)
action['action_pull'] = np.repeat(action['action_pull'][:, None], self.n_obj, -1)
return action
class LockObjWrapper(gym.Wrapper):
'''
Allows agents to lock objects at their current position.
Args:
body_names (list): list of body names that the agent can lock
            radius_multiplier (float): How far away this can be activated (multiplier on box size)
            agent_idx_allowed_to_lock (np array of ints): Indices of agents that are allowed to lock.
Defaults to all
lock_type (string): Options are
any_lock: if any agent wants to lock an object it will get locked
all_lock: all agents that are close enough must want to lock the object
any_lock_specific: if any agent wants to lock an object it will get locked. However,
now the lock is agent specific, and only the agent that locked the object can unlock it.
all_lock_team_specific: like all_lock, but only team members of the agent that
locked the object can unlock it.
ac_obs_prefix (string): prefix for the action and observation keys. This is useful if using
the lock wrapper more than once.
obj_in_game_metadata_keys (list of string): keys in metadata with boolean array saying
which objects are currently in the game. This is used in the event we are randomizing
number of objects
agent_allowed_to_lock_keys (list of string): keys in obs determining whether agent is allowed
to lock a certain object. Each key should be a mask matrix of dim (n_agents, n_obj)
'''
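    # Construction sketch (hypothetical arguments; the body names, agent counts and lock
    # settings below are assumptions for illustration, not taken from this file):
    #     env = LockObjWrapper(env,
    #                          body_names=[f'moveable_box{i}' for i in range(n_boxes)],
    #                          agent_idx_allowed_to_lock=np.arange(n_lockers),
    #                          lock_type='all_lock_team_specific', ac_obs_prefix='')
    # With an empty prefix this adds an 'action_glue' action of shape (n_agents, n_obj) and
    # the obj_lock / you_lock / team_lock observations declared in __init__ below.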
def __init__(self, env, body_names, radius_multiplier=1.5, agent_idx_allowed_to_lock=None,
lock_type="any_lock", ac_obs_prefix='', obj_in_game_metadata_keys=None,
agent_allowed_to_lock_keys=None):
super().__init__(env)
self.n_agents = self.unwrapped.n_agents
self.n_obj = len(body_names)
self.body_names = body_names
self.agent_idx_allowed_to_lock = np.arange(self.n_agents) if agent_idx_allowed_to_lock is None else agent_idx_allowed_to_lock
self.lock_type = lock_type
self.ac_obs_prefix = ac_obs_prefix
self.obj_in_game_metadata_keys = obj_in_game_metadata_keys
self.agent_allowed_to_lock_keys = agent_allowed_to_lock_keys
self.action_space.spaces[f'action_{ac_obs_prefix}glue'] = (
Tuple([MultiDiscrete([2] * self.n_obj) for _ in range(self.n_agents)]))
self.observation_space = update_obs_space(env, {f'{ac_obs_prefix}obj_lock': (self.n_obj, 1),
f'{ac_obs_prefix}you_lock': (self.n_agents, self.n_obj, 1),
f'{ac_obs_prefix}team_lock': (self.n_agents, self.n_obj, 1)})
self.lock_radius = radius_multiplier*self.metadata['box_size']
self.obj_locked = np.zeros((self.n_obj,), dtype=int)
def observation(self, obs):
obs[f'{self.ac_obs_prefix}obj_lock'] = self.obj_locked[:, None]
you_lock = np.arange(self.n_agents)[:, None] == self.which_locked[None, :]
obs[f'{self.ac_obs_prefix}you_lock'] = np.expand_dims(you_lock * obs[f'{self.ac_obs_prefix}obj_lock'].T, axis=-1)
obs[f'{self.ac_obs_prefix}team_lock'] = np.zeros((self.n_agents, self.n_obj, 1))
for team in np.unique(self.metadata['team_index']):
team_mask = self.metadata['team_index'] == team
obs[f'{self.ac_obs_prefix}team_lock'][team_mask] = np.any(obs[f'{self.ac_obs_prefix}you_lock'][team_mask], 0)
return obs
def reset(self):
obs = self.env.reset()
sim = self.unwrapped.sim
if self.obj_in_game_metadata_keys is not None:
self.actual_body_slice = np.concatenate([self.metadata[k] for k in self.obj_in_game_metadata_keys])
else:
            self.actual_body_slice = np.ones((len(self.body_names))).astype(bool)
actual_body_names = list(compress(self.body_names, self.actual_body_slice))
self.n_obj = len(actual_body_names)
# Cache ids
self.obj_body_idxs = np.array([sim.model.body_name2id(body_name) for body_name in actual_body_names])
self.obj_jnt_idxs = [np.where(sim.model.jnt_bodyid == body_idx)[0] for body_idx in self.obj_body_idxs]
self.obj_geom_ids = [np.where(sim.model.geom_bodyid == body_idx)[0] for body_idx in self.obj_body_idxs]
self.agent_body_idxs = np.array([sim.model.body_name2id(f"agent{i}:particle") for i in range(self.n_agents)])
self.agent_body_idxs = self.agent_body_idxs[self.agent_idx_allowed_to_lock]
self.agent_geom_ids = np.array([sim.model.geom_name2id(f'agent{i}:agent') for i in range(self.n_agents)])
self.agent_geom_ids = self.agent_geom_ids[self.agent_idx_allowed_to_lock]
self.unlock_objs()
self.obj_locked = np.zeros((self.n_obj,), dtype=bool)
self.which_locked = np.zeros((self.n_obj,), dtype=int)
if self.agent_allowed_to_lock_keys is not None:
self.agent_allowed_to_lock_mask = np.concatenate([obs[k] for k in self.agent_allowed_to_lock_keys])
else:
self.agent_allowed_to_lock_mask = np.ones((self.n_agents, self.n_obj))
return self.observation(obs)
def lock_obj(self, action_lock):
'''
Implements object gluing for all agents
Args:
                action_lock: (n_agents, n_obj) boolean matrix of lock/unlock actions
'''
sim = self.unwrapped.sim
action_lock = action_lock[self.agent_idx_allowed_to_lock]
action_lock = action_lock[:, self.actual_body_slice]
agent_pos = sim.data.body_xpos[self.agent_body_idxs]
obj_pos = sim.data.body_xpos[self.obj_body_idxs]
obj_width = sim.model.geom_size[np.concatenate(self.obj_geom_ids)]
obj_quat = sim.data.body_xquat[self.obj_body_idxs]
assert len(obj_width) == len(obj_quat), (
"Number of object widths must be equal to number of quaternions for direct distance calculation method. " +
"This might be caused by a body that contains several geoms.")
obj_dist = dist_pt_to_cuboid(agent_pos, obj_pos, obj_width, obj_quat)
allowed_and_desired = np.logical_and(action_lock, obj_dist <= self.lock_radius)
allowed_and_desired = np.logical_and(allowed_and_desired, self.agent_allowed_to_lock_mask)
allowed_and_not_desired = np.logical_and(1 - action_lock, obj_dist <= self.lock_radius)
allowed_and_not_desired = np.logical_and(allowed_and_not_desired, self.agent_allowed_to_lock_mask)
# objs_to_lock should _all_ be locked this round. new_objs_to_lock are objs that were not locked last | |
on_week_days=(FRIDAY(1),),
count=10)
start = datetime(1997, 9, 5, hour=9)
expected = (
datetime(1997, 9, 5, hour=9),
datetime(1997, 10, 3, hour=9),
datetime(1997, 11, 7, hour=9),
datetime(1997, 12, 5, hour=9),
datetime(1998, 1, 2, hour=9),
datetime(1998, 2, 6, hour=9),
datetime(1998, 3, 6, hour=9),
datetime(1998, 4, 3, hour=9),
datetime(1998, 5, 1, hour=9),
datetime(1998, 6, 5, hour=9),
)
self.assertEqual(tuple(rule.iterate_from(start)), expected)
def test_monthly_on_nth_week_day_2(self):
"""Monthly on the first Friday.
RRULE:FREQ=MONTHLY;UNTIL=19971224T000000;BYDAY=1FR
DTSTART:19970905T090000
"""
rule = RecurrenceRule(MONTHLY,
on_week_days=(FRIDAY(1),),
until=datetime(1997, 12, 24))
start = datetime(1997, 9, 5, hour=9)
expected = (
datetime(1997, 9, 5, hour=9),
datetime(1997, 10, 3, hour=9),
datetime(1997, 11, 7, hour=9),
datetime(1997, 12, 5, hour=9),
)
self.assertEqual(tuple(rule.iterate_from(start)), expected)
def test_monthly_on_nth_week_day_3(self):
"""Monthly on the second to last Monday.
RRULE:FREQ=MONTHLY;COUNT=6;BYDAY=-2MO
DTSTART:19970922T090000
"""
rule = RecurrenceRule(MONTHLY,
on_week_days=(MONDAY(-2),),
count=6)
start = datetime(1997, 9, 22, hour=9)
expected = (
datetime(1997, 9, 22, hour=9),
datetime(1997, 10, 20, hour=9),
datetime(1997, 11, 17, hour=9),
datetime(1997, 12, 22, hour=9),
datetime(1998, 1, 19, hour=9),
datetime(1998, 2, 16, hour=9),
)
self.assertEqual(tuple(rule.iterate_from(start)), expected)
def test_every_2_months_on_nth_week_days(self):
"""Every other month on the first and last Sunday.
RRULE:FREQ=MONTHLY;INTERVAL=2;COUNT=10;BYDAY=1SU,-1SU
DTSTART:19970907T090000
"""
rule = RecurrenceRule(MONTHLY,
interval=2,
on_week_days=(SUNDAY(1), SUNDAY(-1)),
count=10)
start = datetime(1997, 9, 7, hour=9)
expected = (
datetime(1997, 9, 7, hour=9),
datetime(1997, 9, 28, hour=9),
datetime(1997, 11, 2, hour=9),
datetime(1997, 11, 30, hour=9),
datetime(1998, 1, 4, hour=9),
datetime(1998, 1, 25, hour=9),
datetime(1998, 3, 1, hour=9),
datetime(1998, 3, 29, hour=9),
datetime(1998, 5, 3, hour=9),
datetime(1998, 5, 31, hour=9),
)
self.assertEqual(tuple(rule.iterate_from(start)), expected)
def test_monthly_on_month_day_1(self):
"""Monthly on the third to last day.
RRULE:FREQ=MONTHLY;BYMONTHDAY=-3;COUNT=6
DTSTART:19970928T090000
"""
rule = RecurrenceRule(MONTHLY,
on_month_days=(-3,),
count=6)
start = datetime(1997, 9, 28, hour=9)
expected = (
datetime(1997, 9, 28, hour=9),
datetime(1997, 10, 29, hour=9),
datetime(1997, 11, 28, hour=9),
datetime(1997, 12, 29, hour=9),
datetime(1998, 1, 29, hour=9),
datetime(1998, 2, 26, hour=9),
)
self.assertEqual(tuple(rule.iterate_from(start)), expected)
def test_monthly_on_month_days_1(self):
"""Monthly on the 2nd and 15th days.
RRULE:FREQ=MONTHLY;COUNT=10;BYMONTHDAY=2,15
DTSTART:19970902T090000
"""
rule = RecurrenceRule(MONTHLY,
on_month_days=(2, 15),
count=10)
start = datetime(1997, 9, 2, hour=9)
expected = (
datetime(1997, 9, 2, hour=9),
datetime(1997, 9, 15, hour=9),
datetime(1997, 10, 2, hour=9),
datetime(1997, 10, 15, hour=9),
datetime(1997, 11, 2, hour=9),
datetime(1997, 11, 15, hour=9),
datetime(1997, 12, 2, hour=9),
datetime(1997, 12, 15, hour=9),
datetime(1998, 1, 2, hour=9),
datetime(1998, 1, 15, hour=9),
)
self.assertEqual(tuple(rule.iterate_from(start)), expected)
def test_monthly_on_month_days_2(self):
"""Monthly on the first and last days.
RRULE:FREQ=MONTHLY;COUNT=10;BYMONTHDAY=1,-1
DTSTART:19970930T090000
"""
rule = RecurrenceRule(MONTHLY,
on_month_days=(1, -1),
count=10)
start = datetime(1997, 9, 30, hour=9)
expected = (
datetime(1997, 9, 30, hour=9),
datetime(1997, 10, 1, hour=9),
datetime(1997, 10, 31, hour=9),
datetime(1997, 11, 1, hour=9),
datetime(1997, 11, 30, hour=9),
datetime(1997, 12, 1, hour=9),
datetime(1997, 12, 31, hour=9),
datetime(1998, 1, 1, hour=9),
datetime(1998, 1, 31, hour=9),
datetime(1998, 2, 1, hour=9),
)
self.assertEqual(tuple(rule.iterate_from(start)), expected)
def test_every_18_months_on_month_days(self):
"""Every 18 months from the 10th to the 15th.
RRULE:FREQ=MONTHLY;INTERVAL=18;COUNT=10;BYMONTHDAY=10,11,12,13,14,15
DTSTART:19970910T090000
"""
rule = RecurrenceRule(MONTHLY,
interval=18,
on_month_days=(10, 11, 12, 13, 14, 15),
count=10)
start = datetime(1997, 9, 10, hour=9)
expected = (
datetime(1997, 9, 10, hour=9),
datetime(1997, 9, 11, hour=9),
datetime(1997, 9, 12, hour=9),
datetime(1997, 9, 13, hour=9),
datetime(1997, 9, 14, hour=9),
datetime(1997, 9, 15, hour=9),
datetime(1999, 3, 10, hour=9),
datetime(1999, 3, 11, hour=9),
datetime(1999, 3, 12, hour=9),
datetime(1999, 3, 13, hour=9),
)
self.assertEqual(tuple(rule.iterate_from(start)), expected)
def test_every_2_months_on_week_day(self):
"""Every other month on Tuesdays.
RRULE:FREQ=MONTHLY;INTERVAL=2;BYDAY=TU;COUNT=18
DTSTART:19970902T090000
"""
rule = RecurrenceRule(MONTHLY,
interval=2,
on_week_days=(TUESDAY,),
count=18)
start = datetime(1997, 9, 2, hour=9)
expected = (
datetime(1997, 9, 2, hour=9),
datetime(1997, 9, 9, hour=9),
datetime(1997, 9, 16, hour=9),
datetime(1997, 9, 23, hour=9),
datetime(1997, 9, 30, hour=9),
datetime(1997, 11, 4, hour=9),
datetime(1997, 11, 11, hour=9),
datetime(1997, 11, 18, hour=9),
datetime(1997, 11, 25, hour=9),
datetime(1998, 1, 6, hour=9),
datetime(1998, 1, 13, hour=9),
datetime(1998, 1, 20, hour=9),
datetime(1998, 1, 27, hour=9),
datetime(1998, 3, 3, hour=9),
datetime(1998, 3, 10, hour=9),
datetime(1998, 3, 17, hour=9),
datetime(1998, 3, 24, hour=9),
datetime(1998, 3, 31, hour=9),
)
self.assertEqual(tuple(rule.iterate_from(start)), expected)
    def test_monthly_on_week_day_and_month_day(self):
"""Monthly on every Friday 13th.
RRULE:FREQ=MONTHLY;BYDAY=FR;BYMONTHDAY=13;COUNT=5
DTSTART:19970902T090000
"""
rule = RecurrenceRule(MONTHLY,
on_month_days=(13,),
on_week_days=(FRIDAY,),
count=5)
start = datetime(1997, 9, 2, hour=9)
expected = (
datetime(1998, 2, 13, hour=9),
datetime(1998, 3, 13, hour=9),
datetime(1998, 11, 13, hour=9),
datetime(1999, 8, 13, hour=9),
datetime(2000, 10, 13, hour=9),
)
self.assertEqual(tuple(rule.iterate_from(start)), expected)
def test_monthly_on_week_day_and_month_days(self):
"""Monthly on the first Saturday following the first Sunday.
RRULE:FREQ=MONTHLY;BYDAY=SA;BYMONTHDAY=7,8,9,10,11,12,13;COUNT=10
DTSTART:19970913T090000
"""
rule = RecurrenceRule(MONTHLY,
on_month_days=(7, 8, 9, 10, 11, 12, 13),
on_week_days=(SATURDAY,),
count=10)
start = datetime(1997, 9, 13, hour=9)
expected = (
datetime(1997, 9, 13, hour=9),
datetime(1997, 10, 11, hour=9),
datetime(1997, 11, 8, hour=9),
datetime(1997, 12, 13, hour=9),
datetime(1998, 1, 10, hour=9),
datetime(1998, 2, 7, hour=9),
datetime(1998, 3, 7, hour=9),
datetime(1998, 4, 11, hour=9),
datetime(1998, 5, 9, hour=9),
datetime(1998, 6, 13, hour=9),
)
self.assertEqual(tuple(rule.iterate_from(start)), expected)
def test_monthly_on_week_days_and_set_pos_1(self):
"""Monthly on the 3rd Tuesday, Wednesday, or Thursday.
RRULE:FREQ=MONTHLY;COUNT=3;BYDAY=TU,WE,TH;BYSETPOS=3
DTSTART:19970904T090000
"""
rule = RecurrenceRule(MONTHLY,
on_week_days=(TUESDAY, WEDNESDAY, THURSDAY),
on_set_pos=(3,),
count=3)
start = datetime(1997, 9, 4, hour=9)
expected = (
datetime(1997, 9, 4, hour=9),
datetime(1997, 10, 7, hour=9),
datetime(1997, 11, 6, hour=9),
)
self.assertEqual(tuple(rule.iterate_from(start)), expected)
def test_monthly_on_week_days_and_set_pos_2(self):
"""Monthly on the second to last week day.
RRULE:FREQ=MONTHLY;BYDAY=MO,TU,WE,TH,FR;BYSETPOS=-2;COUNT=7
DTSTART:19970929T090000
"""
rule = RecurrenceRule(MONTHLY,
on_week_days=(
MONDAY,
TUESDAY,
WEDNESDAY,
THURSDAY,
FRIDAY,
),
on_set_pos=(-2,),
count=7)
start = datetime(1997, 9, 29, hour=9)
expected = (
datetime(1997, 9, 29, hour=9),
datetime(1997, 10, 30, hour=9),
datetime(1997, 11, 27, hour=9),
datetime(1997, 12, 30, hour=9),
datetime(1998, 1, 29, hour=9),
datetime(1998, 2, 26, hour=9),
datetime(1998, 3, 30, hour=9),
)
self.assertEqual(tuple(rule.iterate_from(start)), expected)
def test_monthly_on_month_days(self):
"""Monthly on the 15th and 30th.
Example where an invalid date (30th of February) is ignored.
RRULE:FREQ=MONTHLY;BYMONTHDAY=15,30;COUNT=5
DTSTART:20070115T090000
"""
rule = RecurrenceRule(MONTHLY,
on_month_days=(15, 30),
count=5,)
start = datetime(2007, 1, 15, hour=9)
expected = (
datetime(2007, 1, 15, hour=9),
datetime(2007, 1, 30, hour=9),
datetime(2007, 2, 15, hour=9),
datetime(2007, 3, 15, hour=9),
datetime(2007, 3, 30, hour=9),
)
self.assertEqual(tuple(rule.iterate_from(start)), expected)
def test_monthly(self):
"""Monthly.
        Example where invalid dates (the 31st of some months) are ignored.
RRULE:FREQ=MONTHLY;COUNT=12
DTSTART:20070131T090000
"""
rule = RecurrenceRule(MONTHLY,
count=12)
start = datetime(2007, 1, 31, hour=9)
expected = (
datetime(2007, 1, 31, hour=9),
datetime(2007, 3, 31, hour=9),
datetime(2007, 5, 31, hour=9),
datetime(2007, 7, 31, hour=9),
datetime(2007, 8, 31, hour=9),
datetime(2007, 10, 31, hour=9),
datetime(2007, 12, 31, hour=9),
datetime(2008, 1, 31, hour=9),
datetime(2008, 3, 31, hour=9),
datetime(2008, 5, 31, hour=9),
datetime(2008, 7, 31, hour=9),
datetime(2008, 8, 31, hour=9),
)
self.assertEqual(tuple(rule.iterate_from(start)), expected)
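    # Note: as the two tests above illustrate, months that lack the requested day
    # (e.g. no 30th of February, no 31st in short months) are skipped entirely
    # rather than clamped to the last valid day of the month.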
def test_weekly_1(self):
"""Weekly.
RRULE:FREQ=WEEKLY;COUNT=10
DTSTART:19970902T090000
"""
rule = RecurrenceRule(WEEKLY,
count=10)
start = datetime(1997, 9, 2, hour=9)
expected = (
datetime(1997, 9, 2, hour=9),
datetime(1997, 9, 9, hour=9),
datetime(1997, 9, 16, hour=9),
datetime(1997, 9, 23, hour=9),
datetime(1997, 9, 30, hour=9),
datetime(1997, 10, 7, hour=9),
datetime(1997, 10, 14, hour=9),
datetime(1997, 10, 21, hour=9),
datetime(1997, 10, 28, hour=9),
datetime(1997, 11, 4, hour=9),
)
self.assertEqual(tuple(rule.iterate_from(start)), expected)
def test_weekly_2(self):
"""Weekly.
RRULE:FREQ=WEEKLY;UNTIL=19971224T000000
DTSTART:19970902T090000
"""
rule = RecurrenceRule(WEEKLY,
until=datetime(1997, 12, 24))
start = datetime(1997, 9, 2, hour=9)
expected = (
datetime(1997, 9, 2, hour=9),
datetime(1997, 9, 9, hour=9),
datetime(1997, 9, 16, hour=9),
datetime(1997, 9, 23, hour=9),
datetime(1997, 9, 30, hour=9),
datetime(1997, 10, 7, hour=9),
datetime(1997, 10, 14, hour=9),
datetime(1997, 10, 21, hour=9),
datetime(1997, 10, 28, hour=9),
datetime(1997, 11, 4, hour=9),
datetime(1997, 11, 11, hour=9),
datetime(1997, 11, 18, hour=9),
datetime(1997, 11, 25, hour=9),
datetime(1997, 12, 2, hour=9),
datetime(1997, 12, 9, hour=9),
datetime(1997, 12, 16, hour=9),
datetime(1997, 12, 23, hour=9),
)
self.assertEqual(tuple(rule.iterate_from(start)), expected)
def test_every_2_weeks(self):
"""Every other week.
With Sunday as the week start.
RRULE:FREQ=WEEKLY;INTERVAL=2;WKST=SU;COUNT=13
DTSTART:19970902T090000
"""
rule = RecurrenceRule(WEEKLY,
interval=2,
week_start=SUNDAY,
count=13)
start = datetime(1997, 9, 2, hour=9)
expected = (
datetime(1997, 9, 2, hour=9),
datetime(1997, 9, 16, hour=9),
datetime(1997, 9, 30, hour=9),
datetime(1997, 10, 14, hour=9),
datetime(1997, 10, 28, hour=9),
datetime(1997, 11, 11, hour=9),
datetime(1997, 11, 25, hour=9),
datetime(1997, 12, 9, hour=9),
datetime(1997, 12, 23, hour=9),
datetime(1998, 1, 6, hour=9),
datetime(1998, 1, 20, hour=9),
datetime(1998, 2, 3, hour=9),
datetime(1998, 2, 17, hour=9),
)
self.assertEqual(tuple(rule.iterate_from(start)), expected)
def test_weekly_on_week_days_1(self):
"""Weekly on Tuesday and Thursday.
With Sunday as the week start.
RRULE:FREQ=WEEKLY;UNTIL=19971007T000000;WKST=SU;BYDAY=TU,TH
DTSTART:19970902T090000
"""
rule = RecurrenceRule(WEEKLY,
week_start=SUNDAY,
on_week_days=(TUESDAY, THURSDAY),
until=datetime(1997, 10, 7))
start = datetime(1997, 9, 2, hour=9)
expected = (
datetime(1997, 9, 2, hour=9),
datetime(1997, 9, 4, hour=9),
datetime(1997, 9, 9, hour=9),
datetime(1997, 9, 11, hour=9),
datetime(1997, 9, 16, hour=9),
datetime(1997, 9, 18, hour=9),
datetime(1997, 9, 23, hour=9),
datetime(1997, 9, 25, hour=9),
datetime(1997, 9, 30, hour=9),
datetime(1997, 10, | |
"operator_eval", "eval"]
ops = {
"*": operator.mul,
"/": operator.truediv,
"//": operator.floordiv,
"%": operator.mod,
"bitand": operator.and_,
"bitor": operator.or_,
"bitxor": operator.xor,
}
def build(self, tokens):
self.value = tokens[0]
_eval = self._eval = self.value[0].eval
ops = self.ops
operator_eval = self.operator_eval = [
(ops[op], val.eval) for op, val in pairs(self.value[1:])
]
if len(self.operator_eval) == 1:
op_func, rhs_eval = self.operator_eval[0]
self.eval = lambda context: op_func(_eval(context), rhs_eval(context))
else:
def eval(context):
prod = _eval(context)
for op_func, rhs_eval in operator_eval:
prod = op_func(prod, rhs_eval(context))
return prod
self.eval = eval
# def eval(self, context):
# prod = self._eval(context)
# for op_func, _eval in self.operator_eval:
# prod = op_func(prod, _eval(context))
# return prod
class EvalAddOp(Evaluator):
"Class to evaluate addition and subtraction expressions"
__slots__ = ["operator_eval", "value", "_eval", "eval"]
ops = {"+": operator.add, "-": operator.sub}
def build(self, tokens):
self.value = tokens[0]
_eval = self._eval = self.value[0].eval
ops = self.ops
operator_eval = self.operator_eval = [
(ops[op], val.eval) for op, val in pairs(self.value[1:])
]
if len(self.operator_eval) == 1:
op_func, rhs_eval = self.operator_eval[0]
self.eval = lambda context: op_func(_eval(context), rhs_eval(context))
else:
def eval(context):
prod = _eval(context)
for op_func, rhs_eval in operator_eval:
prod = op_func(prod, rhs_eval(context))
return prod
self.eval = eval
# def eval(self, context):
# sum = self._eval(context)
# for op_func, _eval in self.operator_eval:
# sum = op_func(sum, _eval(context))
# return sum
class EvalRangeOp(Evaluator):
__slots__ = ["_evals"]
def build(self, tokens):
self._evals = [t.eval for t in tokens[0][0::2]]
def eval(self, context):
a, b = self._evals
return ExpressionRange.create(context, a(context), b(context), inclusive=True)
class EvalExclusiveRangeOp(Evaluator):
__slots__ = ["_evals"]
def build(self, tokens):
self._evals = [t.eval for t in tokens[0][0::2]]
def eval(self, context):
a, b = self._evals
return ExpressionRange.create(context, a(context), b(context), inclusive=False)
class EvalTernaryOp(Evaluator):
__slots__ = ["evals"]
def build(self, tokens):
self.evals = [t.eval for t in tokens[0][::2]]
def eval(self, context):
condition, truthy, falsey = self.evals
if condition(context):
return truthy(context)
else:
return falsey(context)
def _match_re(a, b):
if isinstance(b, EvalRegExp):
return truth(b.match(text_type(a)))
return truth(re.match(b, text_type(a)))
def wildcard_match(name, pattern):
if isinstance(pattern, list):
return wildcard.match_any(pattern, name)
else:
return wildcard.match(pattern, name)
def _in_operator(a, b):
try:
return a in b
    except Exception:
return False
def _str_in(value, seq):
"""Return True if the string representation of a value equals a
string representation of any value in a sequence"""
try:
str_value = text_type(value)
        return any(str_value == text_type(item) for item in seq)
    except Exception:
        return False
class EvalComparisonOp(Evaluator):
"Class to evaluate comparison expressions"
__slots__ = ["value", "_eval", "operator_eval"]
opMap = {
"<": operator.lt,
"lt": operator.lt,
"<=": operator.le,
"lte": operator.le,
">": operator.gt,
"gt": operator.gt,
">=": operator.ge,
"gte": operator.ge,
"!=": operator.ne,
"==": operator.eq,
"~=": lambda a, b: text_type(a).lower() == text_type(b).lower(),
"^=": lambda a, b: text_type(a).startswith(text_type(b)),
"$=": lambda a, b: text_type(a).endswith(text_type(b)),
"is": operator.is_,
"is not": operator.is_not,
"in": _in_operator,
"not in": lambda a, b: not _in_operator(a, b),
"instr": _str_in,
"not instr": lambda a, b: not _str_in(a, b),
"matches": _match_re,
"fnmatches": wildcard_match,
}
def build(self, tokens):
self.value = tokens[0]
self._eval = self.value[0].eval
self.operator_eval = [
(self.opMap[op], val.eval) for op, val in pairs(self.value[1:])
]
def eval(self, context):
val1 = self._eval(context)
for op_func, _eval in self.operator_eval:
val2 = _eval(context)
val1 = op_func(val1, val2)
if not val1:
return False
return True
class EvalFormatOp(Evaluator):
__slots__ = ["value", "_eval", "evals"]
def build(self, tokens):
self.value = tokens[0]
self._eval = self.value[0].eval
self.evals = [val.eval for op, val in pairs(self.value[1:])]
def eval(self, context):
val1 = self._eval(context)
for _eval in self.evals:
fmt = _eval(context)
if not isinstance(fmt, string_types):
raise ValueError("format should be a string, not {!r}".format(fmt))
return format(val1, fmt)
return val1
class EvalLogicOpOR(Evaluator):
__slots__ = ["value", "_eval", "operator_eval"]
def build(self, tokens):
self.value = tokens[0]
self._eval = self.value[0].eval
self.operator_eval = [val.eval for op, val in pairs(self.value[1:])]
def eval(self, context):
val1 = self._eval(context)
if val1:
return val1
for _eval in self.operator_eval:
val2 = _eval(context)
val1 = val1 or val2
if val1:
return val1
return val1
class EvalLogicOpAND(Evaluator):
__slots__ = ["value", "_eval", "operator_eval"]
def build(self, tokens):
self.value = tokens[0]
self._eval = self.value[0].eval
self.operator_eval = [val.eval for op, val in pairs(self.value[1:])]
def eval(self, context):
val1 = self._eval(context)
if not val1:
return val1
for _eval in self.operator_eval:
val2 = _eval(context)
val1 = val1 and val2
if not val1:
return val1
return val1
word_characters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_0123456789"
expr = Forward()
# define the parser
integer = Word(nums)
real = Combine(Word(nums) + "." + Word(nums))
constant = oneOf("True False None yes no") + WordEnd(word_characters)
# TODO: expand on variable regex
simple_variable = Regex(r"([a-zA-Z0-9_]+)")
variable = Regex(r"([a-zA-Z0-9\._]+)")
explicit_variable = "$" + Regex(r"([a-zA-Z0-9\._]+)")
current_scope = Literal("$$")
triple_string = QuotedString("'''", escChar=None, unquoteResults=False) | QuotedString(
'"""', escChar=None, unquoteResults=False
)
string = QuotedString('"', escChar="\\", unquoteResults=False) | QuotedString(
"'", escChar="\\", unquoteResults=False
)
regexp = QuotedString("/", escChar=None)
timespan = Combine(Word(nums) + oneOf("ms s m h d"))
current_scope_operand = current_scope
variable_operand = variable
# simple_variable_operand = simple_variable
explicit_variable_operand = explicit_variable
integer_operand = integer
real_operand = real
number_operand = real | integer
triple_string_operand = triple_string
string_operand = string
groupop = Literal(",")
signop = oneOf("+ -")
multop = oneOf("* / // % bitand bitor")
filterop = oneOf("|")
plusop = oneOf("+ -")
notop = Literal("not") + WordEnd(word_characters)
rangeop = Literal("..")
exclusiverangeop = Literal("...")
ternaryop = ("?", ":")
current_scope_operand.setParseAction(EvalCurrentScope)
variable_operand.setParseAction(EvalVariable)
explicit_variable_operand.setParseAction(EvalExplicitVariable)
integer_operand.setParseAction(EvalInteger)
real_operand.setParseAction(EvalReal)
triple_string.setParseAction(EvalTripleString)
string_operand.setParseAction(EvalString)
constant.setParseAction(EvalConstant)
regexp.setParseAction(EvalRegExp)
timespan.setParseAction(EvalTimespan)
modifier = Regex(r"([a-zA-Z][a-zA-Z0-9_]*)\:")
simple_list_operand = Group(delimitedList(expr))
simple_list_operand.setParseAction(EvalSimpleList)
list_operand = Suppress("[") + delimitedList(expr) + Suppress("]")
list_operand.setParseAction(EvalList)
empty_list_operand = Literal("[]")
empty_list_operand.setParseAction(EvalEmptyList)
dict_item = Group(expr + Suppress(Literal(":")) + expr)
dict_operand = Group(Suppress("{") + delimitedList(dict_item) + Suppress("}"))
dict_operand.setParseAction(EvalDict)
empty_dict_operand = Literal("{}")
empty_dict_operand.setParseAction(EvalEmptyDict)
function_operand = Group(Suppress("`") + expr + Suppress("`"))
function_operand.setParseAction(EvalFunction)
key_pair = Group(
Regex(r"([a-zA-Z0-9_]+)") + Suppress(Literal("=") + WordEnd("=!+-*/")) + expr
)
key_pair_dict_operand = delimitedList(key_pair)
key_pair_dict_operand.setParseAction(EvalKeyPairDict)
callop = Group(("(") + expr + Suppress(")"))
index = Group(("[") + expr + Suppress("]"))
_slice = Group(
Suppress("[") + delimitedList(Optional(expr, default=None), ":") + Suppress("]")
)
braceop = callop | index
sliceop = _slice
literalindex = Regex(r"\.([a-zA-Z0-9\._]+)")
operand = (
timespan
| real_operand
| integer_operand
| triple_string_operand
| string_operand
| regexp
| constant
| function_operand
| key_pair_dict_operand
| current_scope_operand
| explicit_variable_operand
| variable_operand
| empty_list_operand
| empty_dict_operand
| list_operand
| dict_operand
)
comparisonop = (
oneOf("< <= > >= != == ~= ^= $=")
| (Literal("not in") + WordEnd())
| (Literal("not instr") + WordEnd())
| (Literal("is not") + WordEnd())
| (oneOf("is in instr lt lte gt gte matches fnmatches") + WordEnd())
)
logicopOR = Literal("or") + WordEnd()
logicopAND = Literal("and") + WordEnd()
formatop = Literal("::")
expr << operatorPrecedence(
operand,
[
(signop, 1, opAssoc.RIGHT, EvalSignOp),
(exclusiverangeop, 2, opAssoc.LEFT, EvalExclusiveRangeOp),
(rangeop, 2, opAssoc.LEFT, EvalRangeOp),
(braceop, 1, opAssoc.LEFT, EvalBraceOp),
(sliceop, 1, opAssoc.LEFT, EvalSliceOp),
(literalindex, 1, opAssoc.LEFT, EvalLiteralIndex),
(modifier, 1, opAssoc.RIGHT, EvalModifierOp),
(formatop, 2, opAssoc.LEFT, EvalFormatOp),
(multop, 2, opAssoc.LEFT, EvalMultOp),
(plusop, 2, opAssoc.LEFT, EvalAddOp),
(filterop, 2, opAssoc.LEFT, EvalFilterOp),
(comparisonop, 2, opAssoc.LEFT, EvalComparisonOp),
(notop, 1, opAssoc.RIGHT, EvalNotOp),
(logicopAND, 2, opAssoc.LEFT, EvalLogicOpAND),
(logicopOR, 2, opAssoc.LEFT, EvalLogicOpOR),
(ternaryop, 3, opAssoc.LEFT, EvalTernaryOp),
],
)
# expr.validate()
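# Illustrative expressions the grammar above is set up to parse (a rough sketch inferred
# from the operator table; these examples are not taken from this file):
#     1 + 2 * count
#     name ^= 'prefix' or flag
#     price::'.2f'              (format operator)
#     1..10  /  1...10          (inclusive / exclusive ranges)
#     upper:title               (modifier applied as a prefix)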
class DummyLock(object):
"""Replacement for real lock that does nothing"""
def __enter__(self):
pass
def __exit__(self, *args, **kwargs):
pass
class Function(object):
__slots__ = ["expression", "scope"]
def __init__(self, expression, scope=None):
self.expression = expression
if scope is None:
scope = {}
self.scope = scope
def __repr__(self):
return '<function "{}">'.format(self.expression.exp)
def __call__(self, context, **params):
with context.data_frame(params):
with context.data_scope(self.scope):
return self.expression.eval(context)
def call(self, context, params):
with context.data_frame(params):
with context.data_scope(self.scope):
return self.expression.eval(context)
def get_callable(self, context):
def call(params):
with context.data_frame(params):
with context.data_scope(self.scope):
return self.expression.eval(context)
return call
def get_scope_callable(self, context):
def callscope(scope):
with context.data_scope(scope):
return self.expression.eval(context)
return callscope
@implements_to_string
class Expression(object):
"""Evaluate an arithmetic expression of context values"""
exp_cache = {}
new_expressions = set()
_lock = threading.RLock()
def __init__(self, exp):
self.exp = exp
self.compiled_exp = None
self._eval = self._lazy_compile_eval
self.new_expressions.add(exp)
def _lazy_compile_eval(self, context):
self.compile()
return self._eval(context)
def compile(self):
self.compiled_exp = self.compile_cache(self.exp)
self._eval = self.compiled_exp[0].eval
return self
def eval(self, context):
try:
obj = self._eval(context)
return (
obj.__moyacontext__(context) if hasattr(obj, "__moyacontext__") else obj
)
except (ExpressionError, MoyaException, LogicError):
raise
except ArithmeticError as e:
if isinstance(e, ZeroDivisionError):
throw(
"math.division-error",
"Can't divide by zero in '{}'".format(self.exp),
diagnosis="Check your math",
)
else:
throw("math.arithmetic-error", text_type(e))
except Exception as e:
if context[".develop"]:
print("In expression.eval {!r}".format(self))
import traceback
                traceback.print_exc()
raise ExpressionEvalError(self.exp, original=e)
def make_function(self, context=None):
"""Returns a callable from this expression"""
return Function(self, context.obj if context else None)
def __repr__(self):
return "Expression(%r)" % self.exp
def __str__(self):
return self.exp
def __getstate__(self):
self.compiled_exp = self.compile_cache(self.exp)
return (self.exp, self.compiled_exp)
def __setstate__(self, state):
"""Bit of magic to lazily compile expressions after unpickling"""
self.exp, self.compiled_exp = state
self.exp_cache[self.exp] = self.compiled_exp
self._eval = self.compiled_exp[0].eval
@classmethod
def insert_expressions(cls, expressions):
exp_cache = cls.exp_cache
for expression in expressions:
if expression.exp not in exp_cache:
exp_cache[expression.exp] = expression.compiled_exp
@classmethod
def dump(cls, cache):
name = "expcache.{}.{}".format(VERSION, __version__)
cache.set(name, cls.exp_cache)
@classmethod
def load(cls, cache):
name = "expcache.{}.{}".format(VERSION, __version__)
exp = cache.get(name, None)
if exp is not None:
cls.exp_cache.update(exp)
return True
return False
@classmethod
def get_eval(cls, exp, context):
| |
"""
This is an explicit DG method: we invert the mass matrix and perform
a matrix-vector multiplication to advance the solution in each time step.
"""
from math import *
import mpi4py
import numpy as np
from time import time
import sys
import os
import cProfile
from firedrake import *
from firedrake.petsc import PETSc
from pyop2.utils import cached_property
from pyop2.profiling import timed_region
from pyop2.base import _trace, Dat, DataSet
from pyop2.fusion.interface import loop_chain
from pyop2.logger import info, set_log_level, INFO
import coffee.base as ast
from utils import parser, output_time, calculate_sdepth, FusionSchemes
class ElasticLF4(object):
r"""An elastic wave equation solver, using the finite element method
for spatial discretisation, and a fourth-order leap-frog time-stepping scheme."""
loop_chain_length = 28
num_solves = 8
def __init__(self, mesh, family, degree, dimension, output=1, params=None):
r""" Initialise a new elastic wave simulation.
:param mesh: The underlying computational mesh of vertices and edges.
:param str family: Specify whether CG or DG should be used.
:param int degree: Use polynomial basis functions of this degree.
:param int dimension: The spatial dimension of the problem (1, 2 or 3).
:param int output: period, in timesteps, to write solution fields to a file.
:param dict params: simulation and optimisation parameters
:returns: None
"""
self.degree = degree
self.mesh = mesh
self.dimension = dimension
self.output = output
self.tofile = params['tofile']
self.S = TensorFunctionSpace(mesh, family, degree, name='S')
self.U = VectorFunctionSpace(mesh, family, degree, name='U')
# Assumes that the S and U function spaces are the same.
self.S_tot_dofs = op2.MPI.COMM_WORLD.allreduce(self.S.dof_count, op=mpi4py.MPI.SUM)
self.U_tot_dofs = op2.MPI.COMM_WORLD.allreduce(self.U.dof_count, op=mpi4py.MPI.SUM)
info("Number of degrees of freedom (Velocity): %d" % self.U_tot_dofs)
info("Number of degrees of freedom (Stress): %d" % self.S_tot_dofs)
self.s = TrialFunction(self.S)
self.v = TestFunction(self.S)
self.u = TrialFunction(self.U)
self.w = TestFunction(self.U)
self.s0 = Function(self.S, name="StressOld")
self.sh1 = Function(self.S, name="StressHalf1")
self.stemp = Function(self.S, name="StressTemp")
self.sh2 = Function(self.S, name="StressHalf2")
self.s1 = Function(self.S, name="StressNew")
self.u0 = Function(self.U, name="VelocityOld")
self.uh1 = Function(self.U, name="VelocityHalf1")
self.utemp = Function(self.U, name="VelocityTemp")
self.uh2 = Function(self.U, name="VelocityHalf2")
self.u1 = Function(self.U, name="VelocityNew")
self.absorption_function = None
self.source_function = None
self.source_expression = None
self._dt = None
self._density = None
self._mu = None
self._l = None
self.n = FacetNormal(self.mesh)
self.I = Identity(self.dimension)
# Tiling options
self.tiling_size = params['tile_size']
self.tiling_uf = params['num_unroll']
self.tiling_mode = params['mode']
self.tiling_halo = params['extra_halo']
self.tiling_explicit = params['explicit_mode']
self.tiling_explicit_id = params['explicit_mode_id']
self.tiling_log = params['log']
self.tiling_sdepth = params['s_depth']
self.tiling_part = params['partitioning']
self.tiling_coloring = params['coloring']
self.tiling_glb_maps = params['use_glb_maps']
self.tiling_prefetch = params['use_prefetch']
# Mat-vec AST cache
self.asts = {}
if self.tofile:
# File output streams
platform = os.environ.get('NODENAME', 'unknown')
tmpdir = os.environ['TMPDIR']
base = os.path.join(tmpdir, 'output', platform,
'p%d' % self.degree, 'uf%d' % self.tiling_uf)
if op2.MPI.COMM_WORLD.rank == 0:
if not os.path.exists(base):
os.makedirs(base)
sub_dirs = [d for d in os.listdir(base)
if os.path.isdir(os.path.join(base, d))]
sub_dir = "%d_em%d_part%s_tile%s" % (len(sub_dirs),
self.tiling_explicit_id,
self.tiling_size if self.tiling_uf else 0,
self.tiling_part if self.tiling_uf else 'None')
base = os.path.join(base, sub_dir)
os.makedirs(base)
op2.MPI.COMM_WORLD.barrier()
base = op2.MPI.COMM_WORLD.bcast(base, root=0)
self.u_stream = File(os.path.join(base, 'velocity.pvd'))
self.s_stream = File(os.path.join(base, 'stress.pvd'))
@property
def absorption(self):
r""" The absorption coefficient :math:`\sigma` for the absorption term
.. math:: \sigma\mathbf{u}
where :math:`\mathbf{u}` is the velocity field.
"""
return self.absorption_function
@absorption.setter
def absorption(self, expression):
r""" Setter function for the absorption field.
:param firedrake.Expression expression: The expression to interpolate onto the absorption field.
"""
self.absorption_function.interpolate(expression)
# Source term
@property
def source(self):
r""" The source term on the RHS of the velocity (or stress) equation. """
return self.source_function
@source.setter
def source(self, expression):
r""" Setter function for the source field.
:param firedrake.Expression expression: The expression to interpolate onto the source field.
"""
self.source_function.interpolate(expression)
def assemble_inverse_mass(self):
r""" Compute the inverse of the consistent mass matrix for the velocity and stress equations.
:returns: None
"""
# Inverse of the (consistent) mass matrix for the velocity equation.
self.inverse_mass_velocity = assemble(inner(self.w, self.u)*dx, inverse=True)
self.inverse_mass_velocity.assemble()
self.imass_velocity = self.inverse_mass_velocity.M
# Inverse of the (consistent) mass matrix for the stress equation.
self.inverse_mass_stress = assemble(inner(self.v, self.s)*dx, inverse=True)
self.inverse_mass_stress.assemble()
self.imass_stress = self.inverse_mass_stress.M
def copy_massmatrix_into_dat(self):
# Copy the velocity mass matrix into a Dat
vmat = self.imass_velocity.handle
dofs_per_entity = self.U.fiat_element.entity_dofs()
dofs_per_entity = sum(self.mesh.make_dofs_per_plex_entity(dofs_per_entity))
arity = dofs_per_entity*self.U.topological.dim
self.velocity_mass_asdat = Dat(DataSet(self.mesh.cell_set, arity*arity), dtype='double')
istart, iend = vmat.getOwnershipRange()
idxs = [PETSc.IS().createGeneral(np.arange(i, i+arity, dtype=np.int32),
comm=PETSc.COMM_SELF)
for i in range(istart, iend, arity)]
submats = vmat.getSubMatrices(idxs, idxs)
for i, m in enumerate(submats):
self.velocity_mass_asdat.data[i] = m[:, :].flatten()
info("Computed velocity mass matrix")
# Copy the stress mass matrix into a Dat
smat = self.imass_stress.handle
dofs_per_entity = self.S.fiat_element.entity_dofs()
dofs_per_entity = sum(self.mesh.make_dofs_per_plex_entity(dofs_per_entity))
arity = dofs_per_entity*self.S.topological.dim
self.stress_mass_asdat = Dat(DataSet(self.mesh.cell_set, arity*arity), dtype='double')
istart, iend = smat.getOwnershipRange()
idxs = [PETSc.IS().createGeneral(np.arange(i, i+arity, dtype=np.int32),
comm=PETSc.COMM_SELF)
for i in range(istart, iend, arity)]
submats = smat.getSubMatrices(idxs, idxs)
for i, m in enumerate(submats):
self.stress_mass_asdat.data[i] = m[:, :].flatten()
info("Computed stress mass matrix")
@property
def form_uh1(self):
""" UFL for uh1 equation. """
F = inner(self.w, self.u)*dx - self.f(self.w, self.s0, self.u0, self.n, self.absorption)
return F
@cached_property
def rhs_uh1(self):
""" RHS for uh1 equation. """
return rhs(self.form_uh1)
@property
def form_stemp(self):
""" UFL for stemp equation. """
F = inner(self.v, self.s)*dx - self.g(self.v, self.uh1, self.I, self.n, self.l, self.mu, self.source)
return F
@cached_property
def rhs_stemp(self):
""" RHS for stemp equation. """
return rhs(self.form_stemp)
@property
def form_uh2(self):
""" UFL for uh2 equation. """
F = inner(self.w, self.u)*dx - self.f(self.w, self.stemp, self.u0, self.n, self.absorption)
return F
@cached_property
def rhs_uh2(self):
""" RHS for uh2 equation. """
return rhs(self.form_uh2)
@property
def form_u1(self):
""" UFL for u1 equation. """
# Note that we have multiplied through by dt here.
        F = (self.density*inner(self.w, self.u)*dx
             - self.density*inner(self.w, self.u0)*dx
             - self.dt*inner(self.w, self.uh1)*dx
             - ((self.dt**3)/24.0)*inner(self.w, self.uh2)*dx)
return F
@cached_property
def rhs_u1(self):
""" RHS for u1 equation. """
return rhs(self.form_u1)
@property
def form_sh1(self):
""" UFL for sh1 equation. """
F = inner(self.v, self.s)*dx - self.g(self.v, self.u1, self.I, self.n, self.l, self.mu, self.source)
return F
@cached_property
def rhs_sh1(self):
""" RHS for sh1 equation. """
return rhs(self.form_sh1)
@property
def form_utemp(self):
""" UFL for utemp equation. """
F = inner(self.w, self.u)*dx - self.f(self.w, self.sh1, self.u1, self.n, self.absorption)
return F
@cached_property
def rhs_utemp(self):
""" RHS for utemp equation. """
return rhs(self.form_utemp)
@property
def form_sh2(self):
""" UFL for sh2 equation. """
F = inner(self.v, self.s)*dx - self.g(self.v, self.utemp, self.I, self.n, self.l, self.mu, self.source)
return F
@cached_property
def rhs_sh2(self):
""" RHS for sh2 equation. """
return rhs(self.form_sh2)
@property
def form_s1(self):
""" UFL for s1 equation. """
# Note that we have multiplied through by dt here.
        F = (inner(self.v, self.s)*dx
             - inner(self.v, self.s0)*dx
             - self.dt*inner(self.v, self.sh1)*dx
             - ((self.dt**3)/24.0)*inner(self.v, self.sh2)*dx)
return F
@cached_property
def rhs_s1(self):
""" RHS for s1 equation. """
return rhs(self.form_s1)
def f(self, w, s0, u0, n, absorption=None):
""" The RHS of the velocity equation. """
f = -inner(grad(w), s0)*dx + inner(avg(s0)*n('+'), w('+'))*dS + inner(avg(s0)*n('-'), w('-'))*dS
if(absorption):
f += -inner(w, absorption*u0)*dx
return f
def g(self, v, u1, I, n, l, mu, source=None):
""" The RHS of the stress equation. """
        g = (- l*(v[i, j]*I[i, j]).dx(k)*u1[k]*dx
             + l*(jump(v[i, j], n[k])*I[i, j]*avg(u1[k]))*dS
             + l*(v[i, j]*I[i, j]*u1[k]*n[k])*ds
             - mu*inner(div(v), u1)*dx
             + mu*inner(avg(u1), jump(v, n))*dS
             - mu*inner(div(v.T), u1)*dx
             + mu*inner(avg(u1), jump(v.T, n))*dS
             + mu*inner(u1, dot(v, n))*ds
             + mu*inner(u1, dot(v.T, n))*ds)
if(source):
g += inner(v, source)*dx
return g
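    # f() and g() above assemble the DG weak forms of the right-hand sides: volume terms
    # (dx), interior-facet flux terms (dS) and exterior-facet terms (ds), with f driving the
    # velocity update and g driving the stress update (l and mu are the elastic material
    # parameters set on the object).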
def ast_matmul(self, F_a, implementation='optimized'):
"""Generate an AST for a PyOP2 kernel performing a matrix-vector multiplication."""
# The number of dofs on each element is /ndofs*cdim/
F_a_fs = F_a.function_space()
ndofs = F_a_fs.fiat_element.entity_dofs()
ndofs = sum(self.mesh.make_dofs_per_plex_entity(ndofs))
cdim = F_a_fs.dim
name = 'mat_vec_mul_kernel_%s' % F_a_fs.name
identifier = (ndofs, cdim, name, implementation)
if identifier in self.asts:
return self.asts[identifier]
from coffee import isa, options
if cdim and cdim % isa['dp_reg'] == 0:
simd_pragma = '#pragma simd reduction(+:sum)'
else:
simd_pragma = ''
# Craft the AST
if implementation == 'optimized' and cdim >= 4:
body = ast.Incr(ast.Symbol('sum'),
ast.Prod(ast.Symbol('A', ('i',), ((ndofs*cdim, 'j*%d + k' % cdim),)),
ast.Symbol('B', ('j', 'k'))))
body = ast.c_for('k', cdim, body, simd_pragma).children[0]
body = [ast.Decl('const int', ast.Symbol('index'), init=ast.Symbol('i%%%d' % cdim)),
ast.Decl('double', ast.Symbol('sum'), init=ast.Symbol('0.0')),
ast.c_for('j', ndofs, body).children[0],
ast.Assign(ast.Symbol('C', ('i/%d' % cdim, 'index')), 'sum')]
body = ast.Block([ast.c_for('i', ndofs*cdim, body).children[0]])
funargs = [ast.Decl('double* restrict', 'A'),
ast.Decl('double *restrict *restrict', 'B'),
ast.Decl('double *restrict *', 'C')]
fundecl = ast.FunDecl('void', name, funargs, body, ['static', 'inline'])
else:
body = ast.Incr(ast.Symbol('C', ('i/%d' % cdim, 'index')),
ast.Prod(ast.Symbol('A', ('i',), ((ndofs*cdim, 'j*%d + k' % cdim),)),
ast.Symbol('B', ('j', 'k'))))
body = ast.c_for('k', cdim, body).children[0]
body = [ast.Decl('const | |
"""Asyncio HTTP/2 client and server sessions based on the :mod:`.nghttp2` Python
wrapper around the nghttp2 API.
"""
import asyncio
import logging
import io
import collections
from urllib.parse import urlparse
from ctypes import string_at
from . import nghttp2
from .messages import Request, Response, Direction
from .streams import read_data_source
logger = logging.getLogger(__name__)
recv_id = 0
send_id = 0
@nghttp2.on_frame_recv_callback
def on_frame_recv(session, frame, protocol):
stream_id = frame[0].hd.stream_id
frame_type = frame[0].hd.type
# print(protocol.__class__.__name__, "recv", nghttp2.frame_type(frame_type).name, f"stream_id={stream_id}")
# if frame[0].hd.flags & nghttp2.flag.END_STREAM:
# print(protocol.__class__.__name__, " END_STREAM")
# if frame[0].hd.flags & nghttp2.flag.ACK:
# print(protocol.__class__.__name__, " ACK")
# if frame_type == nghttp2.frame_type.RST_STREAM:
# print(protocol.__class__.__name__, f" error_code={nghttp2.error_code(frame[0].rst_stream.error_code).name}")
# if frame_type == nghttp2.frame_type.WINDOW_UPDATE:
# if stream_id == 0:
# print(protocol.__class__.__name__, f" connection_window_size={protocol.session.get_remote_window_size()}")
# else:
# print(protocol.__class__.__name__, f" stream_window_size={protocol.session.get_stream_remote_window_size(stream_id)}")
if frame[0].hd.flags & nghttp2.flag.END_HEADERS:
protocol.headers_received(stream_id)
if frame[0].hd.flags & nghttp2.flag.END_STREAM:
if frame_type in [nghttp2.frame_type.HEADERS, nghttp2.frame_type.DATA]:
protocol.content_received(stream_id)
if frame_type == nghttp2.frame_type.WINDOW_UPDATE:
protocol.window_update_received(stream_id)
elif frame_type == nghttp2.frame_type.GOAWAY:
protocol.goaway_received(nghttp2.error_code(frame[0].goaway.error_code))
elif frame_type == nghttp2.frame_type.SETTINGS:
protocol.settings_updated()
return 0
@nghttp2.on_frame_send_callback
def on_frame_send(session, frame, protocol):
stream_id = frame[0].hd.stream_id
frame_type = frame[0].hd.type
# print(protocol.__class__.__name__, "send", nghttp2.frame_type(frame_type).name, f"stream_id={stream_id}")
# if frame[0].hd.flags & nghttp2.flag.END_STREAM:
# print(protocol.__class__.__name__, " END_STREAM")
# if frame_type == nghttp2.frame_type.RST_STREAM:
# print(protocol.__class__.__name__, f" error_code={nghttp2.error_code(frame[0].rst_stream.error_code).name}")
# if frame_type == nghttp2.frame_type.DATA:
# print(protocol.__class__.__name__, f" connection_window_size={protocol.session.get_remote_window_size()}")
# print(protocol.__class__.__name__, f" stream_window_size={protocol.session.get_stream_remote_window_size(stream_id)}")
if frame[0].hd.flags & nghttp2.flag.END_HEADERS:
protocol.headers_sent(stream_id)
if frame[0].hd.flags & nghttp2.flag.END_STREAM:
if frame_type in [nghttp2.frame_type.HEADERS, nghttp2.frame_type.DATA]:
protocol.content_sent(stream_id)
elif frame_type == nghttp2.frame_type.GOAWAY:
protocol.goaway_sent(nghttp2.error_code(frame[0].goaway.error_code))
return 0
@nghttp2.on_header_callback
def on_header(session, frame, name, namelen, value, valuelen, flags, protocol):
header = (
string_at(name, size=namelen).decode(),
string_at(value, size=valuelen).decode(),
)
# msg = nghttp2.session_get_stream_user_data(session, frame[0].hd.stream_id)
# msg.headers.append(header)
protocol.on_header(frame[0].hd.stream_id, header)
return 0
@nghttp2.on_data_chunk_recv_callback
def on_data_chunk_recv(session, flags, stream_id, data, length, protocol):
protocol.on_data_chunk_recv(stream_id, string_at(data, length))
# msg = nghttp2.session_get_stream_user_data(session, stream_id)
# msg.content.feed_data(string_at(data, length))
# protocol.session.consume_connection(length)
# protocol.session.consume_stream(stream_id, length)
return 0
@nghttp2.on_stream_close_callback
def on_stream_close(session, stream_id, error_code, protocol):
error = nghttp2.error_code(error_code)
logger.debug("Stream %d closed (%s)", stream_id, error.name)
protocol.stream_closed(stream_id, error)
return 0
@nghttp2.on_begin_headers_callback
def on_begin_headers(session, frame, protocol):
stream_id = frame[0].hd.stream_id
protocol.begin_headers(stream_id)
return 0
class BaseHTTP2(asyncio.Protocol):
def __init__(self, settings, loop):
super().__init__()
self.loop = loop
self.session = None
self.transport = None
self.peername = None
        # Dictionary mapping stream IDs to their associated stream data. We keep
        # a reference to the assigned stream data to prevent it from being
        # garbage collected while the stream is still open. Otherwise the stream
        # data Python objects could be collected and a callback would try to
        # dereference a dangling Python object pointer, leading to a SEGFAULT.
self._stream_data = {}
# Wait for the first SETTINGS frame before submitting any new stream
self._max_streams = 0
# A queue with pending (Response, Request) pairs. We use a FIFO queue to
        # only open as many new streams as allowed.
self._pending = collections.deque()
self._settings = settings
self._writing_paused = False
self._connection_lost = False
self._drain_waiter = None
self._window_update_waiters = {}
self._goaway_waiter = None
self._goaway_error = None
def connection_made(self, transport):
self.peername = transport.get_extra_info('peername')
self.transport = transport
self.establish_session()
logger.debug("Send SETTINGS frame")
self.flush()
def data_received(self, data):
logger.debug("Received %d bytes", len(data))
read = self.session.mem_recv(data)
if read < 0:
self.session.terminate(nghttp2.error_code.INTERNAL_ERROR)
self.flush()
self.transport.close()
return
if read != len(data):
            logger.warning("Only %d bytes of %d were processed", read, len(data))
self.flush()
def connection_lost(self, exc):
logger.debug("Connection to %s:%d closed", *self.peername)
self.session = None
self._connection_lost = True
if self._goaway_error is None:
self._goaway_error = ConnectionResetError('Connection lost')
reset_error = exc or self._goaway_error
for incoming, outgoing in self._stream_data.values():
if incoming is not None:
incoming.set_exception(reset_error)
if outgoing is not None:
outgoing.set_exception(reset_error)
for resp, req in self._pending:
req.set_exception(self._goaway_error)
resp.set_exception(self._goaway_error)
if not self._writing_paused and self._drain_waiter is not None:
waiter = self._drain_waiter
self._drain_waiter = None
if not waiter.done():
if exc is None:
waiter.set_result(None)
else:
waiter.set_exception(exc)
if self._drain_waiter is not None:
waiter = self._drain_waiter
self._drain_waiter = None
if not waiter.done():
waiter.set_exception(reset_error)
for waiter in self._window_update_waiters.values():
if not waiter.done():
if exc is None:
waiter.set_result(None)
else:
waiter.set_exception(exc)
self._window_update_waiters.clear()
if self._goaway_waiter is not None:
waiter = self._goaway_waiter
self._goaway_waiter = None
if not waiter.done():
if exc is None:
waiter.set_result(None)
else:
waiter.set_exception(exc)
def pause_writing(self):
self._writing_paused = True
def resume_writing(self):
self._writing_paused = False
waiter = self._drain_waiter
if waiter is not None:
self._drain_waiter = None
if not waiter.done():
waiter.set_result(None)
self.flush()
def flush(self):
if self._connection_lost:
return
        # Submit as many pending requests as allowed by the minimum
# SETTINGS_MAX_CONCURRENT_STREAMS of local and remote endpoint
while self._pending and len(self._stream_data) < self._max_streams:
            resp, req = self._pending.popleft()
if req.content.at_eof():
provider = None
else:
provider = nghttp2.data_provider(
source=nghttp2.data_source(ptr=req.content),
read_callback=read_data_source,
)
stream_id = self.session.submit_request(req.headers, provider)
req.stream_id = stream_id
resp.stream_id = stream_id
self._stream_data[stream_id] = resp, req
logger.debug("Submitted request on stream %d", stream_id)
while self.session.want_write() and not self._writing_paused:
data = self.session.mem_send()
if not data:
break
self.transport.write(data)
async def wait_for_window_update(self, stream_id):
assert stream_id not in self._window_update_waiters
if self._connection_lost:
raise self._goaway_error
waiter = self.loop.create_future()
self._window_update_waiters[stream_id] = waiter
await waiter
async def drain(self):
if self._connection_lost:
raise self._goaway_error
if not self._writing_paused:
return
assert self._drain_waiter is None
waiter = self.loop.create_future()
self._drain_waiter = waiter
await self._drain_waiter
def can_write_stream(self, stream_id):
return (
self.session.get_stream_remote_window_size(stream_id) > 0 and
self.session.get_remote_window_size() > 0
)
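    # HTTP/2 flow control: DATA frames can only be sent while both the connection-level
    # window and the per-stream window are positive, hence the two checks above; callers
    # can await wait_for_window_update() when this returns False.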
async def terminate(self, error_code=nghttp2.error_code.NO_ERROR):
if self._connection_lost:
return
assert self._goaway_waiter is None, 'Another coroutine is already waiting for the session to terminate'
self.session.terminate(error_code)
self.flush()
waiter = self.loop.create_future()
self._goaway_waiter = waiter
await waiter
def submit_response(self, stream_id, resp):
if self._connection_lost:
raise self._goaway_error
if resp.content.at_eof():
provider = None
else:
provider = nghttp2.data_provider(
source=nghttp2.data_source(ptr=resp.content),
read_callback=read_data_source,
)
self.session.submit_response(stream_id, resp.headers, provider)
req, _ = self._stream_data[stream_id]
self._stream_data[stream_id] = (req, resp)
self.flush()
def settings_updated(self):
logger.debug("SETTINGS updated")
self._max_streams = min(
self.session.get_local_settings(nghttp2.settings_id.MAX_CONCURRENT_STREAMS),
self.session.get_remote_settings(nghttp2.settings_id.MAX_CONCURRENT_STREAMS),
)
def on_header(self, stream_id, header):
incoming, _ = self._stream_data[stream_id]
incoming.headers.append(header)
def headers_sent(self, stream_id):
_, outgoing = self._stream_data[stream_id]
outgoing.headers_sent()
def on_data_chunk_recv(self, stream_id, chunk):
incoming, _ = self._stream_data[stream_id]
incoming.content.feed_data(chunk)
self.session.consume_connection(len(chunk))
def content_sent(self, stream_id):
_, outgoing = self._stream_data[stream_id]
outgoing.content_sent()
def stream_closed(self, stream_id, error_code):
if stream_id not in self._stream_data:
return
for msg in self._stream_data[stream_id]:
if msg is not None:
msg.stream_closed(error_code)
del self._stream_data[stream_id]
def window_update_received(self, stream_id):
waiter = self._window_update_waiters.get(stream_id, None)
if waiter:
del self._window_update_waiters[stream_id]
waiter.set_result(None)
def goaway_sent(self, error_code):
self._goaway_error = ConnectionResetError('Connection lost ({})'.format(
error_code.name)
)
waiter = self._goaway_waiter
if waiter is not None:
self._goaway_waiter = None
waiter.set_result(error_code)
def goaway_received(self, error_code):
self._goaway_error = ConnectionResetError('Connection lost ({})'.format(
error_code.name)
)
self.transport.close()
class ServerProtocol(BaseHTTP2):
def __init__(self, on_request_callback, settings, loop):
super().__init__(settings, loop)
self._on_request = on_request_callback
def establish_session(self):
logger.debug('Connection from %s:%d', *self.peername)
options = nghttp2.Options(no_auto_window_update=True, no_http_messaging=True)
self.session = nghttp2.Session(nghttp2.session_type.SERVER, {
'on_frame_recv': on_frame_recv,
'on_data_chunk_recv': on_data_chunk_recv,
'on_frame_send': on_frame_send,
'on_stream_close': on_stream_close,
'on_begin_headers': on_begin_headers,
'on_header': on_header,
}, user_data=self, options=options)
self.session.submit_settings(self._settings)
def begin_headers(self, stream_id):
req = Request(self, stream_id, direction=Direction.RECEIVING, loop=self.loop)
self._stream_data[stream_id] = (req, None)
def headers_received(self, stream_id):
req, _ = self._stream_data[stream_id]
req.headers_received()
self._on_request(req)
def content_received(self, stream_id):
req, _ = self._stream_data[stream_id]
req.content.feed_eof()
class ClientProtocol(BaseHTTP2):
def establish_session(self):
logger.debug('Connected to %s:%d', *self.peername)
options = nghttp2.Options(no_auto_window_update=True, no_http_messaging=True)
self.session = nghttp2.Session(nghttp2.session_type.CLIENT, {
'on_frame_recv': on_frame_recv,
'on_frame_send': on_frame_send,
'on_data_chunk_recv': on_data_chunk_recv,
'on_stream_close': on_stream_close,
'on_begin_headers': on_begin_headers,
'on_header': on_header,
}, user_data=self, options=options)
self.session.submit_settings(self._settings)
def begin_headers(self, stream_id):
pass
def headers_received(self, stream_id):
resp, _ = self._stream_data[stream_id]
resp.headers_received()
def content_received(self, stream_id):
resp, _ = self._stream_data[stream_id]
resp.content.feed_eof()
def stream_closed(self, stream_id, error_code):
# If the stream was refused, reschedule the request and the response
# into the pending queue
if error_code == nghttp2.error_code.REFUSED_STREAM:
if stream_id in self._stream_data:
resp, req = self._stream_data.pop(stream_id)
# Reset HTTP message
req._headers_sent = False
req._content_sent = False
                # Re-queue in (Response, Request) order, matching submit_request()/flush()
                self._pending.appendleft((resp, req))
super().stream_closed(stream_id, error_code)
def submit_request(self, req, resp):
if self._connection_lost:
raise self._goaway_error
self._pending.append((resp, req))
        # Submit pending requests and flush any buffered frames to the transport
self.flush()
class ServerSession(object):
def __init__(self, host, port, settings=None, loop=None):
self.host = host
self.port = port
self.loop = loop or asyncio.get_event_loop()
self.server = None
self._requests = collections.deque()
self._waiter = None
if settings is None:
self._settings = [
(nghttp2.settings_id.MAX_CONCURRENT_STREAMS, 10),
]
else:
self._settings = settings
async def __aenter__(self):
await self.start()
return self
async def __aexit__(self, *exc):
self.close()
await self.wait_closed()
def __await__(self):
return self._wait_for_request().__await__()
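    # Illustrative usage (host, port and handling code are hypothetical): awaiting the
    # session yields the next incoming request once at least one has arrived.
    #
    #     async with ServerSession('127.0.0.1', 8080) as session:
    #         while True:
    #             request = await session
    #             ...  # handle the request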
async def _wait_for_request(self):
# If no requests are available wait for at least one to arrive
if not self._requests:
assert self._waiter is None, 'Another coroutine is already waiting for new requests'
logger.debug("Wait for request")
waiter = self.loop.create_future()
self._waiter = waiter
try:
await waiter
finally:
self._waiter = None
return self._requests.popleft()
def _received_request(self, req):
self._requests.append(req)
if self._waiter is not None:
if not self._waiter.done():
self._waiter.set_result(None)
async def start(self):
assert self.server is None, "ServerSession already started"
self._requests.clear()
self.server = await self.loop.create_server(
lambda: ServerProtocol(self._received_request, self._settings, self.loop),
self.host, self.port)
def close(self):
self.server.close()
waiter = self._waiter
if waiter is not None and | |
by specifying the bit
position which is set in that value.
- An 'offset','extbase','extends' triplet specifies a value
as an offset to a base value defined by the specified
'extbase' extension name, which is then cast to the
typename specified by 'extends'. This requires probing
the registry database, and imbeds knowledge of the
API extension enum scheme in this function.
- An 'alias' attribute contains the name of another enum
which this is an alias of. The other enum must be
declared first when emitting this enum."""
name = elem.get('name')
numVal = None
if 'value' in elem.keys():
value = elem.get('value')
# print('About to translate value =', value, 'type =', type(value))
if needsNum:
numVal = int(value, 0)
# If there's a non-integer, numeric 'type' attribute (e.g. 'u' or
# 'ull'), append it to the string value.
# t = enuminfo.elem.get('type')
# if t is not None and t != '' and t != 'i' and t != 's':
# value += enuminfo.type
if forceSuffix:
if bitwidth == 64:
value = value + 'ULL'
else:
value = value + 'U'
self.logMsg('diag', 'Enum', name, '-> value [', numVal, ',', value, ']')
return [numVal, value]
if 'bitpos' in elem.keys():
value = elem.get('bitpos')
bitpos = int(value, 0)
numVal = 1 << bitpos
value = '0x%08x' % numVal
if bitwidth == 64:
value = value + 'ULL'
elif forceSuffix:
value = value + 'U'
self.logMsg('diag', 'Enum', name, '-> bitpos [', numVal, ',', value, ']')
return [numVal, value]
if 'offset' in elem.keys():
# Obtain values in the mapping from the attributes
enumNegative = False
offset = int(elem.get('offset'), 0)
extnumber = int(elem.get('extnumber'), 0)
extends = elem.get('extends')
if 'dir' in elem.keys():
enumNegative = True
self.logMsg('diag', 'Enum', name, 'offset =', offset,
'extnumber =', extnumber, 'extends =', extends,
'enumNegative =', enumNegative)
# Now determine the actual enumerant value, as defined
# in the "Layers and Extensions" appendix of the spec.
numVal = self.extBase + (extnumber - 1) * self.extBlockSize + offset
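            # Worked example (assuming the Vulkan registry values extBase = 1000000000
            # and extBlockSize = 1000): extension number 2 with offset 3 yields
            # 1000000000 + (2 - 1) * 1000 + 3 = 1000001003.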
if enumNegative:
numVal *= -1
value = '%d' % numVal
# More logic needed!
self.logMsg('diag', 'Enum', name, '-> offset [', numVal, ',', value, ']')
return [numVal, value]
if 'alias' in elem.keys():
return [None, elem.get('alias')]
return [None, None]
def checkDuplicateEnums(self, enums):
"""Check enumerated values for duplicates.
- enums - list of `<enum>` Elements
returns the list with duplicates stripped"""
# Dictionaries indexed by name and numeric value.
# Entries are [ Element, numVal, strVal ] matching name or value
nameMap = {}
valueMap = {}
stripped = []
for elem in enums:
name = elem.get('name')
(numVal, strVal) = self.enumToValue(elem, True)
if name in nameMap:
# Duplicate name found; check values
(name2, numVal2, strVal2) = nameMap[name]
# Duplicate enum values for the same name are benign. This
# happens when defining the same enum conditionally in
# several extension blocks.
if (strVal2 == strVal or (numVal is not None
and numVal == numVal2)):
                    pass
# self.logMsg('info', 'checkDuplicateEnums: Duplicate enum (' + name +
# ') found with the same value:' + strVal)
else:
self.logMsg('warn', 'checkDuplicateEnums: Duplicate enum (' + name
+ ') found with different values:' + strVal
+ ' and ' + strVal2)
# Don't add the duplicate to the returned list
continue
elif numVal in valueMap:
# Duplicate value found (such as an alias); report it, but
# still add this enum to the list.
(name2, numVal2, strVal2) = valueMap[numVal]
msg = 'Two enums found with the same value: {} = {} = {}'.format(
name, name2.get('name'), strVal)
self.logMsg('error', msg)
            # Track this enum to detect follow-on duplicates
nameMap[name] = [elem, numVal, strVal]
if numVal is not None:
valueMap[numVal] = [elem, numVal, strVal]
# Add this enum to the list
stripped.append(elem)
# Return the list
return stripped
def misracstyle(self):
        return False
def misracppstyle(self):
        return False
def buildEnumCDecl(self, expand, groupinfo, groupName):
"""Generate the C declaration for an enum"""
groupElem = groupinfo.elem
# Determine the required bit width for the enum group.
# 32 is the default, which generates C enum types for the values.
bitwidth = 32
# If the constFlagBits preference is set, 64 is the default for bitmasks
if self.genOpts.conventions.constFlagBits and groupElem.get('type') == 'bitmask':
bitwidth = 64
# Check for an explicitly defined bitwidth, which will override any defaults.
if groupElem.get('bitwidth'):
try:
bitwidth = int(groupElem.get('bitwidth'))
except ValueError as ve:
self.logMsg('error', 'Invalid value for bitwidth attribute (', groupElem.get('bitwidth'), ') for ', groupName, ' - must be an integer value\n')
exit(1)
usebitmask = False
usedefine = False
# Bitmask flags can be generated as either "static const uint{32,64}_t" values,
# or as 32-bit C enums. 64-bit types must use uint64_t values.
if groupElem.get('type') == 'bitmask':
if bitwidth > 32 or self.misracppstyle():
usebitmask = True
if self.misracstyle():
usedefine = True
if usedefine or usebitmask:
# Validate the bitwidth and generate values appropriately
if bitwidth > 64:
self.logMsg('error', 'Invalid value for bitwidth attribute (', groupElem.get('bitwidth'), ') for bitmask type ', groupName, ' - must be less than or equal to 64\n')
exit(1)
else:
return self.buildEnumCDecl_BitmaskOrDefine(groupinfo, groupName, bitwidth, usedefine)
else:
# Validate the bitwidth and generate values appropriately
if bitwidth > 32:
self.logMsg('error', 'Invalid value for bitwidth attribute (', groupElem.get('bitwidth'), ') for enum type ', groupName, ' - must be less than or equal to 32\n')
exit(1)
else:
return self.buildEnumCDecl_Enum(expand, groupinfo, groupName)
def buildEnumCDecl_BitmaskOrDefine(self, groupinfo, groupName, bitwidth, usedefine):
"""Generate the C declaration for an "enum" that is actually a
set of flag bits"""
groupElem = groupinfo.elem
flagTypeName = groupElem.get('name')
# Prefix
body = "// Flag bits for " + flagTypeName + "\n"
if bitwidth == 64:
body += "typedef VkFlags64 %s;\n" % flagTypeName;
else:
body += "typedef VkFlags %s;\n" % flagTypeName;
# Maximum allowable value for a flag (unsigned 64-bit integer)
maxValidValue = 2**(64) - 1
minValidValue = 0
# Get a list of nested 'enum' tags.
enums = groupElem.findall('enum')
# Check for and report duplicates, and return a list with them
# removed.
enums = self.checkDuplicateEnums(enums)
# Accumulate non-numeric enumerant values separately and append
# them following the numeric values, to allow for aliases.
# NOTE: this doesn't do a topological sort yet, so aliases of
# aliases can still get in the wrong order.
aliasText = ''
# Loop over the nested 'enum' tags.
for elem in enums:
# Convert the value to an integer and use that to track min/max.
# Values of form -(number) are accepted but nothing more complex.
# Should catch exceptions here for more complex constructs. Not yet.
(numVal, strVal) = self.enumToValue(elem, True, bitwidth, True)
name = elem.get('name')
# Range check for the enum value
if numVal is not None and (numVal > maxValidValue or numVal < minValidValue):
self.logMsg('error', 'Allowable range for flag types in C is [', minValidValue, ',', maxValidValue, '], but', name, 'flag has a value outside of this (', strVal, ')\n')
exit(1)
decl = self.genRequirements(name, mustBeFound = False)
if self.isEnumRequired(elem):
protect = elem.get('protect')
if protect is not None:
body += '#ifdef {}\n'.format(protect)
if usedefine:
decl += "#define {} {}\n".format(name, strVal)
elif self.misracppstyle():
decl += "static constexpr {} {} {{{}}};\n".format(flagTypeName, name, strVal)
else:
# Some C compilers only allow initializing a 'static const' variable with a literal value.
# So initializing an alias from another 'static const' value would fail to compile.
# Work around this by chasing the aliases to get the actual value.
while numVal is None:
alias = self.registry.tree.find("enums/enum[@name='" + strVal + "']")
(numVal, strVal) = self.enumToValue(alias, True, bitwidth, True)
decl += "static const {} {} = {};\n".format(flagTypeName, name, strVal)
if numVal is not None:
body += decl
else:
aliasText += decl
if protect is not None:
body += '#endif\n'
# Now append the non-numeric enumerant values
body += aliasText
# Postfix
return ("bitmask", body)
def buildEnumCDecl_Enum(self, expand, groupinfo, groupName):
"""Generate the C declaration for an enumerated type"""
groupElem = groupinfo.elem
# Break the group name into prefix and suffix portions for range
| |
TVLA.
traces_to_use = np.zeros(len(project.waves), dtype=bool)
traces_to_use[trace_start:trace_end + 1] = True
if i_step == 0:
# Keep a single trace to create the figures.
single_trace = traces[1]
if save_to_disk_trace:
log.info("Saving Traces")
np.savez('tmp/traces.npy', traces=traces, traces_to_use=traces_to_use,
trace_start=trace_start, trace_end=trace_end)
if ((save_to_disk_trace is True or save_to_disk_ttest is True)
and general_test is True and i_step == 0):
np.save('tmp/single_trace.npy', single_trace)
else:
trace_file = np.load(args.trace_file)
traces = trace_file['traces']
traces_to_use = trace_file['traces_to_use']
assert num_samples == traces.shape[1]
# If a trace range is specified, it must match the range in the trace file.
# Otherwise, we might end up using a leakage model that doesn't match the actual
# traces.
if args.trace_start is None:
trace_start = trace_file['trace_start']
assert trace_start == trace_file['trace_start']
if args.trace_end is None:
trace_end = trace_file['trace_end']
assert trace_end == trace_file['trace_end']
num_traces = trace_end - trace_start + 1
# The project file must match the trace file.
assert len(project.waves) == len(traces_to_use)
# Correct num_traces based on filtering.
num_traces_orig = num_traces
num_traces = np.sum(traces_to_use)
log.info(
f"Will use {num_traces} traces "
f"({100*num_traces/num_traces_orig:.1f}%)"
)
if args.leakage_file is None:
# Create local, dense copies of keys and plaintexts. This allows the leakage
# computation to be parallelized.
keys = np.empty((num_traces_orig, 16), dtype=np.uint8)
if general_test is False:
keys[:] = project.keys[trace_start:trace_end + 1]
else:
# Existing KMAC trace sets use a mix of bytes strings and ChipWhisperer byte
            # arrays. For compatibility, we need to convert everything to numpy arrays.
# Eventually, we can drop this.
if i_step == 0:
# Convert all keys from the project file to numpy arrays once.
keys_nparrays = []
for i in range(num_traces_tot):
keys_nparrays.append(np.frombuffer(project.keys[i], dtype=np.uint8))
# In addition, for some existing trace sets the fixed key is used for the
# second instead of the first trace. For compatibility, compare a couple of
# keys and then select the fixed one. Eventually, we can drop this.
for i_key in range(10):
fixed_key = keys_nparrays[i_key]
num_hits = 0
for i in range(10):
num_hits += np.array_equal(fixed_key, keys_nparrays[i])
if num_hits > 1:
break
# Select the correct slice of keys for each step.
keys[:] = keys_nparrays[trace_start:trace_end + 1]
# Only select traces to use.
keys = keys[traces_to_use[trace_start:trace_end + 1]]
if general_test is False:
# The plaintexts are only required for non-general AES TVLA.
plaintexts = np.empty((num_traces_orig, 16), dtype=np.uint8)
plaintexts[:] = project.textins[trace_start:trace_end + 1]
plaintexts = plaintexts[traces_to_use[trace_start:trace_end + 1]]
# We don't need the project file anymore after this point. Close it together with all
# trace files opened in the background.
project.close(save=False)
if general_test is False:
        # Compute or load previously computed leakage model.
if args.leakage_file is None:
# leakage models: HAMMING_WEIGHT (default), HAMMING_DISTANCE
log.info("Computing Leakage")
leakage = Parallel(n_jobs=num_jobs)(
delayed(compute_leakage_aes)(keys[i:i + trace_step_leakage],
plaintexts[i:i + trace_step_leakage],
'HAMMING_WEIGHT')
for i in range(0, num_traces, trace_step_leakage))
leakage = np.concatenate((leakage[:]), axis=2)
if save_to_disk_leakage:
log.info("Saving Leakage")
np.save('tmp/leakage.npy', leakage)
else:
leakage = np.load(args.leakage_file)
assert num_traces == leakage.shape[2]
else:
log.info("Computing Leakage")
        # We do general fixed-vs-random TVLA. Here, the "leakage" indicates whether a trace
        # belongs to the fixed (1) or random (0) group.
leakage = np.zeros((num_traces), dtype=np.uint8)
for i in range(num_traces):
leakage[i] = np.array_equal(fixed_key, keys[i])
log.info("Building Histograms")
if general_test is False:
# For every time sample we make two histograms, one for Hamming weight of the
# sensitive variable = 0 (fixed set) and one for Hamming weight > 0 (random set).
# histograms has dimensions [num_rnds, num_bytes, 2, num_samples, trace_resolution]
# The value stored in histograms[v][w][x][y][z] shows how many traces have value z
# at sample y, given that HW(state byte w in AES round v) = 0 (fixed set, x = 0) or
# > 0 (random set, x = 1).
# The computation is parallelized over the samples.
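            # For instance (illustrative indices): histograms[1][3][0][500][200] would count
            # the fixed-set traces (HW = 0) that take value 200 at sample 500, for the round
            # at index 1 of rnd_list and the state byte at index 3 of byte_list.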
histograms = Parallel(n_jobs=num_jobs)(
delayed(compute_histograms_aes)(trace_resolution, rnd_list, byte_list,
traces[:, i:i + sample_step_hist], leakage)
for i in range(0, num_samples, sample_step_hist))
histograms = np.concatenate((histograms[:]), axis=3)
else:
# For every time sample we make 2 histograms, one for the fixed set and one for the
# random set.
# histograms has dimensions [0, 0, 2, num_samples, trace_resolution]
# The value stored in histograms[v][w][x][y][z] shows how many traces have value z
# at time y, given that trace is in the fixed (x = 0) or random (x = 1) group. The
        # v and w indices are not used but we keep them for code compatibility with
# non-general AES TVLA.
histograms = Parallel(n_jobs=num_jobs)(
delayed(compute_histograms_general)(trace_resolution,
traces[:, i:i + sample_step_hist],
leakage)
for i in range(0, num_samples, sample_step_hist))
histograms = np.concatenate((histograms[:]), axis=3)
# Add up new data to potential, previously generated histograms.
if args.input_file is not None or i_step > 0:
histograms = histograms + histograms_in
# Move current histograms to temp variable for next step.
if num_steps > 1 and i_step < num_steps - 1:
histograms_in = histograms
# Histograms can be saved for later use if output file name is passed.
if args.output_file is not None:
log.info("Saving Histograms")
np.savez(args.output_file, histograms=histograms, rnd_list=rnd_list,
byte_list=byte_list)
# Computing the t-test statistics vs. time.
log.info("Computing T-test Statistics")
# The number of samples processed by each parallel job at a time.
sample_step_ttest = num_samples // num_jobs
# By default, the first two moments are computed. This can be modified to any order.
num_orders = 2
x_axis = np.arange(trace_resolution)
# Compute statistics.
# ttest_trace has dimensions [num_orders, num_rnds, num_bytes, num_samples].
ttest_trace = Parallel(n_jobs=num_jobs)(
delayed(compute_statistics)(num_orders, rnd_list, byte_list,
histograms[:, :, :, i:i + sample_step_ttest, :],
x_axis)
for i in range(0, num_samples, sample_step_ttest))
ttest_trace = np.concatenate((ttest_trace[:]), axis=3)
# Building the t-test statistics vs. number of traces used. ttest_step has dimensions
# [num_orders, num_rnds, num_bytes, num_samples, num_steps], i.e., for every order,
# every round, every byte, every sample and every step, we track the t-test value.
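        # For example (illustrative indices), ttest_step[1, 0, 2, :, -1] would hold the
        # second-order t-statistics over all samples for the first round and third byte of
        # interest after the final step.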
log.info("Updating T-test Statistics vs. Number of Traces")
if i_step == 0:
ttest_step = np.empty((num_orders, num_rnds, num_bytes, num_samples,
num_steps))
ttest_step[:, :, :, :, i_step] = ttest_trace
if save_to_disk_ttest:
log.info("Saving T-test Step")
np.savez('tmp/ttest-step.npy',
ttest_step=ttest_step,
trace_end_vec=trace_end_vec,
rnd_list=rnd_list,
byte_list=byte_list)
rnd_ext = list(range(num_rnds))
byte_ext = list(range(num_bytes))
elif args.ttest_step_file is not None:
# Load previously generated t-test results.
ttest_step_file = np.load(args.ttest_step_file)
ttest_step = ttest_step_file['ttest_step']
num_orders = ttest_step.shape[0]
num_samples = ttest_step.shape[3]
num_steps = ttest_step.shape[4]
trace_end_vec = ttest_step_file['trace_end_vec']
# The rounds and bytes of interests must be available in the previously generated t-test
# results. In addition, we may need to translate indices to extract the right portion of
# of the loaded results.
rnd_ext = np.zeros((num_rnds), dtype=np.uint8)
byte_ext = np.zeros((num_bytes), dtype=np.uint8)
for i_rnd in range(num_rnds):
assert rnd_list[i_rnd] in ttest_step_file['rnd_list']
rnd_ext[i_rnd] = np.where(ttest_step_file['rnd_list'] == rnd_list[i_rnd])[0][0]
for i_byte in range(num_bytes):
assert byte_list[i_byte] in ttest_step_file['byte_list']
byte_ext[i_byte] = np.where(ttest_step_file['byte_list'] == byte_list[i_byte])[0][0]
# Plot the t-test vs. time figures for the maximum number of traces.
ttest_trace = ttest_step[:, :, :, :, num_steps-1]
if general_test is True:
single_trace_file = os.path.dirname(args.ttest_step_file)
single_trace_file += "/" if single_trace_file else ""
single_trace_file += "single_trace.npy"
single_trace = np.load(single_trace_file)
assert num_samples == single_trace.shape[0]
# Check ttest results.
threshold = 4.5
failure = np.any(np.abs(ttest_trace) >= threshold, axis=3)
nan = np.isnan(np.sum(ttest_trace, axis=3))
if not np.any(failure):
log.info("No leakage above threshold identified.")
if np.any(failure) or np.any(nan):
if general_test is False:
if np.any(failure):
log.info("Leakage above threshold identified in the following order(s), round(s) "
"and byte(s) marked with X:")
if np.any(nan):
log.info("Couldn't compute statistics for order(s), round(s) and byte(s) marked "
"with O:")
with UnformattedLog():
byte_str = "Byte |"
dash_str = "----------"
for i_byte in range(num_bytes):
byte_str += str(byte_list[i_byte]).rjust(5)
dash_str += "-----"
for i_order in range(num_orders):
log.info(f"Order {i_order + 1}:")
log.info(f"{byte_str}")
log.info(f"{dash_str}")
for i_rnd in range(num_rnds):
result_str = "Round " + str(rnd_list[i_rnd]).rjust(2) + " |"
for i_byte in range(num_bytes):
if failure[i_order, rnd_ext[i_rnd], byte_ext[i_byte]]:
result_str += str("X").rjust(5)
elif nan[i_order, rnd_ext[i_rnd], byte_ext[i_byte]]:
result_str += str("O").rjust(5)
else:
result_str += " "
log.info(f"{result_str}")
log.info("")
else:
log.info("Leakage above threshold identified in the following order(s) marked with X")
if np.any(nan):
log.info("Couldn't compute statistics for order(s) marked with O:")
with UnformattedLog():
for i_order in range(num_orders):
result_str = "Order " + str(i_order + 1) + ": "
if failure[i_order, 0, 0]:
result_str += "X"
elif nan[i_order, 0, 0]:
result_str += "O"
else:
| |
# %% [markdown]
# # Image compression - part 2. - Autoencoders
# In this post I will be looking at building an autoencoder to compress the MNIST dataset.
# See part 1. [here](https://github.com/stanton119/data-analysis/blob/master/PyTorchStuff/autoencoders/pca.md).
#
# Autoencoders build an encoder network that maps the original images into a latent space, and a
# decoder that reconstructs the same image from that latent representation.
# By having a latent space representation that is small we force the network to compress the information.
# The latent space is similar to the concept of components within PCA, but in this case the encoder and decoder
# can be nonlinear.
# The PCA weights that form components can be seen as a subset of the possible solutions for the autoencoder.
# As such I expect the autoencoder to do at least as good as PCA on compressing the images.
#
# First let's download the required dataset. This time we download the test set as well to inform us on overfitting.
# %%
from pathlib import Path
import torch
import torchvision
import matplotlib.pyplot as plt
plt.style.use("seaborn-whitegrid")
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()])
mnist_train_data = torchvision.datasets.MNIST(
Path() / "data", train=True, download=True, transform=transform
)
mnist_train = torch.utils.data.DataLoader(mnist_train_data, batch_size=64)
mnist_test_data = torchvision.datasets.MNIST(
Path() / "data", train=False, download=True, transform=transform
)
mnist_test = torch.utils.data.DataLoader(mnist_test_data, batch_size=64)
# %% [markdown]
# ## Dense Autoencoder
# Our first autoencoder will be based on dense layers.
# I may follow up with a comparison to convolution layers which are typically used in image based problems.
#
# This is a pretty standard setup where the output size of each layer of the encoder shrinks toward the latent dimension.
# Images are reshaped into a vector and processed through normal dense layers.
# The decoder effectively inverts the operations of the encoder.
# It uses a sigmoid activation at the end to ensure the correct pixel value range is observed.
# To optimise, we are minimising the reconstruction MSE.
# %%
import pytorch_lightning as pl
class AutoEncoderDense(pl.LightningModule):
def __init__(self, n_inputs: int = 1, n_latent: int = 5):
super().__init__()
self.train_log = []
self.n_latent = n_latent
self.encoder = torch.nn.Sequential(
torch.nn.Linear(28 * 28, 64),
torch.nn.ReLU(),
torch.nn.Linear(64, 32),
torch.nn.ReLU(),
torch.nn.Linear(32, n_latent),
)
self.decoder = torch.nn.Sequential(
torch.nn.Linear(n_latent, 32),
torch.nn.ReLU(),
torch.nn.Linear(32, 64),
torch.nn.ReLU(),
torch.nn.Linear(64, 28 * 28),
torch.nn.Sigmoid(),
)
def forward(self, x):
x = x.reshape(-1, 28 * 28)
encoded = self.encoder(x)
decoded = self.decoder(encoded)
return decoded.reshape(-1, 1, 28, 28)
def configure_optimizers(self, learning_rate=1e-3):
optimizer = torch.optim.Adam(
self.parameters(),
lr=learning_rate,
)
return optimizer
def training_step(self, batch, batch_idx):
x, y = batch
x_hat = self(x)
loss = torch.nn.MSELoss()(x_hat, x)
self.log("loss", loss)
self.train_log.append(loss.detach().numpy())
return loss
# %% [markdown]
# Compared to our PCA example, the number of parameters we are tuning here is significantly larger.
# In fact larger than our training set of 60k examples:
# %%
model_dense = AutoEncoderDense(n_latent=10)
print(model_dense.summarize())
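# %% [markdown]
# As a quick sanity check on that claim, we can count the trainable parameters directly
# (an illustrative cell reusing the `model_dense` instance created above):
# %%
n_params = sum(p.numel() for p in model_dense.parameters() if p.requires_grad)
print(f"Trainable parameters: {n_params:,} vs {len(mnist_train_data):,} training images")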
# %% [markdown]
# We will examine this in more detail later.
#
# ## Training
# We will now create several dense networks with different latent space sizes.
# We save the networks each time so that we can recall them later for predictions.
# Plotting the training MSE shows if the model has converged successfully.
# %%
latent_space_dim = [3, 5, 10, 20, 30, 50]
model_path = Path() / "models"
model_path.mkdir(exist_ok=True)
for n_latent in latent_space_dim:
print(f"training: {n_latent}")
model_dense = AutoEncoderDense(n_latent=n_latent)
trainer = pl.Trainer(max_epochs=10)
trainer.fit(model_dense, mnist_train)
torch.save(model_dense, model_path / f"dense_{n_latent}.pt")
fig, ax = plt.subplots(figsize=(10, 6))
ax.plot(model_dense.train_log)
ax.set_title(f"Training error: {n_latent}")
ax.set_xlabel("Batches")
ax.set_ylabel("MSE")
# %% [markdown]
# ## Results
# We need to get the MSE of all images so we can see how the latent space affects reconstruction error.
# For this we reload each network and predict all the training images.
# %%
# use whole training dataset
dataloader = torch.utils.data.DataLoader(
dataset=mnist_train_data, batch_size=len(mnist_train_data)
)
images_all, labels_all = next(iter(dataloader))
# dense model error
mse_train_dense = []
for n_latent in latent_space_dim:
print(f"mse: {n_latent}")
model_dense = torch.load(model_path / f"dense_{n_latent}.pt")
images_all_hat = model_dense(images_all)
_loss = torch.nn.MSELoss()(images_all_hat, images_all)
mse_train_dense.append(_loss.detach().numpy())
# %% [markdown]
# To examine the results of the networks we will compare against PCA as a baseline.
# Here we fit a PCA model as previously shown.
# Then we reconstruct the images and measure the MSE at each latent space size.
# %%
import numpy as np
import pandas as pd
import sklearn.decomposition
import sklearn.metrics
# convert images to 1D vectors
images_flat = images_all[:, 0].reshape(-1, 784).numpy()
images_flat.shape
print(f"training components: {latent_space_dim[-1]}")
pca = sklearn.decomposition.PCA(n_components=latent_space_dim[-1])
images_flat_hat = pca.inverse_transform(pca.fit_transform(images_flat))
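# The helpers below let us reuse the single PCA fit above (with the maximum latent size of 50
# components) for every latent space size: they project onto only the first n_components and
# invert that truncated projection, so we avoid refitting PCA for each size.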
def transform_truncated(pca, X, n_components):
X = pca._validate_data(X, dtype=[np.float64, np.float32], reset=False)
if pca.mean_ is not None:
X = X - pca.mean_
X_transformed = np.dot(X, pca.components_[:n_components, :].T)
if pca.whiten:
X_transformed /= np.sqrt(pca.explained_variance_)
return X_transformed
def inv_transform(pca, X, n_components):
return np.dot(X, pca.components_[:n_components, :]) + pca.mean_
def inv_forward_transform(pca, X, n_components):
return inv_transform(
pca, transform_truncated(pca, X, n_components), n_components
)
# get pca mse
mse_train_pca = []
for n_latent in latent_space_dim:
print(f"mse: {n_latent}")
images_flat_hat = inv_forward_transform(
pca, X=images_flat, n_components=n_latent
)
_loss = sklearn.metrics.mean_squared_error(images_flat_hat, images_flat)
mse_train_pca.append(_loss)
# %% [markdown]
# Now let's plot the two approaches side by side:
# %%
# reconstruction mse
fig, ax = plt.subplots(figsize=(10, 6))
ax.plot(latent_space_dim, mse_train_dense, label="dense")
ax.plot(latent_space_dim, mse_train_pca, label="pca")
ax.set_title("Reconstruction error")
ax.set_xlabel("Latent space size")
ax.set_ylabel("MSE")
fig.legend()
# %% [markdown]
# We can see that the dense autoencoder does do better generally.
# Particularly so at small latent space sizes.
# Once the latent space gets much larger, PCA becomes comparable.
# With a latent space of 50, the latent dimension is larger than the output size (32) of the
# preceding encoder layer, so we don't expect any further improvement there.
#
# ## Test set
# However as noted prior, there are more parameters than images, so we could easily be overfitting here.
# To confirm we can check the reconstruction error on the unseen test set.
# %%
# Run same analysis on test set to check for overfitting
# use whole training dataset
dataloader = torch.utils.data.DataLoader(
dataset=mnist_test_data, batch_size=len(mnist_test_data)
)
images_all, labels_all = next(iter(dataloader))
images_flat = images_all[:, 0].reshape(-1, 784).numpy()
# autoencoder
mse_test_dense = []
for n_latent in latent_space_dim:
print(f"mse: {n_latent}")
model_dense = torch.load(model_path / f"dense_{n_latent}.pt")
images_all_hat = model_dense(images_all)
_loss = torch.nn.MSELoss()(images_all_hat, images_all)
mse_test_dense.append(_loss.detach().numpy())
# pca
mse_test_pca = []
for n_latent in latent_space_dim:
print(f"mse: {n_latent}")
images_flat_hat = inv_forward_transform(
pca, X=images_flat, n_components=n_latent
)
_loss = sklearn.metrics.mean_squared_error(images_flat_hat, images_flat)
mse_test_pca.append(_loss)
# reconstruction mse
fig, ax = plt.subplots(figsize=(10, 6))
ax.plot(latent_space_dim, mse_test_dense, label="dense")
ax.plot(latent_space_dim, mse_test_pca, label="pca")
ax.set_title("Reconstruction error")
ax.set_xlabel("Latent space size")
ax.set_ylabel("MSE")
fig.legend()
# %% [markdown]
# We obtain very similar results to before.
# This gives us a good indication we are not overfitting.
# Therefore the autoencoders should generalise to unseen images fine.
# For more confidence it would be nice to apply cross validation and get multiple instances of the model and results.
# I'll skip this for now in the interests of time.
#
# ## Results - images
# We have an improvement in MSE but it's good to check the actual reconstructed images to confirm with our eyes.
#
# First for PCA - top row are the originals, subsequent rows are increasing latent space size.
# %%
fig, ax = plt.subplots(figsize=(20, 20), ncols=6, nrows=5)
for row, n_latent in enumerate(latent_space_dim[:4]):
images_hat = inv_forward_transform(
pca, X=images_flat, n_components=n_latent
).reshape(-1, 28, 28)
for col in range(6):
ax[0, col].imshow(images_all[col, 0])
ax[0, col].set_title(str(labels_all[col].numpy()))
ax[row + 1, col].imshow(images_hat[col])
ax[row + 1, col].set_title(str(labels_all[col].numpy()))
# %% [markdown]
# The same for the autoencoder:
# %%
fig, ax = plt.subplots(figsize=(20, 20), ncols=6, nrows=5)
for row, n_latent in enumerate(latent_space_dim[:4]):
model_dense = torch.load(model_path / f"dense_{n_latent}.pt")
images_hat = model_dense(images_all).detach()
for col in range(6):
ax[0, col].imshow(images_all[col, 0])
ax[0, col].set_title(str(labels_all[col].numpy()))
ax[row + 1, col].imshow(images_hat[col,0])
ax[row + 1, col].set_title(str(labels_all[col].numpy()))
# %% [markdown]
# We can see that the autoencoder is much clearer at small latent spaces.
# Even at only 3, the images are pretty decent.
#
# Similar to PCA, some digits look worse than others.
# We can plot the MSE against the digit to see which are hard to construct:
# %%
# MSE against label - PCA benchmark
images_flat_hat = inv_forward_transform(
pca, X=images_flat, n_components=latent_space_dim[2]
)
loss_label_pca = []
for label in range(0, 10):
filt = labels_all == label
_loss = sklearn.metrics.mean_squared_error(
images_flat_hat[filt], images_flat[filt]
)
loss_label_pca.append(_loss)
# MSE against label for autoencoder
loss_label = []
for row, n_latent in enumerate(latent_space_dim):
model_dense = torch.load(model_path / f"dense_{n_latent}.pt")
images_all_hat = model_dense(images_all)
_loss_label = []
for label in range(0, 10):
filt = labels_all == label
_loss = torch.nn.MSELoss()(
images_all_hat[filt].detach(), images_all[filt].detach()
).numpy().flatten()[0]
_loss_label.append(_loss)
loss_label.append(_loss_label)
# create plot with pca benchmark
df_loss = pd.DataFrame(
loss_label, index=latent_space_dim, columns=range(0, 10)
).transpose()
fig, ax = plt.subplots(figsize=(10, 6))
df_loss.plot(ax=ax, legend=False)
ax.plot(range(0, 10), loss_label_pca, '--', label=f'pca_{latent_space_dim[2]}')
ax.set_title("Reconstruction error by digit number")
ax.set_xlabel("Digit label")
ax.set_ylabel("MSE")
fig.legend()
# %% [markdown]
# The digits the autoencoder struggle with are generally the same as PCA.
# We can see the reconstruction error for an autoencoder with 5 latent variables is comparable
# to PCA with 10 components.
# The autoencoder seems to do better reconstructing '1', '6' and | |
to 1.1, even though the metadata of
# the profile really says 1.0. We will use this to check
# whether the upgrade step has been applied (version is 1.1)
# or the full profile has been applied (version is 1.0).
step_bar = UpgradeStep(
"Upgrade", "other:bar", '1.0', '1.1', '', dummy_upgrade, None, "1")
_registerUpgradeStep(step_bar)
# And another one.
step_ham = UpgradeStep(
"Upgrade", "other:ham", '1.0', '1.1', '', dummy_upgrade, None, "1")
_registerUpgradeStep(step_ham)
# Gather list of imported profiles.
tool._imported = []
def applyContext(context):
tool._imported.append(context._profile_path)
tool.applyContext = applyContext
return tool
def test_runAllImportStepsFromProfile_with_default_strategy(self):
# Default strategy: apply new profiles, upgrade old profiles.
tool = self._setup_dependency_strategy_test_tool()
# Run the main profile.
tool.runAllImportStepsFromProfile('profile-other:foo')
# The main and third profile have been applied.
self.assertEqual(tool._imported,
[self._PROFILE_PATH3, self._PROFILE_PATH])
# The upgrade step of the second profile has been applied,
# pushing it to version 1.1.
self.assertEqual(tool.getLastVersionForProfile('other:bar'),
('1', '1'))
# Third profile is at 1.0.
self.assertEqual(tool.getLastVersionForProfile('other:ham'),
('1', '0'))
def test_runAllImportStepsFromProfile_with_reapply_strategy(self):
# You can choose the old behavior of always applying the
# dependencies. This ignores any upgrade steps.
tool = self._setup_dependency_strategy_test_tool()
# Run the main profile.
from ..tool import DEPENDENCY_STRATEGY_REAPPLY
tool.runAllImportStepsFromProfile(
'profile-other:foo',
dependency_strategy=DEPENDENCY_STRATEGY_REAPPLY)
# All three profiles have been applied.
self.assertEqual(tool._imported,
[self._PROFILE_PATH2, self._PROFILE_PATH3,
self._PROFILE_PATH])
self.assertEqual(tool.getLastVersionForProfile('other:bar'),
('1', '0'))
self.assertEqual(tool.getLastVersionForProfile('other:ham'),
('1', '0'))
def test_runAllImportStepsFromProfile_with_new_strategy(self):
# You can choose to be happy with any applied version and
# ignore any upgrade steps.
tool = self._setup_dependency_strategy_test_tool()
# Run the main profile.
from ..tool import DEPENDENCY_STRATEGY_NEW
tool.runAllImportStepsFromProfile(
'profile-other:foo',
dependency_strategy=DEPENDENCY_STRATEGY_NEW)
# The main and third profile have been applied.
self.assertEqual(tool._imported,
[self._PROFILE_PATH3, self._PROFILE_PATH])
# Second profile stays at 1.0.
self.assertEqual(tool.getLastVersionForProfile('other:bar'),
('1', '0'))
self.assertEqual(tool.getLastVersionForProfile('other:ham'),
('1', '0'))
def test_runAllImportStepsFromProfile_with_ignore_strategy(self):
        # You can choose to ignore all dependency profiles.
tool = self._setup_dependency_strategy_test_tool()
# Run the main profile.
from ..tool import DEPENDENCY_STRATEGY_IGNORE
tool.runAllImportStepsFromProfile(
'profile-other:foo',
dependency_strategy=DEPENDENCY_STRATEGY_IGNORE)
# Only the main profile has been applied.
self.assertEqual(tool._imported,
[self._PROFILE_PATH])
# Second profile stays at 1.0.
self.assertEqual(tool.getLastVersionForProfile('other:bar'),
('1', '0'))
# Third profile is not applied.
self.assertEqual(tool.getLastVersionForProfile('other:ham'),
('unknown'))
def test_runAllImportStepsFromProfile_unknown_strategy(self):
site = self._makeSite()
tool = self._makeOne('setup_tool').__of__(site)
profile_registry.registerProfile('foo', 'Foo', '', self._PROFILE_PATH)
self.assertRaises(ValueError, tool.runAllImportStepsFromProfile,
'profile-other:foo', dependency_strategy='random')
def test_runAllImportStepsFromProfile_set_last_profile_version(self):
from ..metadata import METADATA_XML
self._makeFile(METADATA_XML, _METADATA_XML)
site = self._makeSite()
tool = self._makeOne('setup_tool').__of__(site)
profile_registry.registerProfile('foo', 'Foo', '', self._PROFILE_PATH)
# test initial states
profile_id = "other:foo"
self.assertEqual(tool.getVersionForProfile(profile_id), '1.0')
self.assertEqual(tool.getLastVersionForProfile(profile_id),
'unknown')
# run all imports steps
tool.runAllImportStepsFromProfile('profile-other:foo',
ignore_dependencies=True)
# events.handleProfileImportedEvent should set last profile version
self.assertEqual(tool.getLastVersionForProfile(profile_id),
('1', '0'))
def test_runAllImportStepsFromProfile_step_registration_with_depends(self):
from ..metadata import METADATA_XML
self._makeFile(METADATA_XML, _METADATA_XML)
_IMPORT_STEPS_XML = """<?xml version="1.0"?>
<import-steps>
<import-step id="one"
version="1"
handler="Products.GenericSetup.tests.common.dummy_handler"
title="One Step">
One small step
</import-step>
</import-steps>
"""
self._makeFile('import_steps.xml', _IMPORT_STEPS_XML)
site = self._makeSite()
tool = self._makeOne('setup_tool').__of__(site)
profile_registry.registerProfile('foo', 'Foo', '', self._PROFILE_PATH)
profile_registry.registerProfile('bar', 'Bar', '', self._PROFILE_PATH2)
result = tool.runAllImportStepsFromProfile('profile-other:foo',
ignore_dependencies=False)
# ensure the additional step on foo was imported
self.assertTrue('one' in result['steps'])
def test_runAllImportStepsFromProfile_skipStep(self):
site = self._makeSite()
tool = self._makeOne('setup_tool').__of__(site)
result = tool.runAllImportStepsFromProfile(
'snapshot-dummy',
blacklisted_steps=['toolset'],
)
self.assertEqual((result['messages']['toolset']), 'step skipped')
def test_runAllImportStepsFromProfile_with_base_profile(self):
# Applying a base profile should clear the profile upgrade
# versions.
from ..interfaces import BASE
from ..interfaces import EXTENSION
from ..metadata import METADATA_XML
site = self._makeSite()
site.setup_tool = self._makeOne('setup_tool')
tool = site.setup_tool
self._makeFile(METADATA_XML, _METADATA_XML)
_makeTestFile(METADATA_XML, self._PROFILE_PATH2, _PLAIN_METADATA_XML)
_makeTestFile(METADATA_XML, self._PROFILE_PATH3, _PLAIN_METADATA_XML)
        # Register a base and two extension profiles. The base profile
# 'foo' has a dependency 'bar'. This might not make sense,
# but it will serve to check that we clear the profile
# versions right before we apply the base profile, which means
# right after any dependency profiles.
profile_registry.registerProfile(
'foo', 'Foo', '', self._PROFILE_PATH, profile_type=BASE)
profile_registry.registerProfile(
'bar', 'Bar', '', self._PROFILE_PATH2, profile_type=EXTENSION)
profile_registry.registerProfile(
'ham', 'Ham', '', self._PROFILE_PATH3, profile_type=EXTENSION)
# Apply the extension profile.
tool.runAllImportStepsFromProfile('profile-other:ham')
self.assertEqual(tool._profile_upgrade_versions,
{u'other:ham': (u'1', u'0')})
# Apply the base profile.
tool.runAllImportStepsFromProfile('profile-other:foo')
self.assertEqual(tool._profile_upgrade_versions,
{u'other:foo': (u'1', u'0')})
def test_runAllImportStepsFromProfile_with_unknown_pre_handler(self):
# Registering already fails.
self.assertRaises(
ValueError, profile_registry.registerProfile,
'foo', 'Foo', '', self._PROFILE_PATH,
pre_handler='Products.GenericSetup.tests.test_tool.foo_handler')
def test_runAllImportStepsFromProfile_with_unknown_post_handler(self):
# Registering already fails.
self.assertRaises(
ValueError, profile_registry.registerProfile,
'foo', 'Foo', '', self._PROFILE_PATH,
post_handler='Products.GenericSetup.tests.test_tool.foo_handler')
def test_runAllImportStepsFromProfile_pre_post_handlers_dotted_names(self):
site = self._makeSite()
tool = self._makeOne('setup_tool').__of__(site)
profile_registry.registerProfile(
'foo', 'Foo', '', self._PROFILE_PATH,
pre_handler='Products.GenericSetup.tests.test_tool.pre_handler',
post_handler='Products.GenericSetup.tests.test_tool.post_handler')
tool.runAllImportStepsFromProfile('profile-other:foo')
self.assertEqual(tool.pre_handler_called, 1)
self.assertEqual(tool.post_handler_called, 1)
tool.runAllImportStepsFromProfile('profile-other:foo')
self.assertEqual(tool.pre_handler_called, 2)
self.assertEqual(tool.post_handler_called, 2)
def test_runAllImportStepsFromProfile_pre_post_handlers_functions(self):
# When you register a profile with pre/post handlers in zcml, you do
# not get dotted names (strings) but an actual function.
site = self._makeSite()
tool = self._makeOne('setup_tool').__of__(site)
profile_registry.registerProfile(
'foo', 'Foo', '', self._PROFILE_PATH,
pre_handler=pre_handler,
post_handler=post_handler)
tool.runAllImportStepsFromProfile('profile-other:foo')
self.assertEqual(tool.pre_handler_called, 1)
self.assertEqual(tool.post_handler_called, 1)
tool.runAllImportStepsFromProfile('profile-other:foo')
self.assertEqual(tool.pre_handler_called, 2)
self.assertEqual(tool.post_handler_called, 2)
def test_runExportStep_nonesuch(self):
site = self._makeSite()
tool = self._makeOne('setup_tool').__of__(site)
self.assertRaises(ValueError, tool.runExportStep, 'nonesuch')
def test_runExportStep_step_registry_empty(self):
site = self._makeSite()
site.setup_tool = self._makeOne('setup_tool')
tool = site.setup_tool
result = tool.runExportStep('step_registries')
self.assertEqual(len(result['steps']), 1)
self.assertEqual(result['steps'][0], 'step_registries')
self.assertEqual(result['messages']['step_registries'], None)
def test_runExportStep_step_registry_default(self):
site = self._makeSite()
site.setup_tool = self._makeOne('setup_tool')
tool = site.setup_tool
tool._import_registry.registerStep('foo', handler='foo.bar')
steps = 'Products.GenericSetup.tool.exportStepRegistries'
tool._export_registry.registerStep('step_registries', steps,
'Export import / export steps.')
result = tool.runExportStep('step_registries')
self.assertEqual(len(result['steps']), 1)
self.assertEqual(result['steps'][0], 'step_registries')
self.assertEqual(result['messages']['step_registries'], None)
fileish = BytesIO(result['tarball'])
self._verifyTarballContents(fileish,
['import_steps.xml', 'export_steps.xml'])
self._verifyTarballEntryXML(
fileish, 'import_steps.xml', _DEFAULT_STEP_REGISTRIES_IMPORT_XML)
self._verifyTarballEntryXML(
fileish, 'export_steps.xml', _DEFAULT_STEP_REGISTRIES_EXPORT_XML)
def test_runAllExportSteps_empty(self):
site = self._makeSite()
site.setup_tool = self._makeOne('setup_tool')
tool = site.setup_tool
result = tool.runAllExportSteps()
self.assertEqual(
sorted(result['steps']),
['componentregistry', 'rolemap', 'step_registries', 'toolset'])
self.assertEqual(result['messages']['step_registries'], None)
def test_runAllExportSteps_default(self):
site = self._makeSite()
site.setup_tool = self._makeOne('setup_tool')
tool = site.setup_tool
tool._import_registry.registerStep('foo', handler='foo.bar')
steps = 'Products.GenericSetup.tool.exportStepRegistries'
tool._export_registry.registerStep('step_registries', steps,
'Export import / export steps.')
result = tool.runAllExportSteps()
self.assertEqual(sorted(result['steps']),
['componentregistry', 'rolemap',
'step_registries', 'toolset'])
self.assertEqual(result['messages']['step_registries'], None)
fileish = BytesIO(result['tarball'])
self._verifyTarballContents(fileish,
['import_steps.xml', 'export_steps.xml',
'rolemap.xml', 'toolset.xml'])
self._verifyTarballEntryXML(
fileish, 'import_steps.xml', _DEFAULT_STEP_REGISTRIES_IMPORT_XML)
self._verifyTarballEntryXML(
fileish, 'export_steps.xml', _DEFAULT_STEP_REGISTRIES_EXPORT_XML)
def test_runAllExportSteps_extras(self):
site = self._makeSite()
site.setup_tool = self._makeOne('setup_tool')
tool = site.setup_tool
steps = 'Products.GenericSetup.tool.exportStepRegistries'
tool._export_registry.registerStep('step_registries', steps,
'Export import / export steps.')
import_reg = tool.getImportStepRegistry()
import_reg.registerStep(
'dependable', '1', _underscoreSiteTitle, ('purging', ))
import_reg.registerStep(
'dependent', '1', _uppercaseSiteTitle, ('dependable', ))
import_reg.registerStep('purging', '1', _purgeIfRequired)
export_reg = tool.getExportStepRegistry()
export_reg.registerStep('properties', _exportPropertiesINI)
result = tool.runAllExportSteps()
self.assertEqual(len(result['steps']), 5)
self.assertEqual(sorted(result['steps']),
['componentregistry', 'properties', 'rolemap',
'step_registries', 'toolset'])
self.assertEqual(result['messages']['properties'],
'Exported properties')
self.assertEqual(result['messages']['step_registries'], None)
fileish = BytesIO(result['tarball'])
self._verifyTarballContents(fileish,
['import_steps.xml', 'export_steps.xml',
'properties.ini', 'rolemap.xml',
'toolset.xml'])
self._verifyTarballEntryXML(
fileish, 'import_steps.xml', _EXTRAS_STEP_REGISTRIES_IMPORT_XML)
self._verifyTarballEntryXML(
fileish, 'export_steps.xml', _EXTRAS_STEP_REGISTRIES_EXPORT_XML)
ini_string = _PROPERTIES_INI % site.title
self._verifyTarballEntry(fileish, 'properties.ini',
ini_string.encode('utf-8'))
def test_manage_importTarball(self):
# Tests for importing a tarball with GenericSetup files.
# We are especially interested to see if old settings get purged.
site = self._makeSite()
site.setup_tool = self._makeOne('setup_tool')
tool = site.setup_tool
# We need to be Manager to see the result of calling
# manage_importTarball.
newSecurityManager(None, UnrestrictedUser('root', '', ['Manager'], ''))
ROLEMAP_XML = """<?xml version="1.0"?>
<rolemap>
<roles>
<role name="%s" />
</roles>
<permissions />
</rolemap>
"""
def rolemap_tarball(name):
# Create a tarball archive with rolemap.xml containing 'name' as
# role.
context = TarballExportContext(tool)
contents = ROLEMAP_XML % name
if isinstance(contents, six.text_type):
contents = contents.encode('utf-8')
context.writeDataFile('rolemap.xml', contents, 'text/xml')
return context.getArchive()
# Import first role.
tool.manage_importTarball(rolemap_tarball('First'))
self.assertTrue('First' in site.valid_roles())
# Import second role.
tool.manage_importTarball(rolemap_tarball('Second'))
self.assertTrue('Second' in site.valid_roles())
# The first role has been purged, because that is the default.
self.assertFalse('First' in site.valid_roles())
# A few standard roles are never removed, probably because they are
# defined one level higher.
self.assertTrue('Anonymous' in site.valid_roles())
self.assertTrue('Authenticated' in site.valid_roles())
self.assertTrue('Manager' in site.valid_roles())
self.assertTrue('Owner' in site.valid_roles())
# Import third role in non-purge mode.
tool.manage_importTarball(rolemap_tarball('Third'), purge_old=False)
self.assertTrue('Third' in site.valid_roles())
# The second role is still there.
self.assertTrue('Second' in site.valid_roles())
# When you use the form, and uncheck the purge_old checkbox, then the
# browser does not send the purge_old parameter in the request. To
# work around this, the form always passes a hidden 'submitted'
# parameter.
# Import fourth role in non-purge mode with a form submit.
tool.manage_importTarball(rolemap_tarball('Fourth'), submitted='yes')
self.assertTrue('Fourth' in site.valid_roles())
# The other roles are still there.
self.assertTrue('Second' in site.valid_roles())
self.assertTrue('Third' in site.valid_roles())
self.assertTrue('Manager' in site.valid_roles())
def test_createSnapshot_default(self):
_EXPECTED = [
('import_steps.xml', _DEFAULT_STEP_REGISTRIES_IMPORT_XML),
('export_steps.xml', _DEFAULT_STEP_REGISTRIES_EXPORT_XML),
('rolemap.xml', 'dummy'),
('toolset.xml', 'dummy'),
]
site = self._makeSite()
site.setup_tool = self._makeOne('setup_tool')
tool = site.setup_tool
tool._import_registry.registerStep('foo', handler='foo.bar')
tool._export_registry.registerStep(
'step_registries',
'Products.GenericSetup.tool.exportStepRegistries',
'Export import / export steps.')
self.assertEqual(len(tool.listSnapshotInfo()), 0)
result = tool.createSnapshot('default')
self.assertEqual(
sorted(result['steps']),
['componentregistry', 'rolemap', 'step_registries', 'toolset'])
self.assertEqual(result['messages']['step_registries'], None)
snapshot = result['snapshot']
self.assertEqual(len(snapshot.objectIds()), len(_EXPECTED))
for id in [x[0] for x in _EXPECTED]:
self.assertTrue(id in snapshot.objectIds())
def normalize_xml(xml):
# using this might mask a real problem on windows, but so far the
# different newlines just caused problems in this test
lines = [line.strip() for line in xml.splitlines() if line.strip()]
return ' '.join(lines)
fileobj = snapshot._getOb('import_steps.xml')
self.assertEqual(normalize_xml(fileobj.read()),
normalize_xml(_DEFAULT_STEP_REGISTRIES_IMPORT_XML))
fileobj = snapshot._getOb('export_steps.xml')
self.assertEqual(normalize_xml(fileobj.read()),
normalize_xml(_DEFAULT_STEP_REGISTRIES_EXPORT_XML))
self.assertEqual(len(tool.listSnapshotInfo()), 1)
info = tool.listSnapshotInfo()[0]
self.assertEqual(info['id'], 'default')
self.assertEqual(info['title'], 'default')
# Check access restriction on | |
import argparse
import os
import sys
import time
import traceback
from collections import defaultdict
from typing import Optional, Iterable, Iterator
from dupescan import (
console,
core,
criteria,
fs,
funcutil,
log,
report,
units,
)
from dupescan.cli._common import add_common_cli_args, set_encoder_errors
from dupescan.types import AnyPath
__all__ = ("delete_unmarked_in_report", "scan", "run")
def get_arg_parser():
p = argparse.ArgumentParser(
description="Find files with identical content.",
epilog="""Arguments that accept byte counts accept an integer with an
optional suffix indicating units. 'B' indicates bytes, which
is also the default if no suffix is provided. 'K' indicates
kibibytes (1024 bytes). 'M' indicates mebibytes. 'G'
indicates gibibytes, and 'T' indicates tebibytes."""
)
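    # For example, given the suffix rules described in the epilog above,
    # units.parse_byte_count("64K") should yield 65536 and units.parse_byte_count("2M")
    # should yield 2097152 (behaviour inferred from the help text, not verified here).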
p.add_argument("paths",
nargs="*",
metavar="PATH",
help="""List of files to consider. Directories will be recursively
examined."""
)
p.add_argument("-s", "--symlinks",
action="store_true",
help="""Include symlinks."""
)
p.add_argument("-z", "--zero",
action="store_true",
help="""Include zero-length files. All zero-length files are considered
to have identical content. This option is equivalent to
--min-size 0"""
)
p.add_argument("-o", "--only-mixed-roots",
action="store_true",
help="""Only show duplicate files if they arise from recursing into
different root directories. This can speed operations if the
only results of interest are whether duplicates exist between
different filesystem hierarchies, rather than within a single
one. Note that this only has a useful effect if two or more
paths are provided."""
)
p.add_argument("-m", "--min-size",
type=units.parse_byte_count,
default=None,
metavar="SIZE",
help="""Ignore files smaller than %(metavar)s. This option accepts a
byte count. The default is 1."""
)
p.add_argument("-p", "--prefer",
metavar="CRITERIA",
help="""For each set of duplicate files, automatically select one
for preservation according to the provided criteria. Other
duplicates can be deleted by passing the generated report to
the -x/--delete option."""
)
p.add_argument("--exclude",
action="append",
metavar="NAME",
help="""Excludes files or directories with the given name. This feature
            is currently simplified - it only performs case-sensitive
literal comparisons against a filename - i.e. the last segment
of the file path. At some point it will be expanded to something
more like rsync/tar matching."""
)
p.add_argument("--time",
action="store_true",
help="""Add elasped time to the generated report."""
)
p.add_argument("--help-prefer",
action="store_true",
help="""Display detailed help on using the --prefer option"""
)
p.add_argument("-v", "--verbose",
action="store_true",
help="""Log detailed information to STDERR."""
)
p.add_argument("--no-progress",
dest="progress",
action="store_false",
help="""Don't show progress bars on STDERR."""
)
p.add_argument("-x", "--delete",
metavar="PATH",
help="""Delete unmarked files in the report at %(metavar)s. Sets where
no files are marked will be skipped."""
)
p.add_argument("-c", "--coalesce",
metavar="PATH",
help="""Replace duplicate files with hard links, using sets found in
the report at %(metavar)s. File marks are ignored - all
filenames are preserved."""
)
p.add_argument("-n", "--dry-run",
action="store_true",
help="""Used in combination with -x/--delete or -c/--coalesce. List
actions that those options would perform without actually doing
them."""
)
add_common_cli_args(p)
return p
def main():
"""Entry point for finddupes command.
Returns:
Exit code for passing to sys.exit()
"""
return run(sys.argv[1:])
def run(argv=None):
"""Run finddupes with the specified command line arguments.
Args:
argv (list of str or None): command line arguments, not including the
command itself (argv[0]).
Returns:
Exit code for passing to sys.exit()
"""
sys.stdout = set_encoder_errors(sys.stdout, "backslashreplace")
p = get_arg_parser()
args = p.parse_args(argv)
if args.help_prefer:
with open(
os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"preferhelp"
)
) as stream:
print(stream.read().format(script_name=os.path.basename(sys.argv[0])))
return 0
if args.delete is not None or args.coalesce is not None:
if args.delete is not None and args.coalesce is not None:
print(
"Conflicting arguments: -x/--delete and -c/--coalesce are mutually exclusive.",
file=sys.stderr,
)
return 1
if any((
args.paths,
args.symlinks,
args.zero,
args.only_mixed_roots,
args.min_size,
args.prefer,
args.time,
args.exclude,
)):
print("Only -n/--dry-run can be used with -x/--delete or -c/--coalesce. All other options must be omitted.", file=sys.stderr)
return 1
if args.delete is not None:
return delete_unmarked_in_report(args.delete, args.dry_run, args.verbose)
hard_linker = HardLinker(args.coalesce, args.dry_run, args.verbose)
return hard_linker()
else:
config = ScanConfig()
if args.zero:
if args.min_size is not None and args.min_size > 0:
print(
"Conflicting arguments: --zero implies --min-size 0, but --min-size was also specified.",
file=sys.stderr,
)
return 1
config.min_file_size = 0
        elif args.min_size is not None:
config.min_file_size = args.min_size
if args.dry_run:
print("Warning: -n/--dry-run has no effect without -x/--delete or -c/--coalesce.", file=sys.stderr)
if len(args.paths) == 0:
print("No paths specified", file=sys.stderr)
return 1
config.recurse = True
config.only_mixed_roots = args.only_mixed_roots
config.include_symlinks = args.symlinks
config.prefer = args.prefer
config.verbose = args.verbose
config.progress = args.progress
config.max_memory = args.max_memory
config.max_buffer_size = args.max_buffer_size
config.log_time = args.time
if args.exclude:
config.exclude.extend(args.exclude)
if config.only_mixed_roots and len(args.paths) <= 1:
print("Warning: -o/--only-mixed-roots with a single path will not produce any results.", file=sys.stderr)
scan(args.paths, config)
return 0
class ScanConfig(object):
"""Configuration object affecting behavior of the scan() function.
Attributes:
recurse (bool): If True, recurse into directories and add all
descendant files to the set to consider for duplicate detection.
only_mixed_roots (bool): Only compare files if they were discovered
by recursing into different root-level directories.
min_file_size (int): Only consider files whose size in bytes is greater
or equal to this number.
include_symlinks (bool): If True, resolve symlinks, otherwise ignore
them.
prefer (str or None): A 'prefer' string expressing how to choose at least
one file from a set to be marked in the generated report.
verbose (bool): If True, print debugging info to stderr.
progress (bool): If True, print progress bars to stderr.
max_memory (int): Adjust buffer size and maximum number of open files so
as not to exceed this many bytes. This will be exceeded if set
below platform.MIN_BUFFER_SIZE. If 0, the default of
platform.DEFAULT_MAX_MEMORY is used.
max_buffer_size (int): Absolute maximum buffer size. This will always
be imposed even if max_memory allows a bigger buffer size. If 0,
the default of platform.DEFAULT_MAX_BUFFER_SIZE is used.
log_time (bool): If True, record the amount of time taken and append it
to the report.
exclude (List[str]): List of names to exclude.
"""
def __init__(self):
self.recurse = False
self.only_mixed_roots = False
self.min_file_size = 1
self.include_symlinks = False
self.prefer = None
self.verbose = False
self.progress = False
self.max_buffer_size = 0
self.max_memory = 0
self.log_time = False
self.exclude = []
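# Illustrative programmatic use of scan() with a customised ScanConfig (the paths below are
# hypothetical); this roughly mirrors what `finddupes -s -z PATH ...` sets up in run():
#
#     config = ScanConfig()
#     config.recurse = True
#     config.include_symlinks = True
#     config.min_file_size = 0
#     scan(["/data/photos", "/backup/photos"], config)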
def scan(paths: Iterable[AnyPath], config: Optional[ScanConfig]=None):
"""Run a duplicate scan and generate a report.
Args:
paths (iterable of Path): The set of files and/or top-level directories
to search for duplicates.
config (ScanConfig): A ScanConfig instance that configures various
aspects of the operation.
"""
if config is None:
config = ScanConfig()
logger = log.StreamLogger(
stream = sys.stderr,
min_level = log.DEBUG if config.verbose else log.INFO,
)
entries = create_file_iterator(
paths,
logger,
config.recurse,
config.exclude,
config.min_file_size,
config.include_symlinks
)
reporter = create_reporter(config.prefer)
if config.progress:
try:
use_unicode = sys.stderr.encoding in ("utf_8",)
except AttributeError:
use_unicode = False
compare_progress_handler = CompareProgressHandler(
glyphs = use_unicode,
stream = sys.stderr,
)
walk_progress_handler = WalkProgressHandler(stream=sys.stderr)
else:
walk_progress_handler = None
compare_progress_handler = None
find_dupes = core.DuplicateFinder(
max_memory = config.max_memory,
max_buffer_size = config.max_buffer_size,
cancel_func = cancel_if_single_root if config.only_mixed_roots else None,
logger = logger,
compare_progress_handler = compare_progress_handler,
walk_progress_handler = walk_progress_handler,
)
start_time = time.time() if config.log_time else 0
for dupe_set in find_dupes(entries):
reporter.handle_dupe_set(dupe_set)
if config.log_time:
print("# Elapsed time: %s" % units.format_duration(time.time() - start_time))
def create_file_iterator(
paths: Iterable[AnyPath],
logger=None,
recurse=False,
exclude: Optional[Iterable[str]]=None,
min_file_size=1,
include_symlinks=False
) -> Iterator[fs.FileEntry]:
if logger is not None:
def onerror(env_error):
logger.error(str(env_error))
else:
def onerror(_):
pass
ifunc = (
fs.recurse_iterator if recurse
else fs.flat_iterator
)
name_filter = None
if exclude:
exclude_set = frozenset(exclude)
name_filter = lambda e: e.basename not in exclude_set
file_size_filter = None
if min_file_size > 0:
file_size_filter = lambda e: e.size >= min_file_size
symlink_filter = None
if not include_symlinks:
symlink_filter = lambda e: not e.is_symlink
file_filter = funcutil.and_of(name_filter, funcutil.and_of(symlink_filter, file_size_filter))
dir_filter = name_filter
return ifunc(paths, dir_filter, file_filter, onerror)
def cancel_if_single_root(dupe_set):
    """Return True (i.e. cancel further comparison) when every entry in the
    duplicate set was discovered under the same root path, so the set would
    not qualify as 'mixed roots'."""
    index = -1
for entry in dupe_set.all_entries():
if index == -1:
index = entry.root.index
elif index != entry.root.index:
return False
return True
class WalkProgressHandler(object):
def __init__(self, stream=None, line_width=78):
self._status_line = console.StatusLine(
stream = stream if stream is not None else sys.stderr,
line_width = line_width,
elide_string = ".."
)
def progress(self, path):
self._status_line.set_text(path)
def complete(self):
self._status_line.clear()
GLYPHS = {
"ascii": ("#-", ""),
"unicode": (
"\u2588\u2591",
"\u2800\u2840\u28C0\u28C4\u28E4\u28E6\u28F6\u28F7\u28FF"
)
}
class CompareProgressHandler(object):
def __init__(self, glyphs=True, stream=None, line_width=78):
self._status_line = console.StatusLine(
stream = stream if stream is not None else sys.stderr,
line_width = line_width
)
if glyphs is True:
glyphs = GLYPHS["unicode"]
elif glyphs is False:
glyphs = GLYPHS["ascii"]
self._progress_glyphs, self._count_glyphs = glyphs
def progress(self, sets, file_pos, file_size):
set_vis_list = [ ]
for s in sets:
set_len = len(s)
if set_len >= len(self._count_glyphs):
set_vis_list.append(str(set_len))
else:
set_vis_list.append(self._count_glyphs[set_len])
from urllib.parse import quote as urlquote
from discord.ext import commands
import classyjson as cj
import asyncio
import discord
import random
import typing
from util.code import format_exception
from util.misc import strip_command
ALPHABET_LOWER = "abcdefghijklmnopqrstuvwxyz"
INSULTS = {"i am stupid", "i am dumb", "i am very stupid", "i am very dumb", "i stupid", "i'm stupid", "i'm dumb"}
class Fun(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.d = bot.d
self.k = bot.k
self.aiohttp = bot.aiohttp
self.db = bot.get_cog("Database")
self.ipc = bot.ipc
def lang_convert(self, msg, lang):
keys = list(lang)
for key in keys:
msg = msg.replace(key, lang.get(key))
try:
msg = msg.replace(key.upper(), lang.get(key).upper())
except Exception:
pass
if len(msg) > 2000 - 6:
raise ValueError("message is too big")
return msg
@commands.command(name="meme", aliases=["meemee", "meem", "maymay", "mehmeh"])
@commands.cooldown(1, 1.5, commands.BucketType.user)
async def meme(self, ctx):
"""Sends a meme from reddit"""
do_nsfw = False
if isinstance(ctx.channel, discord.TextChannel):
do_nsfw = ctx.channel.is_nsfw()
meme = {"nsfw": True, "spoiler": True}
async with ctx.typing():
while meme["spoiler"] or (not do_nsfw and meme["nsfw"]) or meme.get("image") is None:
resp = await self.aiohttp.get(
"https://api.iapetus11.me/reddit/meme",
headers={"Authorization": self.k.villager_api},
params={"queryId": ctx.channel.id},
)
meme = cj.classify(await resp.json())
embed = discord.Embed(color=self.d.cc, title=meme.title[:256], url=meme.permalink)
embed.set_footer(text=f"{meme.upvotes} | u/{meme.author}", icon_url=self.d.upvote_emoji_image)
embed.set_image(url=meme.image)
await ctx.send(embed=embed)
@commands.command(name="4chan", aliases=["greentext"])
@commands.cooldown(1, 1.5, commands.BucketType.user)
async def greentext(self, ctx):
"""Sends a greentext from r/greentext"""
do_nsfw = False
if isinstance(ctx.channel, discord.TextChannel):
do_nsfw = ctx.channel.is_nsfw()
jj = {"nsfw": True}
async with ctx.typing():
while (not do_nsfw and jj["nsfw"]) or jj.get("image") is None:
resp = await self.aiohttp.get(
"https://api.iapetus11.me/reddit/greentext",
headers={"Authorization": self.k.villager_api},
params={"queryId": ctx.channel.id},
)
jj = await resp.json()
embed = discord.Embed(color=self.d.cc)
embed.set_image(url=jj["image"])
await ctx.send(embed=embed)
@commands.command(name="comic")
@commands.cooldown(1, 1.5, commands.BucketType.user)
async def comic(self, ctx):
"""Sends a comic from r/comics"""
do_nsfw = False
if isinstance(ctx.channel, discord.TextChannel):
do_nsfw = ctx.channel.is_nsfw()
comic = {"nsfw": True, "spoiler": True}
async with ctx.typing():
while comic["spoiler"] or (not do_nsfw and comic["nsfw"]) or comic.get("image") is None:
resp = await self.aiohttp.get(
"https://api.iapetus11.me/reddit/comic",
headers={"Authorization": self.k.villager_api},
params={"queryId": ctx.channel.id},
)
comic = cj.classify(await resp.json())
embed = discord.Embed(color=self.d.cc, title=comic.title[:256], url=comic.permalink)
embed.set_footer(text=f"{comic.upvotes} | u/{comic.author}", icon_url=self.d.upvote_emoji_image)
embed.set_image(url=comic.image)
await ctx.send(embed=embed)
@commands.command(name="cursed", aliases=["cursedmc"])
@commands.cooldown(1, 1.5, commands.BucketType.user)
async def cursed_mc(self, ctx):
if random.choice((True, False)):
meme = {"nsfw": True, "spoiler": True}
async with ctx.typing():
while meme["spoiler"] or meme["nsfw"] or meme.get("image") is None:
resp = await self.bot.aiohttp.get(
"https://api.iapetus11.me/reddit/cursedminecraft",
headers={"Authorization": self.k.villager_api},
params={"queryId": ctx.channel.id},
)
meme = cj.classify(await resp.json())
embed = discord.Embed(color=self.d.cc, title=meme.title[:256], url=meme.permalink)
embed.set_footer(text=f"{meme.upvotes} | u/{meme.author}", icon_url=self.d.upvote_emoji_image)
embed.set_image(url=meme.image)
await ctx.send(embed=embed)
else:
embed = discord.Embed(color=self.d.cc)
embed.set_image(url=f"https://iapetus11.me/static/images/cursed_minecraft/{random.choice(self.d.cursed_images)}")
await ctx.send(embed=embed)
@commands.command(name="say")
async def say_text(self, ctx, *, text):
"""Sends whatever is put into the command"""
nice = strip_command(ctx)
if nice.lower() in INSULTS:
await ctx.reply("Yes.")
return
try:
await ctx.message.delete()
except Exception:
pass
await ctx.send(nice)
@commands.command(name="villagerspeak")
async def villager_speak(self, ctx, *, msg):
"""Turns the given text into Minecraft villager sounds as text"""
try:
translated = self.lang_convert(strip_command(ctx), self.d.fun_langs.villager)
await ctx.send(translated)
except ValueError:
await self.bot.send_embed(ctx, ctx.l.fun.too_long)
@commands.command(name="enchant")
async def enchant_lang(self, ctx, *, msg):
"""Turns regular text into the Minecraft enchantment table language"""
try:
translated = self.lang_convert((strip_command(ctx)).lower(), self.d.fun_langs.enchant)
await ctx.send(translated)
except ValueError:
await self.bot.send_embed(ctx, ctx.l.fun.too_long)
@commands.command(name="unenchant")
async def unenchant_lang(self, ctx, *, msg):
"""Turns the Minecraft enchantment table language back into regular text"""
try:
translated = self.lang_convert(strip_command(ctx), self.d.fun_langs.unenchant)
await ctx.send(translated)
except ValueError:
await self.bot.send_embed(ctx, ctx.l.fun.too_long)
@commands.command(name="vaporwave")
async def vaporwave_text(self, ctx, *, msg):
"""Turns regular text into vaporwave text"""
try:
translated = self.lang_convert(strip_command(ctx), self.d.fun_langs.vaporwave)
await ctx.send(translated)
except ValueError:
await self.bot.send_embed(ctx, ctx.l.fun.too_long)
@commands.command(name="sarcastic", aliases=["spongebob"])
async def sarcastic_text(self, ctx, *, msg):
"""Turns regular text into "sarcastic" text from spongebob"""
msg = strip_command(ctx)
caps = True
sarcastic = ""
for letter in msg:
if not letter == " ":
caps = not caps
if caps:
sarcastic += letter.upper()
else:
sarcastic += letter.lower()
await ctx.send(sarcastic)
@commands.command(name="clap")
async def clap_cheeks(self, ctx, *, text):
"""Puts the :clap: emoji between words"""
clapped = ":clap: " + " :clap: ".join((strip_command(ctx)).split(" ")) + " :clap:"
if len(clapped) > 2000:
await self.bot.send_embed(ctx, ctx.l.fun.too_long)
return
await ctx.send(clapped)
@commands.command(name="emojify")
async def emojify(self, ctx, *, _text):
"""Turns text or images into emojis"""
stripped = (strip_command(ctx)).lower()
text = ""
for letter in stripped:
if letter in ALPHABET_LOWER:
text += f":regional_indicator_{letter}: "
else:
text += self.d.emojified.get(letter, letter) + " "
if len(text) > 2000:
await self.bot.send_embed(ctx, ctx.l.fun.too_long)
else:
await ctx.send(text)
@commands.command(name="owo", aliases=["owofy"])
async def owofy_text(self, ctx, *, text):
"""Make any text more cringe"""
text = text.lower().replace("l", "w").replace("r", "w")
if len(text) > 1950:
await self.bot.send_embed(ctx, ctx.l.fun.too_long)
else:
await ctx.send(f"{text} {random.choice(self.d.owos)}")
@commands.command(name="bubblewrap", aliases=["pop"])
async def bubblewrap(self, ctx, size=None):
"""Sends bubblewrap to the chat"""
if size is None:
size = (10, 10)
else:
size = size.split("x")
if len(size) != 2:
await self.bot.send_embed(ctx, ctx.l.fun.bubblewrap.invalid_size_1)
return
try:
size[0] = int(size[0])
size[1] = int(size[1])
except ValueError:
await self.bot.send_embed(ctx, ctx.l.fun.bubblewrap.invalid_size_1)
return
for val in size:
if val < 1 or val > 12:
await self.bot.send_embed(ctx, ctx.l.fun.bubblewrap.invalid_size_2)
return
bubble = "||**pop**||"
await self.bot.send_embed(ctx, f"{bubble*size[0]}\n" * size[1])
@commands.command(name="kill", aliases=["die", "kil", "dorito"])
async def kill_thing(self, ctx, *, thing: typing.Union[discord.Member, str]):
if isinstance(thing, discord.Member):
thing = thing.mention
await self.bot.send_embed(ctx, random.choice(self.d.kills).format(thing[:500], ctx.author.mention))
@commands.command(name="coinflip", aliases=["flipcoin", "cf"])
async def coin_flip(self, ctx):
await self.bot.send_embed(ctx, random.choice(("heads", "tails")))
@commands.command(name="pat")
@commands.guild_only()
async def pat(self, ctx, users: commands.Greedy[discord.Member] = [], *, text: str = ""):
resp = await self.bot.aiohttp.get("https://rra.ram.moe/i/r?type=pat")
image_url = "https://rra.ram.moe" + (await resp.json())["path"]
embed = discord.Embed(
color=self.d.cc,
title=f"**{discord.utils.escape_markdown(ctx.author.display_name)}** pats {', '.join(f'**{discord.utils.escape_markdown(u.display_name)}**' for u in users)} {text}"[
:256
],
)
embed.set_image(url=image_url)
await ctx.send(embed=embed)
@commands.command(name="slap")
@commands.guild_only()
async def slap(self, ctx, users: commands.Greedy[discord.Member] = [], *, text: str = ""):
resp = await self.bot.aiohttp.get("https://rra.ram.moe/i/r?type=slap")
image_url = "https://rra.ram.moe" + (await resp.json())["path"]
embed = discord.Embed(
color=self.d.cc,
title=f"**{discord.utils.escape_markdown(ctx.author.display_name)}** slaps {', '.join(f'**{discord.utils.escape_markdown(u.display_name)}**' for u in users)} {text}"[
:256
],
)
embed.set_image(url=image_url)
await ctx.send(embed=embed)
@commands.command(name="achievement", aliases=["mcachieve"])
@commands.cooldown(1, 1, commands.BucketType.user)
async def minecraft_achievement(self, ctx, *, text):
url = f"https://api.iapetus11.me/mc/achievement/{urlquote(text[:26])}"
embed = discord.Embed(color=self.d.cc)
embed.description = ctx.l.fun.dl_img.format(url)
embed.set_image(url=url)
await ctx.send(embed=embed)
@commands.command(name="splashtext", aliases=["mcsplash", "splashscreen", "splash"])
@commands.cooldown(1, 1, commands.BucketType.user)
async def minecraft_splash_screen(self, ctx, *, text):
url = f"https://api.iapetus11.me/mc/splash/{urlquote(text[:27])}"
embed = discord.Embed(color=self.d.cc)
embed.description = ctx.l.fun.dl_img.format(url)
embed.set_image(url=url)
await ctx.send(embed=embed)
def calculate_trivia_reward(self, question_difficulty: int) -> int:
return int((random.random() + 0.25) * (question_difficulty + 0.25) * 9) + 1
async def trivia_multiple_choice(self, ctx, question, do_reward):
correct_choice = question.a[0]
choices = question.a.copy()
random.shuffle(choices)
embed = discord.Embed(
color=self.d.cc,
title=ctx.l.fun.trivia.title.format(self.d.emojis.bounce, ctx.l.fun.trivia.difficulty[question.d], ":question:"),
)
embed.description = "*{}*".format(
"\n".join(map(" ".join, [question.q.split()[i : i + 7] for i in range(0, len(question.q.split()), 7)]))
)
embed.set_footer(text="\uFEFF\n" + ctx.l.fun.trivia.time_to_answer)
for i, c in enumerate(choices):
c_column = "\n".join(map(" ".join, [c.split()[i : i + 3] for i in range(0, len(c.split()), 3)]))
embed.add_field(name="\uFEFF", value=f"**{i+1}.** {c_column}")
if i % 2 == 0:
embed.add_field(name="\uFEFF", value="\uFEFF")
msg = await ctx.reply(embed=embed, mention_author=False)
for i in range(len(choices)):
await msg.add_reaction(self.d.emojis.numbers[i + 1])
def reaction_check(react, r_user):
return (
r_user == ctx.author
and ctx.channel == react.message.channel
and msg == react.message
and react.emoji in self.d.emojis.numbers[1 : len(choices) + 1]
)
try:
react, r_user = await self.bot.wait_for("reaction_add", check=reaction_check, timeout=15)
except asyncio.TimeoutError:
embed = discord.Embed(
color=self.d.cc,
title=ctx.l.fun.trivia.title_basic.format(self.d.emojis.bounce, ":question:"),
description=ctx.l.fun.trivia.timeout,
)
await msg.edit(embed=embed)
return
finally:
try:
await msg.clear_reactions()
except Exception:
pass
embed = discord.Embed(
color=self.d.cc,
title=ctx.l.fun.trivia.title_basic.format(self.d.emojis.bounce, ":question:"),
)
if choices[self.d.emojis.numbers.index(react.emoji) - 1] == correct_choice:
if do_reward:
emeralds_won = self.calculate_trivia_reward(question.d)
await self.db.balance_add(ctx.author.id, emeralds_won)
correct = random.choice(ctx.l.fun.trivia.correct).format(emeralds_won, self.d.emojis.emerald)
else:
correct = random.choice(ctx.l.fun.trivia.correct).split("\n")[0]
embed.description = correct
else:
embed.description = random.choice(ctx.l.fun.trivia.incorrect)
await msg.edit(embed=embed)
async def trivia_true_or_false(self, ctx, question, do_reward):
correct_choice = question.a[0]
embed = discord.Embed(
color=self.d.cc,
title=ctx.l.fun.trivia.title.format(self.d.emojis.bounce, ctx.l.fun.trivia.difficulty[question.d], ":question:"),
)
embed.description = "*{}*".format(
"\n".join(map(" ".join, [question.q.split()[i : i + 7] for i in range(0, len(question.q.split()), 7)]))
)
embed.set_footer(text="\uFEFF\n" + ctx.l.fun.trivia.time_to_answer)
msg = await ctx.reply(embed=embed, mention_author=False)
await msg.add_reaction(self.d.emojis.yes)
await msg.add_reaction(self.d.emojis.no)
def reaction_check(react, r_user):
return (
r_user == ctx.author
and ctx.channel == react.message.channel
and msg == react.message
and str(react.emoji) in [self.d.emojis.yes, self.d.emojis.no]
)
try:
react, r_user = await self.bot.wait_for("reaction_add", check=reaction_check, timeout=15)
except asyncio.TimeoutError:
embed = discord.Embed(
color=self.d.cc,
title=ctx.l.fun.trivia.title_basic.format(self.d.emojis.bounce, ":question:"),
description=ctx.l.fun.trivia.timeout,
)
await msg.edit(embed=embed)
return
finally:
try:
await msg.clear_reactions()
except Exception:
pass
embed = discord.Embed(
color=self.d.cc,
title=ctx.l.fun.trivia.title_basic.format(self.d.emojis.bounce, ":question:"),
)
if (correct_choice == "true" and str(react.emoji) == self.d.emojis.yes) or (
correct_choice == "false" and str(react.emoji) == self.d.emojis.no
):
if do_reward:
emeralds_won = self.calculate_trivia_reward(question.d)
await self.db.balance_add(ctx.author.id, emeralds_won)
correct = random.choice(ctx.l.fun.trivia.correct).format(emeralds_won, self.d.emojis.emerald)
else:
correct = random.choice(ctx.l.fun.trivia.correct).split("\n")[0]
embed.description = correct
else:
embed.description = random.choice(ctx.l.fun.trivia.incorrect)
await msg.edit(embed=embed)
@commands.command(name="trivia", aliases=["mctrivia"])
@commands.max_concurrency(1, per=commands.BucketType.user)
async def minecraft_trivia(self, ctx):
do_reward = (
await self.ipc.exec(f"trivia_commands[{ctx.author.id}] += 1\nreturn trivia_commands[{ctx.author.id}] < 5")
).result
question = random.choice(ctx.l.fun.trivia.questions)
if question.tf:
await self.trivia_true_or_false(ctx, question, do_reward)
else:
            await self.trivia_multiple_choice(ctx, question, do_reward)
of columns to highlight
headings = [row for row in ws.iter_rows()][0]
headings = [x.value for x in headings]
matching_ = dict.fromkeys(common_columns, 0)
matching = {}
for col in matching_:
matching[col] = 0
matching[col + CHECK_SUFFIX] = 0
for col in headings:
if col in matching.keys():
matching[col] = headings.index(col)
for index, row in enumerate(ws.iter_rows()):
            # headers excluded
if index != 0:
row_values = [x.value for x in row]
worker_check = {}
worker_errors = []
# gather worker data from his row
for key in matching:
val = row_values[matching[key]]
if isinstance(val, str) and "€" in val:
val = val.replace("€", "").replace("-", "").replace(",", ".").strip()
worker_check[key] = float(val) if val else 0
elif not val:
worker_check[key] = 0
elif isinstance(val, float) or isinstance(val, int):
worker_check[key] = val
# find worker errors
for data in worker_check:
if CHECK_SUFFIX not in data:
try:
if worker_check[data] - worker_check[data + CHECK_SUFFIX] != 0:
worker_errors.append(data)
worker_errors.append(data + CHECK_SUFFIX)
except Exception as e:
print(e)
if worker_errors:
# parse errors to cells
highlight_row = index + 1
for _i, error in enumerate(worker_errors):
highlight_column = get_column_letter(matching[error] + 1)
worker_errors[_i] = str(highlight_column) + str(highlight_row)
for c in worker_errors:
cell = ws[c]
cell.fill = PatternFill(start_color='FFEE1111', end_color='FFEE1111', fill_type='solid')
# drop refer columns conditionally
if not keep_refer_values:
col_to_remove = []
for val in matching:
if CHECK_SUFFIX in val:
col_to_remove.append(matching[val] + 1)
for val in sorted(col_to_remove, reverse=True):
ws.delete_cols(val)
# replace old verification with edited one
destination_workbook.remove(destination_workbook["Verifica Buste Paga"])
ws.title = "Verifica Buste Paga"
destination_workbook.save(self.verify_filename)
print(f">> PAYCHECKS COMPARED WITH DRIVE {sheet} VALUES SUCCESSFULLY")
return problems
class BillingManager():
""" classe non utilzzata """
def __init__(self, bill_name="Fattura"):
self.bill_name = f"{bill_name}.xlsx"
self.badges_path = None # badges_path
self.regex_day_pattern = "([1-9]|[12]\d|3[01])[LMGVSF]"
self.name_cell = "B5" # in che cella del badge_path si trova il nome nei cartellini
self.pairing_schema = {
"COD QTA": ["COD", "QTA"],
"ENT USC": ["ENT", "USC"],
"GIOR PROG": ["GIOR", "PROG"]
}
self.untouchable_keys = ["id", "tag"]
self.total_content = None
# model configs
self.model_name = "Modello fatturazione.xlsx"
self.footer_color = "e6e6e6"
# config paths
self._clients_path = "../config_files/BusinessCat billing/clients.json"
self._billing_profiles_path = "../config_files/BusinessCat billing/billing_profiles.json"
self._jobs_path = "../config_files/BusinessCat billing/jobs.json"
# load data from config paths
self.__load_clients()
self.__load_billing_profiles()
self.__load_jobs()
# defaults
self.default_new_job = {
"id":"",
"name":"",
"billing_profile_id":""
}
self.default_new_client = {
"id":"",
"name":""
}
self.default_billing_profile = {
"id": "",
"name": "",
"pricelist": [
{
"tag": "OR",
"name": "ore_ordinarie",
"price": 0.0
},
{
"tag": "ST",
"name": "ore_straordinarie",
"price": 0.0
},
{
"tag": "MN",
"name": "ore_notturne",
"price": 0.0
},
{
"tag": "OF",
"name": "ore_festive",
"price": 0.0
},
{
"tag": "SF",
"name": "ore_straordinarie_festive",
"price": 0.0
},
{
"tag": "SN",
"name": "ore_straordinarie_notturne",
"price": 0.0
},
{
"tag": "FN",
"name": "ore_festive_notturne",
"price": 0.0
}
]
}
print(">> BillingManager Initialized")
""" PRIVATE METHODS """
def __get_engine(self):
"""get engine conditional based on extension of self.badges_path"""
if not self.badges_path:
raise Exception("badges_path missing!")
elif self.badges_path.rsplit(".")[-1] == "xlsx":
engine = "openpyxl"
elif self.badges_path.rsplit(".")[-1] == "xls":
engine = "xlrd"
else:
raise TypeError("self.badges_path is not an Excel!")
return engine
def __load_clients(self):
""" read and load current billing_profiles file """
# create empty file if not existing
if not os.path.exists(self._clients_path):
init_data = []
with open(self._clients_path, "w") as f:
f.write(json.dumps(init_data, indent=4, ensure_ascii=True))
print("** created new clients.json file")
with open(self._clients_path,"r") as f:
self.clients = json.load(f)
print("** clients caricati")
def __load_billing_profiles(self):
""" read and load current billing_profiles file """
# create empty file if not existing
if not os.path.exists(self._billing_profiles_path):
init_data = []
with open(self._billing_profiles_path, "w") as f:
f.write(json.dumps(init_data, indent=4, ensure_ascii=True))
print("** created new billing_profile.json file")
with open(self._billing_profiles_path,"r") as f:
self.billing_profiles = json.load(f)
print("** billing_profiles caricati")
def __load_jobs(self):
""" read and load current billing_profiles file """
# create empty file if not existing
if not os.path.exists(self._jobs_path):
init_data = []
with open(self._jobs_path, "w") as f:
f.write(json.dumps(init_data, indent=4, ensure_ascii=True))
print("** created new jobs.json file")
with open(self._jobs_path,"r") as f:
self.jobs = json.load(f)
self.jobs_namelist = sorted([job["name"] for job in self.jobs])
self.jobs_namelist.insert(0,"")
print("** jobs caricati")
def __load_Excel_badges(self):
"""Load excel data of badges file (must be .xls or .xlsx)"""
if not self.badges_path:
raise Exception("ERROR: No badges_path specified")
engine = self.__get_engine()
try:
if engine == "openpyxl":
xlsx_data = openpyxl.load_workbook(self.badges_path)
sheet_names = xlsx_data.sheetnames
elif engine == "xlrd":
xlsx_data = xlrd.open_workbook(self.badges_path, on_demand=True, logfile=open(os.devnull, 'w'))
sheet_names = xlsx_data.sheet_names()
else:
raise
return xlsx_data, sheet_names, engine
except Exception as e:
raise Exception(f"Cannot load_Excel_badges. Error: {e}")
def __manage_columns(self, df):
""" private method to fix original column names"""
fixed_columns = []
prev_fixed = False
for index, v in enumerate(df.columns.values):
if prev_fixed:
prev_fixed = False
continue
new_value = df.columns.values[index].split()
new_value = " ".join(new_value).strip()
if new_value.startswith("COD QTA"):
new_value = new_value.split()
if len(new_value[1]) > 3:
new_value[0] = new_value[0] + new_value[1][3:]
fixed_columns.append(new_value[0])
fixed_columns.append(new_value[1])
prev_fixed = True
else:
fixed_columns.append(new_value)
df.columns = fixed_columns
to_remove = []
for index, col in enumerate(df.columns.values):
if col.startswith("Unnamed"):
# if col not in fixed_columns:
to_remove.append(index)
return df.drop(df.columns[to_remove], axis=1)
def __get_badge_name(self, sheet_obj):
"""get owner's name out of sheet"""
engine = self.__get_engine()
try:
if engine == "openpyxl":
badge_name = (sheet_obj[self.name_cell]).value
elif engine == "xlrd":
badge_name = sheet_obj.cell_value(int(self.name_cell[1:]) - 1, int(openpyxl.utils.cell.column_index_from_string(self.name_cell[0])) - 1)
else:
raise
try:
badge_name = " ".join(badge_name.split())
except:
badge_name = None
return badge_name
except Exception as e:
raise Exception(f"Cannot get_badge_name. Error: {e}")
def __minutes_to_int(self, decimal_time):
""" decimal_time => (str) MM"""
# if 0 not present attach it
if len(decimal_time) == 1:
decimal_time = decimal_time + "0"
hour_min = 60
result = (100 * int(decimal_time)) / hour_min
return str(int(result))
def __round_float(self, float_number, decimal_pos=2):
try:
float(float_number)
except:
raise TypeError("Cannot round: not a float number")
rounded = str(float_number).split(".")
rounded = float(rounded[0] + "." + rounded[1][:decimal_pos])
return rounded
def __smart_renamer(self, name):
old_name = name.split(" ")
new_name = ""
try:
for index, word in enumerate(old_name):
word = word.strip()
if word:
                    if index < len(old_name) - 1:  # not the last word: keep a trailing space
new_name += (word[0].upper() + word[1:].lower() + " ")
else:
new_name += (word[0].upper() + word[1:].lower())
except:
new_name = name[0].upper() + name[1:].lower()
return new_name
def __gp_column_renamer(self, name):
lookup_table = {
"Ore ORD": "OR",
"Ore STR": "ST",
"Ore NOTT": "MN",
"Ore FEST": "OF",
"Ore STR/FEST": "SF",
"Ore STR/NOTT": "SN",
"Ore FEST/NOTT": "FN"
}
new_name = None
if name in lookup_table:
new_name = lookup_table[name]
if not new_name:
new_name = name
return new_name
def __apply_billing_profile(self, hours_to_bill, billing_profile):
"""
steps: 1. adding time, 2. apply pattern, 3. apply pricing
"""
priced_hours = {}
# check integrity and get tag to focus
if hours_to_bill["OR"] and hours_to_bill["OF"]:
raise ValueError("ERROR: there are both ordinary and holiday hours on a single day")
else:
if hours_to_bill["OF"]:
tag = "OF"
elif hours_to_bill["OR"]:
tag = "OR"
else:
tag = None
# adding time (TIME IS ALWAYS ADDED IN BASE 100 eg. half an hour is not 0.30 but 0.5)
if tag:
if billing_profile["time_to_add"] and hours_to_bill[tag]:
if billing_profile["add_over_threshold"]:
if hours_to_bill[tag] >= billing_profile["threshold_hour"]:
hours_to_bill[tag] += billing_profile["time_to_add"]
else:
hours_to_bill[tag] += billing_profile["time_to_add"]
# apply pattern
if billing_profile["pattern"] and hours_to_bill[tag]:
new_amount = 0.0
start_val = copy.deepcopy(hours_to_bill[tag])
for i in range(len(billing_profile["pattern"])):
operation = billing_profile["pattern"][i]["perform"].strip()
amount = billing_profile["pattern"][i]["amount"]
if operation == "/":
start_val /= amount
elif operation =="-":
start_val -= amount
elif operation == "+":
start_val += amount
elif operation =="*":
start_val *= amount
else:
raise Exception("ERROR: invalid operator in pattern. operator must be one of + - * /")
if billing_profile["pattern"][i]["keep"]:
new_amount += start_val
hours_to_bill[tag] = new_amount
# apply pricing
try:
if billing_profile["pricelist"]:
for hour_type in hours_to_bill:
for p in billing_profile["pricelist"]:
if hour_type == p["tag"]:
priced_hours[hour_type] = hours_to_bill[hour_type]*p["price"]
priced_hours[hour_type] = self.__round_float(priced_hours[hour_type], decimal_pos=2)
else:
raise Exception("ERROR: No pricelist specified!")
except:
print("not found")
return priced_hours
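    # Minimal sketch of the inputs __apply_billing_profile expects (illustrative values
    # only; real profiles are loaded from billing_profiles.json). hours_to_bill maps the
    # hour-type tags to decimal hours; the profile's optional time_to_add and pattern are
    # applied to the ordinary (OR) or holiday (OF) hours before the pricelist is applied.
    #
    #     hours_to_bill = {"OR": 7.5, "ST": 1.0, "MN": 0, "OF": 0, "SF": 0, "SN": 0, "FN": 0}
    #     billing_profile = {
    #         "time_to_add": 0.5, "add_over_threshold": True, "threshold_hour": 6,
    #         "pattern": [{"perform": "*", "amount": 1.0, "keep": True}],
    #         "pricelist": [{"tag": "OR", "price": 16.0}, {"tag": "ST", "price": 19.0}],
    #     }
    #     priced = self.__apply_billing_profile(hours_to_bill, billing_profile)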
def __get_gp_data(self, gp_xls):
wb = openpyxl.load_workbook(gp_xls)
ws = wb.worksheets[0]
        # take the column names from the first row
columns = list(ws.iter_rows())[0]
columns = [c.value for c in columns]
columns = {k: v for k, v in enumerate(columns) if v}
total_w = []
w_obj = None
for row in list(ws.iter_rows())[1:-1]:
row_content = {k: v.value for k, v in enumerate(row) if k in columns}
if row_content[1] and str(row_content[1]).lower().startswith("somma"): continue
for x in row_content:
if row_content[x] == None:
row_content[x] = 0
if row_content[0]:
if w_obj:
total_w.append(w_obj)
w_obj = {}
for item in columns.items():
w_obj[item[1]] = row_content[item[0]]
else:
for index in row_content:
key = columns[index]
if key != "Nome":
w_obj[key] += row_content[index]
        if w_obj:
            total_w.append(w_obj)  # append the last worker accumulated in the loop
        return total_w
<filename>win/pywinauto/controls/menuwrapper.py
# GUI Application automation and testing library
# Copyright (C) 2006 <NAME>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
"""Wrapper around Menu's and Menu items
These wrappers allow you to work easily with menu items.
You can select or click on items and check if they are
checked or unchecked.
"""
__revision__ = "$Revision: 330 $"
import ctypes
import time
from .. import win32structures
from .. import win32functions
from .. import win32defines
from .. import findbestmatch
from ..timings import Timings
class MenuItemNotEnabled(RuntimeError):
"Raised when a menuitem is not enabled"
pass
class MenuItem(object):
"""Wrap a menu item"""
def __init__(self, ctrl, menu, index, on_main_menu = False):
"""Initalize the menu item
* **ctrl** The dialog or control that owns this menu
* **menu** The menu that this item is on
* **index** The Index of this menuitem on the menu
* **on_main_menu** True if the item is on the main menu
"""
self.index = index
self.menu = menu
self.ctrl = ctrl
self.on_main_menu = on_main_menu
def _read_item(self):
"""Read the menu item info
See http://msdn.microsoft.com/library/default.asp?url=/library/en-us/winui/winui/windowsuserinterface/resources/menus/menureference/menufunctions/getmenuiteminfo.asp
for more information."""
menu_info = win32structures.MENUITEMINFOW()
menu_info.cbSize = ctypes.sizeof (menu_info)
menu_info.fMask = \
win32defines.MIIM_CHECKMARKS | \
win32defines.MIIM_ID | \
win32defines.MIIM_STATE | \
win32defines.MIIM_SUBMENU | \
win32defines.MIIM_TYPE #| \
#MIIM_FTYPE #| \
#MIIM_STRING
#MIIM_DATA | \
ret = win32functions.GetMenuItemInfo (
self.menu,
self.index,
True,
ctypes.byref(menu_info))
if not ret:
raise ctypes.WinError()
return menu_info
def FriendlyClassName(self):
return "MenuItem"
def Rectangle(self):
"Get the rectangle of the menu item"
rect = win32structures.RECT()
if self.on_main_menu:
ctrl = self.ctrl
else:
ctrl = 0
win32functions.GetMenuItemRect(
ctrl,
self.menu,
self.index,
ctypes.byref(rect))
return rect
def Index(self):
"Return the index of this menu item"
return self.index
def State(self):
"Return the state of this menu item"
return self._read_item().fState
def ID(self):
"Return the ID of this menu item"
return self._read_item().wID
def Type(self):
"""Return the Type of this menu item
Main types are MF_STRING, MF_BITMAP, MF_SEPARATOR.
See http://msdn.microsoft.com/library/default.asp?url=/library/en-us/winui/winui/windowsuserinterface/resources/menus/menureference/menustructures/menuiteminfo.asp
for further information."""
return self._read_item().fType
def Text(self):
"Return the state of this menu item"
info = self._read_item()
# if there is text
if info.cch:
# allocate a buffer
buffer_size = info.cch+1
text = ctypes.create_unicode_buffer(buffer_size)
# update the structure and get the text info
info.dwTypeData = ctypes.addressof(text)
info.cch = buffer_size
win32functions.GetMenuItemInfo (
self.menu,
self.index,
True,
ctypes.byref(info))
text = text.value
else:
text = ''
return text
def SubMenu(self):
"Return the SubMenu or None if no submenu"
submenu_handle = self._read_item().hSubMenu
if submenu_handle:
self.ctrl.SendMessageTimeout(
win32defines.WM_INITMENUPOPUP,
submenu_handle,
self.index)
return Menu(self.ctrl, submenu_handle, False, self)
return None
def IsEnabled(self):
"Return True if the item is enabled."
return not (
self.State() & win32defines.MF_DISABLED or
self.State() & win32defines.MF_GRAYED)
def IsChecked(self):
"Return True if the item is checked."
return bool(self.State() & win32defines.MF_CHECKED)
def Click(self):
"""Click on the menu item
        If the menu is open, this will click on the item with the mouse.
        If the menu is not open, each of its parents will be opened
        until the item is visible.
"""
self.ctrl.VerifyEnabled()
rect = self.Rectangle()
if not self.IsEnabled():
raise MenuItemNotEnabled(
"MenuItem '%s' is disabled"% self.Text())
# if the item is not visible - work up along it's parents
# until we find an item we CAN click on
if rect == (0, 0, 0, 0):
if self.menu.owner_item:
self.menu.owner_item.Click()
rect = self.Rectangle()
x_pt = (rect.left + rect.right) /2
y_pt = (rect.top + rect.bottom) /2
from .HwndWrapper import _perform_click_input #, delay_after_menuselect
_perform_click_input(
None,
coords = (x_pt, y_pt),
absolute = True)
win32functions.WaitGuiThreadIdle(self.ctrl)
#import time
#time.sleep(delay_after_menuselect)
def Select(self):
"""Select the menu item
This will send a message to the parent window that the
item was picked
"""
if not self.IsEnabled():
raise MenuItemNotEnabled(
"MenuItem '%s' is disabled"% self.Text())
#from HwndWrapper import delay_after_menuselect
#if self.State() & win32defines.MF_BYPOSITION:
# print self.Text(), "BYPOSITION"
# self.ctrl.NotifyMenuSelect(self.Index(), True)
#else:
        # seems like SetFocus might be messing with getting the
# id for Popup menu items - so I calling it before SetFocus
command_id = self.ID()
# notify the control that a menu item was selected
self.ctrl.SetFocus()
self.ctrl.SendMessageTimeout(
win32defines.WM_COMMAND,
win32functions.MakeLong(0, command_id))
#self.ctrl.NotifyMenuSelect(self.ID())
win32functions.WaitGuiThreadIdle(self.ctrl)
time.sleep(Timings.after_menu_wait)
def GetProperties(self):
"""Return the properties for the item as a dict
If this item opens a sub menu then call Menu.GetProperties()
        to return the list of items in the sub menu. This is available
under the 'MenuItems' key
"""
props = {}
props['Index'] = self.Index()
props['State'] = self.State()
props['Type'] = self.Type()
props['ID'] = self.ID()
props['Text'] = self.Text()
submenu = self.SubMenu()
if submenu:
props['MenuItems'] = submenu.GetProperties()
return props
def __repr__(self):
"Return a representation of the object as a string"
return "<MenuItem %s>" % repr(self.Text())
# def Check(self):
# item = self._read_item()
# item.fMask = win32defines.MIIM_STATE
# item.fState &= win32defines.MF_CHECKED
#
## ret = win32functions.SetMenuItemInfo(
## self.menuhandle,
## self.ID(),
## 0, # by position
## ctypes.byref(item))
##
## if not ret:
## raise ctypes.WinError()
#
# print win32functions.CheckMenuItem(
# self.menuhandle,
# self.index,
# win32defines.MF_BYPOSITION | win32defines.MF_CHECKED)
#
# win32functions.DrawMenuBar(self.ctrl)
#
# def UnCheck(self):
# item = self._read_item()
# item.fMask = win32defines.MIIM_STATE
# item.fState &= win32defines.MF_UNCHECKED
#
# ret = win32functions.SetMenuItemInfo(
# self.menuhandle,
# self.ID(),
# 0, # by position
# ctypes.byref(item))
#
# if not ret:
# raise ctypes.WinError()
#
# win32functions.DrawMenuBar(self.ctrl)
#
#
class Menu(object):
"""A simple wrapper around a menu handle
A menu supports methods for querying the menu
    and getting its menu items."""
def __init__(
self,
owner_ctrl,
menuhandle,
is_main_menu = True,
owner_item = None):
"""Initialize the class.
* **owner_ctrl** is the Control that owns this menu
* **menuhandle** is the menu handle of the menu
* **is_main_menu** we have to track whether it is the main menu
or a popup menu
* **owner_item** The item that contains this menu - this will be
None for the main menu, it will be a MenuItem instance for a
submenu.
"""
self.ctrl = owner_ctrl
self.handle = menuhandle
self.is_main_menu = is_main_menu
self.owner_item = owner_item
self._as_parameter_ = self.handle
if self.is_main_menu:
self.ctrl.SendMessageTimeout(win32defines.WM_INITMENU, self.handle)
def ItemCount(self):
"Return the count of items in this menu"
return win32functions.GetMenuItemCount(self.handle)
def Item(self, index):
"""Return a specific menu item
* **index** is the 0 based index of the menu item you want
"""
return MenuItem(self.ctrl, self, index, self.is_main_menu)
def Items(self):
"Return a list of all the items in this menu"
items = []
for i in range(0, self.ItemCount()):
items.append(self.Item(i))
return items
def GetProperties(self):
"""Return the properties for the menu as a list of dictionaries
This method is actually recursive. It calls GetProperties() for each
of the items. If the item has a sub menu it will call this
GetProperties to get the sub menu items.
"""
item_props = []
for item in self.Items():
item_props.append(item.GetProperties())
return {'MenuItems': item_props}
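    # Illustrative shape of the value GetProperties() returns (texts and IDs made up):
    #
    #     {'MenuItems': [
    #         {'Index': 0, 'State': 0, 'Type': 0, 'ID': 101, 'Text': '&File',
    #          'MenuItems': {'MenuItems': [...]}},        # this item opens a sub menu
    #         {'Index': 1, 'State': 0, 'Type': 0, 'ID': 102, 'Text': '&Edit'},
    #     ]}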
def GetMenuPath(self, path, path_items = None, appdata = None):
"""Walk the items in this menu to find the item specified by path
The path is specified by a list of items separated by '->' each Item
can be either a string (can include spaces) e.g. "Save As" or the zero
based index of the item to return prefaced by # e.g. #1.
These can be mixed as necessary. For Example:
"#0 -> Save As",
"Tools -> #0 -> Configure"
Text matching is done using a 'best match' fuzzy algorithm, so you don't
        have to add all punctuation, ellipses, etc.
"""
if path_items is None:
path_items = []
# get the first part (and remainder)
parts = path.split("->", 1)
current_part = parts[0]
if current_part.startswith("#"):
index = int(current_part[1:])
best_item = self.Item(index)
else:
# get the text names from the menu items
if appdata is None:
item_texts = [item.Text() for item in self.Items()]
else:
item_texts = [item['Text'] for item in appdata]
# find the item that best matches the current part
best_item = findbestmatch.find_best_match(
                current_part, item_texts, self.Items())
if NBnegready == False:
i = numpy.argmin(REX) # Find the sample number with the minimum
Min = REX[i]
if Min < (-1*NBlevel):
NBpeaks = NBpeaks + 1 # A Noise Blanker peak
NOISEblankeractive = True # The noise blanker is active
m1 = i - S1 # The first sample that has to be made zero
if m1 < 0: # Check if in the range of REX length
m1 = 0
m2 = i + S2 # The last sample that has to be made zero
if m2 > len(REX): # Check if in the range of REX length
m2 = len(REX)
while m1 < m2: # Make all samples between m1 and m2 zero
REX[m1] = 0
m1 = m1 + 1
else:
NBnegready = True
if NBpeaks >= MAXpeaks:
NBposready = True
NBnegready = True
# Do the FFT window function
REX = REX * FFTwindowshape # The windowing shape function only over the samples
if DEBUG == 2:
T2=time.time()
print("FFTwindowing + NoiseBlanker: " + str(T2-T1))
T1=time.time() # For time measurement of routines
# Zero padding of array for better interpolation of peak level of signals
fftsamples = ZEROpaddingvalue * SMPfftpwrTwo # Add zero's to the arrays
# FFT with numpy
fftresult = numpy.fft.fft(REX, n=fftsamples) # Do FFT+zeropadding till n=fftsamples with NUMPY
ALL = fftresult # ALL = Real + Imaginary part
ALL = ALL[STARTsample:STOPsample] # Delete the unused samples that will not be displayed
    ALL = numpy.absolute(ALL) # Take the magnitude sqrt(REX*REX + IMX*IMX) for VOLTAGE!
Totalcorr = float(ZEROpaddingvalue) / fftsamples # Make an amplitude correction for the zero padding and FFT samples used
Totalcorr = float(SMPfftpwrTwo) / SMPfft * Totalcorr # Make an amplitude correction for rounding the samples to the nearest power of 2
FFTresult = Totalcorr * ALL
if FFTaverage > 1:
if FFTmemory[0] == -1: # Memory cleared
FFTmemory = FFTresult
else:
FFTresult = FFTmemory + (FFTresult - FFTmemory) / FFTaverage
FFTmemory = FFTresult
if DEBUG == 2:
T2=time.time()
print("FFT calculation: " + str(T2-T1))
def UpdateText():
global AUDIOlevel
global AUDIOstatus
global Brightness
global CANVASheight
global CANVASwidth
global COLORaudiobar
global COLORaudiomax
global COLORaudiook
global COLORfrequency
global COLORcanvas
global COLORtext
global Contrast
global DEBUG
global DISPLAY
global FFTaverage
global FFTwindow
global FFTbandwidth
global FFTwindowname
global FTPmessage
global GRH
global HOURmarkers
global LoporaName
global NOISEblankeractive
global NOISEblankerlevel
global RASPIcputemperature
global RASPIterminalinfo
global RUNstatus
global RXbuffer
global RXbuffermax
global RXbuffermin
global SAMPLErate
global SMPfft
global SMPfftpwrTwo
global SNAPshotmessage
global STARTfrequency
global STATIONname
global THEimage
global TRACEdate
global TUNEDfrequency
global Vdiv
global X0L
global Y0T
global ZOOMfactor
T1=time.time()
# Open a draw item for THEimage
draw = ImageDraw.Draw(THEimage)
# Delete text items on the screen
draw.rectangle((0, GRH+Y0T+15, CANVASwidth+2,CANVASheight+2), fill=COLORcanvas)
# Trace information
txt = "Start at: " + TRACEdate + " " + STATIONname
x = X0L
y = Y0T+GRH+20
draw.text((x,y),txt, font=TEXTfont, fill=COLORtext)
if RASPIterminalinfo == True:
print(txt)
# Frequency information
Flo = STARTfrequency
Fpixel = (float(SAMPLErate / 2) / (SMPfftpwrTwo / 2 - 1)) * ZOOMfactor # Frequency step per pixel
Fhi = Flo + GRH * Fpixel
Ftxt = str(int(Flo * 10))
Ftxt = Ftxt[:len(Ftxt)-1] + "." + Ftxt[-1:]
txt = "Frequency range (Hz): " + Ftxt + " - "
Ftxt = str(int(Fhi * 10))
Ftxt = Ftxt[:len(Ftxt)-1] + "." + Ftxt[-1:]
txt = txt + Ftxt
Fd = (Fhi - Flo) / Vdiv
Ftxt = str(int(Fd * 100))
Ftxt = Ftxt[:len(Ftxt)-2] + "." + Ftxt[-2:]
txt = txt + " Hz/div: " + str(Vdiv)
txt = txt + " " + FFTwindowname
txt = txt + " Sample rate: " + str(SAMPLErate) + " FFTsamples: " + str(SMPfft)
if SMPfft != SMPfftpwrTwo:
txt = txt + " (" + str(SMPfftpwrTwo) + ")"
txt = txt + " Bandwidth (mHz): " + str(FFTbandwidth)
x = X0L
y = Y0T+GRH+39
draw.text((x,y),txt, font=TEXTfont, fill=COLORtext)
# Soundcard level bargraph
txt1 = "||||||||||||||||||||" # Bargraph
le = len(txt1) # Length of bargraph
t = int(math.sqrt(AUDIOlevel) * le)
if RASPIterminalinfo == True:
txt = "Audio level (%): " + str(int(100 * AUDIOlevel))
print(txt)
n = 0
txt = ""
while(n < t and n < le):
txt = txt + "|"
n = n + 1
x = X0L
y = Y0T+GRH+55
draw.text((x,y),txt1, font=TEXTfont, fill=COLORaudiobar)
if AUDIOlevel >= 1.0:
draw.text((x,y),txt, font=TEXTfont, fill=COLORaudiomax)
else:
draw.text((x,y),txt, font=TEXTfont, fill=COLORaudiook)
# Place of text after bargraph
w = TEXTfont.getsize(txt1)
x = X0L + w[0] + 20
# Runstatus and level information
if (RUNstatus == 0) or (RUNstatus == 3):
txt = "Stopped"
else:
txt = "Running"
if AUDIOstatus == 1:
txt = txt + " Audio on"
else:
txt = txt + " Audio off"
txt = txt + " Display " + str(DISPLAY) + " C=" + str(Contrast) + " B=" + str(Brightness)
if FFTaverage > 1:
txt = txt + " AVG=" + str(FFTaverage)
if NOISEblankerlevel == 0:
txt = txt + " Noise blanker off"
else:
txt = txt + " Noise blanker level: " + str(NOISEblankerlevel)
if NOISEblankeractive == True: # Noise blanker active
txt = txt + "*"
if RASPIcputemperature == True:
try:
temp = os.popen("vcgencmd measure_temp" ).readline()
txt = txt + " Raspberry Pi CPU " + temp
except:
pass
txt = txt + " Buffer (%): " + str(int(RXbuffermin / 2)) + " - " + str(int(RXbuffermax / 2))
y = Y0T+GRH+58
draw.text((x,y),txt, font=TEXTfont, fill=COLORtext)
if RASPIterminalinfo == True:
print(txt)
x = X0L
y = Y0T+GRH+77
txt = FTPmessage + " " + SNAPshotmessage
draw.text((x,y),txt, font=TEXTfont, fill=COLORtext)
if RASPIterminalinfo == True:
print(txt)
# Program version
w = TEXTfont.getsize(LoporaName)
x = CANVASwidth - w[0] - 10
draw.text((x,y),LoporaName, font=TEXTfont, fill=COLORtext)
# Frequency scale
Fpixel = (float(SAMPLErate / 2) / (SMPfftpwrTwo / 2 - 1)) * ZOOMfactor # Frequency step per pixel
Pdiv = Vdiv / Fpixel # Pixels per division
FR = STARTfrequency
i = GRH
while (i > 0):
text = str(FR)
w = TEXTfont.getsize(text)
x = CANVASwidth - w[0]
y = i + Y0T - w[1] - 3
xa = x + w[0] + 1
ya = y + w[1]
if y >= 0:
draw.rectangle((x,y,xa,ya), fill=COLORcanvas) # Canvas color window
draw.text((x,y),text, font=TEXTfont, fill=COLORfrequency)
FR = FR + Vdiv
i = i - Pdiv
del draw # Delete the draw item
if DEBUG == 2:
T2=time.time()
print("UpdateText: " + str(T2-T1))
SCREENrefresh() # Refresh the screen
def MakeTrace():
global AUDIOsignal1
global CANVASheight
global CANVASwidth
global COLORcanvas
global COLORtext
global Contrast
global DEBUG
global DISPLAYblue
global DISPLAYgreen
global DISPLAYred
global Dwidth
global FFTline
global FFTresult
global GRH # Screenheight
global GRW # Screenwidth
global LOPshotnow
global MARKERtype
global RUNstatus
global THEimage
global TRACEh
global TRACEm
global X0L # Left top X value
global Y0T # Left top Y value
global SCREENupdate
global SCREENupdatecounter
global ZEROpadding
global ZEROpaddingvalue
global ZOOMfactor
T1=time.time()
# Open a draw item for THEimage
draw = ImageDraw.Draw(THEimage)
# Set the TRACEsize variable
TRACEsize = len(FFTresult) # Set the trace length, last two values are line and marker type
X1 = int(X0L + FFTline * Dwidth)
Y1 = Y0T + GRH # Start at bottom
Cvalue = 2.0 ** (float(Contrast) / 2) # Max. 1024x gain for contrast
Dsample = 0 # Pointer in FFTresult[]
Dstep = ZEROpaddingvalue * ZOOMfactor # Step for Data pointer
n = 0
while n < GRH:
if (Dsample + Dstep) < TRACEsize:
v = FFTresult[int(Dsample)]
m = 1
while m < Dstep: # Peak value from more samples if zero padding or zoom active
try: # Try for boundary overload error catching
v1 = FFTresult[int(Dsample+m)]
except:
v1 = 0
if v1 > v:
v = v1
m = m + 1
v = v * Cvalue # Multiply | |
<reponame>DerekGloudemans/3D-detector-trials
import os
import re
import pickle
import cv2
import ast
from numpy import array
#from parameters import *
def get_precomputed_checksums(abs_path=None):
path = './resources/timestamp_pixel_checksum_6.pkl'
if abs_path is not None:
path = abs_path
with open(path, 'rb') as pf:
dig_cs6 = pickle.load(pf)
return dig_cs6
def get_timestamp_geometry(abs_path=None):
path = './resources/timestamp_geometry_4K.pkl'
if abs_path is not None:
path = abs_path
with open(path, 'rb') as pf:
g = pickle.load(pf)
return g
def get_timestamp_pixel_limits():
"""
Provides x/y coordinates (only) for timestamp pixel extraction. Note that return order is y1, y2, x1, x2. Timestamp
can be extracted from full frame like so: `timestamp_pixels = frame_pixels[y1:y2, x1:x2, :]`
:return: y-start (y1), y-stop (y2), x-start (x1), x-stop (x2)
"""
geom = get_timestamp_geometry()
y0 = geom['y0']
x0 = geom['x0']
h = geom['h']
n = geom['n']
w = geom['w']
return y0, y0+h, x0, x0+(n*w)
def parse_frame_timestamp(timestamp_geometry, precomputed_checksums, frame_pixels=None, timestamp_pixels=None):
"""
Use pixel checksum method to parse timestamp from video frame. First extracts timestamp area from frame
array. Then converts to gray-scale, then converts to binary (black/white) mask. Each digit
(monospaced) is then compared against the pre-computed pixel checksum values for an exact match.
:param timestamp_geometry: dictionary of parameters used for determining area of each digit in checksum
(load using utilities.get_timestamp_geometry)
:param precomputed_checksums: dictionary of checksum:digit pairs (load using utilities.get_precomputed_checksums())
:param frame_pixels: numpy array of full (4K) color video frame; dimensions should be 2160x3840x3
:param timestamp_pixels: numpy array of timestamp area, defined by `get_timestamp_pixel_limits()`
:return: timestamp (None if checksum error), pixels from error digit (if no exact checksum match)
"""
# extract geometry values from timestamp_geometry dictionary
g = timestamp_geometry
w = g['w']
h = g['h']
x0 = g['x0']
y0 = g['y0']
n = g['n']
h13 = g['h13']
h23 = g['h23']
h12 = g['h12']
w12 = g['w12']
dig_cs6 = precomputed_checksums
if frame_pixels is not None:
# extract the timestamp in the x/y directions
tsimg = frame_pixels[y0:(y0+h), x0:(x0+(n*w)), :]
elif timestamp_pixels is not None:
tsimg = timestamp_pixels
else:
raise ValueError("One of `frame_pixels` or `timestamp_pixels` must be specified.")
# convert color to gray-scale
tsgray = cv2.cvtColor(tsimg, cv2.COLOR_BGR2GRAY)
# convert to black/white binary mask using fixed threshold (1/2 intensity)
# observed gray values on the edges of some digits in some frames were well below this threshold
ret, tsmask = cv2.threshold(tsgray, 127, 255, cv2.THRESH_BINARY)
# parse each of the `n` digits
ts_dig = []
for j in range(n):
# disregard the decimal point in the UNIX time (always reported in .00 precision)
if j == 10:
ts_dig.append('.')
continue
# extract the digit for this index, was already cropped x0:x0+n*w, y0:y0+h
pixels = tsmask[:, j*w:(j+1)*w]
# compute the 6-area checksum and convert it to an array
cs = [[int(pixels[:h13, :w12].sum() / 255), int(pixels[:h13, w12:].sum() / 255)],
[int(pixels[h13:h23, :w12].sum() / 255), int(pixels[h13:h23, w12:].sum() / 255)],
[int(pixels[h23:, :w12].sum() / 255), int(pixels[h23:, w12:].sum() / 255)]
]
cs = array(cs)
# compute the absolute difference between this digit and each candidate
cs_diff = [(dig, abs(cs - cs_ref).sum()) for dig, cs_ref in dig_cs6.items()]
pred_dig, pred_err = min(cs_diff, key=lambda x: x[1])
# looking for a perfect checksum match; testing showed this was reliable
if pred_err > 0:
# if no exact match, return no timestamp and the pixel values that resulted in the error
return None, pixels
else:
ts_dig.append(pred_dig)
# convert the list of strings into a number and return successful timestamp
return ast.literal_eval(''.join(map(str, ts_dig))), None
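# Illustrative usage sketch (`frame` is an assumed 2160x3840x3 BGR array taken from one
# 4K video frame; everything else comes from the helpers above):
#
#     geom = get_timestamp_geometry()
#     checksums = get_precomputed_checksums()
#     y1, y2, x1, x2 = get_timestamp_pixel_limits()
#     timestamp, err_pixels = parse_frame_timestamp(
#         geom, checksums, timestamp_pixels=frame[y1:y2, x1:x2, :])
#     if timestamp is None:
#         pass  # err_pixels holds the digit image that had no exact checksum match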
def parse_config_file(config_file):
"""
Parses an entire session configuration file into sections (in this order): cameras, image snapshot, video snapshot,
and recording.
:param config_file: path to configuration file
:return: configuration dictionaries of key-value pairs; list of dicts for cameras section, single dict for others.
"""
camera_config = []
image_snap_config = []
video_snap_config = []
recording_config = []
block_mapping = {'__CAMERA__': camera_config,
'__IMAGE-SNAPSHOT__': image_snap_config,
'__VIDEO-SNAPSHOT__': video_snap_config,
'__PERSISTENT-RECORDING__': recording_config}
# open configuration file and parse it out
with open(config_file, 'r') as f:
current_block = None
block_destination = None
for line in f:
# ignore empty lines and comment lines
if line is None or len(line.strip()) == 0 or line[0] == '#':
continue
strip_line = line.strip()
if len(strip_line) > 2 and strip_line[:2] == '__' and strip_line[-2:] == '__':
# this is a configuration block line
# first check if this is the first one or not
if block_destination is not None and len(current_block) > 0:
# add the block to its destination if it's non-empty
block_destination.append(current_block)
# reset current block to empty and set its destination
current_block = {}
block_destination = block_mapping[strip_line]
elif '==' in strip_line:
pkey, pval = strip_line.split('==')
current_block[pkey.strip()] = pval.strip()
else:
raise AttributeError("""Got a line in the configuration file that isn't a block header nor a
key=value.\nLine: {}""".format(strip_line))
# add the last block of the file (if it's non-empty)
if block_destination is not None and len(current_block) > 0:
block_destination.append(current_block)
# check number of configuration blocks for these configs
if len(image_snap_config) > 1:
raise AttributeError("More than one configuration block found for __IMAGE-SNAPSHOT__.")
elif len(image_snap_config) == 1: # had one config block
image_snap_config = image_snap_config[0]
if len(video_snap_config) > 1:
raise AttributeError("More than one configuration block found for __VIDEO-SNAPSHOT__.")
elif len(video_snap_config) == 1: # had one config block
video_snap_config = video_snap_config[0]
if len(recording_config) > 1:
raise AttributeError("More than one configuration block found for __PERSISTENT-RECORDING__.")
elif len(recording_config) == 1: # had one config block
recording_config = recording_config[0]
# send back configs
return camera_config, image_snap_config, video_snap_config, recording_config
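# Minimal sketch of the configuration format parse_config_file expects: block headers
# wrapped in double underscores followed by `key == value` lines (all names and values
# below are made up):
#
#     __CAMERA__
#     name == camera_01
#     rtsp_address == rtsp://192.168.0.10/stream
#
#     __PERSISTENT-RECORDING__
#     segment_duration == 15
#
# parse_config_file("_SESSION_CONFIG.config") then returns a list with one dict per
# __CAMERA__ block plus single dicts for the snapshot and recording sections.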
def get_session_start_time_local(session_info_filename):
"""
Finds the local time at which the session was started, according to the _SESSION_INFO.txt file.
:param session_info_filename: path to session info filename
:return: datetime.datetime object representing local time at which session was initialized
"""
import datetime
with open(session_info_filename, 'r') as f:
for line in f:
if line.startswith("Session initialization time (local): "):
ts = line.strip("Session initialization time (local): ")
ts = datetime.datetime.strptime(ts.strip(), "%Y-%m-%d %H:%M:%S.%f")
break
else:
raise ValueError("Couldn't find line with timestamp.")
return ts
def get_sesssion_recording_segment_time(session_info_filename):
"""
Finds the duration of video segments for perpetual recording during this session. This info is in the _SESSION_INFO
file, as well as the _SESSION_CONFIG file. Pulled from INFO file for convenience in this case.
:param session_info_filename: path to session info filename
:return: number of minutes for video segments
"""
with open(session_info_filename, 'r') as f:
for line in f:
if line.startswith("Recording segment duration: "):
rst = float(line.strip("Recording segment duration: "))
break
else:
raise ValueError("Couldn't find line with recording duration.")
return rst
def get_session_number(session_info_filename=None):
"""
Finds the session number, according to the _SESSION_INFO.txt file.
:param session_info_filename: path to session info filename
:return: session number (integer)
"""
with open(session_info_filename, 'r') as f:
for line in f:
if line.startswith("SESSION #"):
sn = line.strip("SESSION #")
break
else:
raise ValueError("Couldn't find line with session number.")
return int(sn)
def get_recording_params(session_root_directory, session_number=None, camera_configs=None, recording_config=None, verbose=True):
"""
Determine relevant parameters from video ingest session configuration: list of recording directories where video
files are stored (corresponding to each camera, might be the same), file name formatter (corresponding to each
camera, might be the same), list of camera names. Note: length of all return lists = number of cameras.
Providing configuration dictionaries as inputs is optional; if either is left as None, the the session
configuration will be loaded and parsed automatically from _SESSION_CONFIG.config. Providing session_number is
also optional; it will be loaded from _SESSION_INFO.txt if not provided.
:param session_root_directory: directory of video ingest session, which contains automatic copy of config file
:param session_number: (optional) session number corresponding to this directory
:param camera_configs: (optional) list of camera configuration dictionaries (used to get camera names)
:param recording_config: (optional) recording configuration dictionary (used to get recording file name format)
    :param verbose: bool - allow or suppress function print statements
:return: list of recording directories for each camera, file name format for each camera, list of camera names
"""
if camera_configs is None or recording_config is None:
if verbose: print("Loading configuration file instead of using configuration input arguments.")
camera_configs, _, _, recording_config = parse_config_file(
config_file=os.path.join(session_root_directory, "_SESSION_CONFIG.config"))
if session_number is None:
session_number = get_session_number(
session_info_filename=os.path.join(session_root_directory, DEFAULT_SESSION_INFO_FILENAME))
# get camera names | |
<reponame>shirubana/bifacialvf
# -*- coding: utf-8 -*-
"""
ViewFactor module - VF calculation helper files for bifacial-viewfactor
@author <NAME>
@translated to python by sayala 06/09/17
"""
# ensure python3 compatible division and printing
from __future__ import division, print_function, absolute_import
import math
import numpy as np
from sun import solarPos, sunIncident, perezComp, aOIcorrection
import logging
# TODO: set level or add formatters if more advanced logging required
LOGGER = logging.getLogger(__name__) # only used to raise errors
DTOR = math.pi / 180.0 # Factor for converting from degrees to radians
def getBackSurfaceIrradiances(rowType, maxShadow, PVbackSurface, beta, sazm,
dni, dhi, C, D, albedo, zen, azm, cellRows,
pvBackSH, rearGroundGHI, frontGroundGHI,
frontReflected, offset=0):
"""
This method calculates the AOI corrected irradiance on the back of the PV
module/panel. 11/19/2015
Added rowType and other changes to distinguish between types of rows.
4/19/2016
Added input of offset of reference cell from PV module back (in PV panel
slope lengths) for modeling Sara's reference cell measurements, should be
set to zero for PV module cell irradiances.
Added while loop so projected Xs aren't too negative causing array index
problems (<0) 12/13/2016::

        while (projectedX1 < -100.0 or projectedX2 < -100.0):
            # Offset so array indexes are >= -100.0 12/13/2016
            projectedX1 += 100.0
            projectedX2 += 100.0
Parameters
----------
rowType : str
Type of row: "first", "interior", "last", or "single"
maxShadow
Maximum shadow length projected to the front(-) or rear (+) from the
front of the module
PVbackSurface
PV module back surface material type, either "glass" or "ARglass"
beta
Tilt from horizontal of the PV modules/panels (deg) (for front surface)
sazm
Surface azimuth of PV panels (deg) (for front surface)
dni
Direct normal irradiance (W/m2)
dhi
Diffuse horizontal irradiance (W/m2)
C
Ground clearance of PV panel (in PV panel slope lengths)
D
Horizontal distance between rows of PV panels (in PV panel slope
lengths)
albedo
Ground albedo
zen
Sun zenith (in radians)
    azm
        Sun azimuth (in radians)
    cellRows
        Number of cell rows per PV module/panel (the back-surface irradiance is
        returned for each of these rows)
    pvBackSH
        Decimal fraction of the back surface of the PV panel that is shaded,
        0.0 to 1.0
rearGroundGHI : array of size [100]
Global horizontal irradiance for each of 100 ground segments (W/m2)
frontGroundGHI : array of size [100]
Global horizontal irradiance for each of 100 ground segments (W/m2)
frontReflected : array of size [cellRows]
Irradiance reflected from the front of the PV module/panel (W/m2) in
the row behind the one of interest
offset
Offset of reference cell from PV module back (in PV panel slope
lengths), set to zero for PV module cell irradiances
Returns
-------
backGTI : array of size [cellRows]
AOI corrected irradiance on back side of PV module/panel, one for each
cell row (W/m2)
aveGroundGHI : numeric
Average GHI on ground under PV array
Notes
-----
1-degree hemispherical segment AOI correction factor for glass (index=0)
and ARglass (index=1)
"""
backGTI = []
SegAOIcor = [
[0.057563, 0.128570, 0.199651, 0.265024, 0.324661, 0.378968, 0.428391, 0.473670, 0.514788, 0.552454,
0.586857, 0.618484, 0.647076, 0.673762, 0.698029, 0.720118, 0.740726, 0.759671, 0.776946, 0.792833,
0.807374, 0.821010, 0.833534, 0.845241, 0.855524, 0.865562, 0.874567, 0.882831, 0.890769, 0.897939,
0.904373, 0.910646, 0.916297, 0.921589, 0.926512, 0.930906, 0.935179, 0.939074, 0.942627, 0.946009,
0.949096, 0.952030, 0.954555, 0.957157, 0.959669, 0.961500, 0.963481, 0.965353, 0.967387, 0.968580,
0.970311, 0.971567, 0.972948, 0.974114, 0.975264, 0.976287, 0.977213, 0.978142, 0.979057, 0.979662,
0.980460, 0.981100, 0.981771, 0.982459, 0.982837, 0.983199, 0.983956, 0.984156, 0.984682, 0.985026,
0.985364, 0.985645, 0.985954, 0.986241, 0.986484, 0.986686, 0.986895, 0.987043, 0.987287, 0.987388,
0.987541, 0.987669, 0.987755, 0.987877, 0.987903, 0.987996, 0.988022, 0.988091, 0.988104, 0.988114,
0.988114, 0.988104, 0.988091, 0.988022, 0.987996, 0.987903, 0.987877, 0.987755, 0.987669, 0.987541,
0.987388, 0.987287, 0.987043, 0.986895, 0.986686, 0.986484, 0.986240, 0.985954, 0.985645, 0.985364,
0.985020, 0.984676, 0.984156, 0.983956, 0.983199, 0.982837, 0.982459, 0.981771, 0.981100, 0.980460,
0.979662, 0.979057, 0.978142, 0.977213, 0.976287, 0.975264, 0.974114, 0.972947, 0.971567, 0.970311,
0.968580, 0.967387, 0.965353, 0.963481, 0.961501, 0.959671, 0.957157, 0.954555, 0.952030, 0.949096,
0.946009, 0.942627, 0.939074, 0.935179, 0.930906, 0.926512, 0.921589, 0.916297, 0.910646, 0.904373,
0.897939, 0.890769, 0.882831, 0.874567, 0.865562, 0.855524, 0.845241, 0.833534, 0.821010, 0.807374,
0.792833, 0.776946, 0.759671, 0.740726, 0.720118, 0.698029, 0.673762, 0.647076, 0.618484, 0.586857,
0.552454, 0.514788, 0.473670, 0.428391, 0.378968, 0.324661, 0.265024, 0.199651, 0.128570, 0.057563],
[0.062742, 0.139913, 0.216842, 0.287226, 0.351055, 0.408796, 0.460966, 0.508397, 0.551116, 0.589915,
0.625035, 0.657029, 0.685667, 0.712150, 0.735991, 0.757467, 0.777313, 0.795374, 0.811669, 0.826496,
0.839932, 0.852416, 0.863766, 0.874277, 0.883399, 0.892242, 0.900084, 0.907216, 0.914023, 0.920103,
0.925504, 0.930744, 0.935424, 0.939752, 0.943788, 0.947313, 0.950768, 0.953860, 0.956675, 0.959339,
0.961755, 0.964039, 0.965984, 0.967994, 0.969968, 0.971283, 0.972800, 0.974223, 0.975784, 0.976647,
0.977953, 0.978887, 0.979922, 0.980773, 0.981637, 0.982386, 0.983068, 0.983759, 0.984436, 0.984855,
0.985453, 0.985916, 0.986417, 0.986934, 0.987182, 0.987435, 0.988022, 0.988146, 0.988537, 0.988792,
0.989043, 0.989235, 0.989470, 0.989681, 0.989857, 0.990006, 0.990159, 0.990263, 0.990455, 0.990515,
0.990636, 0.990731, 0.990787, 0.990884, 0.990900, 0.990971, 0.990986, 0.991042, 0.991048, 0.991057,
0.991057, 0.991048, 0.991042, 0.990986, 0.990971, 0.990900, 0.990884, 0.990787, 0.990731, 0.990636,
0.990515, 0.990455, 0.990263, 0.990159, 0.990006, 0.989857, 0.989681, 0.989470, 0.989235, 0.989043,
0.988787, 0.988532, 0.988146, 0.988022, 0.987435, 0.987182, 0.986934, 0.986417, 0.985916, 0.985453,
0.984855, 0.984436, 0.983759, 0.983068, 0.982386, 0.981637, 0.980773, 0.979920, 0.978887, 0.977953,
0.976647, 0.975784, 0.974223, 0.972800, 0.971284, 0.969970, 0.967994, 0.965984, 0.964039, 0.961755,
0.959339, 0.956675, 0.953860, 0.950768, 0.947313, 0.943788, 0.939752, 0.935424, 0.930744, 0.925504,
0.920103, 0.914023, 0.907216, 0.900084, 0.892242, 0.883399, 0.874277, 0.863766, 0.852416, 0.839932,
0.826496, 0.811669, 0.795374, 0.777313, 0.757467, 0.735991, 0.712150, 0.685667, 0.657029, 0.625035,
0.589915, 0.551116, 0.508397, 0.460966, 0.408796, 0.351055, 0.287226, 0.216842, 0.139913, 0.062742]]
# Tilt from horizontal of the PV modules/panels, in radians
beta = beta * DTOR
sazm = sazm * DTOR # Surface azimuth of PV module/panels, in radians
    # 1. Calculate and assign various parameters to be used for modeling
# irradiances
# For calling PerezComp to break diffuse into components for zero tilt
# (horizontal)
iso_dif = 0.0; circ_dif = 0.0; horiz_dif = 0.0; grd_dif = 0.0; beam = 0.0
# Call to get iso_dif for horizontal surface
ghi, iso_dif, circ_dif, horiz_dif, grd_dif, beam = perezComp(
dni, dhi, albedo, zen, 0.0, zen)
# Isotropic irradiance from sky on horizontal surface, used later for
# determining isotropic sky component
iso_sky_dif = iso_dif
# For calling PerezComp to break diffuse into components for 90 degree tilt
# (vertical)
inc, tiltr, sazmr = sunIncident(0, 90.0, 180.0, 45.0, zen, azm)
# Call to get horiz_dif for vertical surface
vti, iso_dif, circ_dif, horiz_dif, grd_dif, beam = perezComp(
dni, dhi, albedo, inc, tiltr, zen)
# Horizon diffuse irradiance on a vertical surface, used later for
# determining horizon brightening irradiance component
F2DHI = horiz_dif
index = -99
n2 = -99.9
if (PVbackSurface == "glass"):
# Index to use with 1-degree hemispherical segment AOI correction
# factor array
index = 0
n2 = 1.526 # Index of refraction for glass
elif (PVbackSurface == "ARglass"):
# Index to use with 1-degree hemispherical segment AOI correction
# factor array
index = 1
n2 = 1.300 # Index of refraction for ARglass
else:
raise Exception(
"Incorrect text input for PVbackSurface."
" Must be glass or ARglass.")
# Reflectance at normal incidence, Duffie and Beckman p217
Ro = math.pow((n2 - 1.0) / (n2 + 1.0), 2.0)
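    # Worked example (for orientation only): for uncoated glass, n2 = 1.526, so
    # Ro = ((1.526 - 1.0) / (1.526 + 1.0))**2 ~ 0.0434, i.e. roughly 4.3% of normally
    # incident light is reflected at the air/glass interface.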
# Average GHI on ground under PV array for cases when x projection exceed
# 2*rtr
aveGroundGHI = 0.0
for i in range(0,100):
aveGroundGHI += rearGroundGHI[i] / 100.0
# Calculate x,y coordinates of bottom and top edges of PV row in back of desired PV row so that portions of sky and ground viewed by the
    # PV cell may be determined. Origin of x-y axis is the ground point below the lower front edge of the desired PV row. The row in back of
# the desired row is in the positive x direction.
h = math.sin(beta); # Vertical height of sloped PV panel (in PV panel slope lengths)
x1 = math.cos(beta); # Horizontal distance from front of panel to rear of panel (in PV panel slope lengths)
rtr = D + x1; # Row-to-row distance (in PV panel slope lengths)
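    # Worked example (illustrative): for beta = 30 deg and D = 1.0 slope lengths,
    # h = sin(30 deg) = 0.5, x1 = cos(30 deg) ~ 0.866, so rtr = D + x1 ~ 1.866
    # slope lengths between corresponding points of adjacent rows.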
    PbotX = rtr;        # x value for point on bottom edge of PV module/panel of row in back of the desired row
U1G=-U1*spsp.psi(-1/H-G)
U2G=-U2*spsp.psi(-2/H-G)
U3G=-U3*spsp.psi(-3/H-G)
U4G=-U4*spsp.psi(-4/H-G)
U1H= RHH*(-U1G-U1*spsp.psi(-1/H+1))
U2H=2*RHH*(-U2G-U2*spsp.psi(-2/H+1))
U3H=3*RHH*(-U3G-U3*spsp.psi(-3/H+1))
U4H=4*RHH*(-U4G-U4*spsp.psi(-4/H+1))
DL2G=U1G-2*U2G
DL2H=U1H-2*U2H
DL3G=-U1G+6*U2G-6*U3G
DL3H=-U1H+6*U2H-6*U3H
DL4G=U1G-12*U2G+30*U3G-20*U4G
DL4H=U1H-12*U2H+30*U3H-20*U4H
D11=(DL3G-TAU3*DL2G)/ALAM2
D12=(DL3H-TAU3*DL2H)/ALAM2
D21=(DL4G-TAU4*DL2G)/ALAM2
D22=(DL4H-TAU4*DL2H)/ALAM2
DET=D11*D22-D12*D21
H11= D22/DET
H12=-D12/DET
H21=-D21/DET
H22= D11/DET
DEL1=E1*H11+E2*H12
DEL2=E1*H21+E2*H22
## TAKE NEXT N-R STEP
G=XG-DEL1
H=XH-DEL2
Z=G+H*0.725
## REDUCE STEP IF G AND H ARE OUTSIDE THE PARAMETER SPACE
FACTOR=1
if G <= -1:
FACTOR = 0.8*(XG+1)/DEL1
if H <= -1:
FACTOR = min(FACTOR,0.8*(XH+1)/DEL2)
if Z <= -1:
FACTOR = min(FACTOR,0.8*(XZ+1)/(XZ-Z))
if H <= 0 and G*H<= -1:
FACTOR = min(FACTOR,0.8*(XG*XH+1)/(XG*XH-G*H))
if FACTOR == 1:
pass
else:
DEL1 = DEL1*FACTOR
DEL2 = DEL2*FACTOR
G = XG-DEL1
H = XH-DEL2
Z = G+H*0.725
#############################################################
def pelnor(xmom):
if xmom[1] <= 0:
print("L-Moments Invalid")
return
else:
para = [xmom[0],xmom[1]*sp.sqrt(sp.pi)]
return(para)
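# Worked example (illustrative): for a standard normal distribution the first two
# L-moments are lambda1 = 0 and lambda2 = 1/sqrt(pi) ~ 0.5642, so
# pelnor([0.0, 0.5642]) returns approximately [0.0, 1.0], i.e. (mu, sigma).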
#############################################################
def pelpe3(xmom):
Small = 1e-6
#Constants used in Minimax Approx:
C1 = 0.2906
C2 = 0.1882
C3 = 0.0442
D1 = 0.36067
D2 = -0.59567
D3 = 0.25361
D4 = -2.78861
D5 = 2.56096
D6 = -0.77045
T3=abs(xmom[2])
if xmom[1] <= 0 or T3 >= 1:
para = [0]*3
print("L-Moments Invalid")
return(para)
if T3<= Small:
para = []
para.append(xmom[0])
para.append(xmom[1]*sp.sqrt(sp.pi))
para.append(0)
return(para)
if T3 >= (1.0/3):
T = 1-T3
Alpha = T*(D1+T*(D2+T*D3))/(1+T*(D4+T*(D5+T*D6)))
else:
T=3*sp.pi*T3*T3
Alpha=(1+C1*T)/(T*(1+T*(C2+T*C3)))
RTALPH=sp.sqrt(Alpha)
BETA=sp.sqrt(sp.pi)*xmom[1]*sp.exp(sp.special.gammaln(Alpha)-sp.special.gammaln(Alpha+0.5))
para = []
para.append(xmom[0])
para.append(BETA*RTALPH)
para.append(2/RTALPH)
if xmom[2] < 0:
para[2]=-para[2]
return(para)
#############################################################
def pelwak(xmom):
iFail = 0
FitPareto = 0
tryxiiszero = 0
if abs(xmom[1]) <= 0:
iFail=3
if abs(xmom[2]) > 1:
iFail=3
if abs(xmom[3]) > 1:
iFail=3
if abs(xmom[4]) > 1:
iFail=3
if iFail ==3:
print("L-Moments Invalid")
para = [0]*5
return para
iFail = 0
#CALCULATE THE L-MOMENTS (LAMBDA'S)
alam1 = xmom[0]
alam2 = xmom[1]
alam3 = xmom[2]*alam2
alam4 = xmom[3]*alam2
alam5 = xmom[4]*alam2
#ESTIMATE N1,N2,N3,C1,C2,C3 WHEN XI.NE.0
XN1= 3*alam2-25*alam3 +32*alam4
XN2=-3*alam2 +5*alam3 +8*alam4
XN3= 3*alam2 +5*alam3 +2*alam4
XC1= 7*alam2-85*alam3+203*alam4-125*alam5
XC2=-7*alam2+25*alam3 +7*alam4 -25*alam5
XC3= 7*alam2 +5*alam3 -7*alam4 -5*alam5
#Estimate B and D
XA=XN2*XC3-XC2*XN3
XB=XN1*XC3-XC1*XN3
XC=XN1*XC2-XC1*XN2
Disc=XB*XB-4*XA*XC
tryxiiszero = 0
if Disc < 0:
tryxiiszero = 1
else:
Disc=sp.sqrt(Disc)
ROOT1=0.5*(-XB+Disc)/XA
ROOT2=0.5*(-XB-Disc)/XA
B= max(ROOT1,ROOT2)
D=-min(ROOT1,ROOT2)
if D >= 1:
tryxiiszero = 1
else:
A=(1+B)*(2+B)*(3+B)/(4*(B+D))*((1+D)*alam2-(3-D)*alam3)
C=-(1-D)*(2-D)*(3-D)/(4*(B+D))*((1-B)*alam2-(3+B)*alam3)
XI=alam1-A/(1+B)-C/(1-D)
success = 0
if C >= 0 and (A+C)>= 0:
success = 1
## CAN'T FIND VALID ESTIMATES FOR XI UNRESTRICTED, SO TRY XI=0
## ESTIMATE B AND D FOR XI=0
if tryxiiszero == 1:
iFail=1
XI=0
ZN1=4*alam1-11*alam2+9*alam3
ZN2=-alam2+3*alam3
ZN3=alam2+alam3
ZC1=10*alam1-29*alam2+35*alam3-16*alam4
ZC2=-alam2+5*alam3-4*alam4
ZC3=alam2-alam4
ZA=ZN2*ZC3-ZC2*ZN3
ZB=ZN1*ZC3-ZC1*ZN3
ZC=ZN1*ZC2-ZC1*ZN2
Disc=ZB*ZB-4*ZA*ZC
FitPareto = 0
if Disc < 0:
FitPareto = 1
else:
Disc=sp.sqrt(Disc)
ROOT1=0.5*(-ZB+Disc)/ZA
ROOT2=0.5*(-ZB-Disc)/ZA
B= max(ROOT1,ROOT2)
D=-min(ROOT1,ROOT2)
if D >= 1:
FitPareto = 1
else:
## ESTIMATE A AND C
A= (1+B)*(2+B)/(B+D)*(alam1-(2-D)*alam2)
C=-(1-D)*(2-D)/(B+D)*(alam1-(2+B)*alam2)
if C >= 0 and (A+C) >= 0:
success = 1
if FitPareto == 1:
iFail=2
D=-(1-3*xmom[2])/(1+xmom[2])
C=(1-D)*(2-D)*xmom[1]
B=0
A=0
XI=xmom[0]-C/(1-D)
if D > 0:
success = 1
else:
A=C
B=-D
C=0
D=0
if success == 1:
para = []
para.append(XI)
para.append(A)
para.append(B)
para.append(C)
para.append(D)
return(para)
#############################################################
def pelwei(lmom):
if len(lmom) < 3:
print("Insufficient L-Moments: Need 3")
return
if lmom[1] <= 0 or lmom[2] >= 1:
print("L-Moments Invalid")
return
pg = pelgev.pelgev([-lmom[0],lmom[1],-lmom[2]])
delta = 1/pg[2]
beta = pg[1]/pg[2]
out = [-pg[0]-beta,beta,delta]
return(out)
#############################################################
##QUANTILE FUNCTIONS
#############################################################
def quaexp(F,para):
U = para[0]
A = para[1]
if A <= 0:
print("Parameters Invalid")
return
if F <= 0 or F >= 1:
print("F Value Invalid")
return
QUAEXP = U-A*sp.log(1-F)
return(QUAEXP)
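# Worked example (illustrative): quaexp(0.5, [0.0, 1.0]) = 0.0 - 1.0*log(1 - 0.5)
# = log(2) ~ 0.693, the median of a unit exponential distribution.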
#############################################################
def quagam(F,para):
EPS = 1e-10
maxit = 30
QUAGAM = 0
Alpha = para[0]
Beta = para[1]
if Alpha <= 0 or Beta <= 0:
print("Parameters Invalid")
return
if F<=0 or F>= 1:
print("F Value Invalid")
return
AM1 = Alpha - 1
if AM1 != 0:
DLOGG = spsp.gammaln(Alpha)
if AM1 <= 0:
Root = sp.exp((sp.log(Alpha*F)+DLOGG)/Alpha)
else:
Root = Alpha*(1-1/(9*Alpha) + quastn.quastn(F)/sp.sqrt(9*Alpha))**3
if Root <= 0.01*Alpha:
Root = sp.exp((sp.log(Alpha*F)+DLOGG)/Alpha)
for it in range(1,maxit+1):
FUNC = spsp.gammainc(Alpha,Root)-F
RINC = FUNC*sp.exp(DLOGG+Root-AM1*sp.log(Root))
Root = Root-RINC
if abs(FUNC) <= EPS:
QUAGAM = Root*Beta
return(QUAGAM)
else:
QUAGAM = -sp.log(1-F)*Beta
return(QUAGAM)
print("Result failed to converge")
return
#############################################################
def quagev(F,para):
U = para[0]
A = para[1]
G = para[2]
if A <= 0:
print("Parameters Invalid")
return
    if F <= 0 or F >= 1:
        if F == 0 and G < 0:
            QUAGEV = U+A/G
            return(QUAGEV)
        elif F == 1 and G > 0:
            QUAGEV = U+A/G
            return(QUAGEV)
        else:
            print("F Value Invalid")
            return
else:
Y = -sp.log(-sp.log(F))
if G != 0:
Y = (1-sp.exp(-G*Y))/G
QUAGEV = U+A*Y
return(QUAGEV)
#############################################################
def quaglo(F,para):
U = para[0]
A = para[1]
G = para[2]
if A <= 0:
print("Invalid Parameters")
return
if F <= 0 or F >= 1:
if F == 0 and G < 0:
QUAGLO = U+A/G
return(QUAGLO)
elif F == 1 and G > 0:
QUAGLO = U+A/G
return(QUAGLO)
else:
print("F Value Invalid")
return
Y = sp.log(F/(1-F))
if G != 0:
Y = (1-sp.exp(-G*Y))/G
QUAGLO = U+A*Y
return(QUAGLO)
#############################################################
def quagno(F,para):
U = para[0]
A = para[1]
G = para[2]
if A <= 0:
print("Invalid Parameters")
return
if F <= 0 or F >= 1:
if F == 0 and G < 0:
QUAGNO = U+A/G
return(QUAGNO)
elif F == 1 and G > 0:
QUAGNO = U+A/G
return(QUAGNO)
else:
print("F Value Invalid")
return
Y = quastn.quastn(F)
if G != 0:
Y = (1-sp.exp(-G*Y))/G
QUAGNO = U+A*Y
return(QUAGNO)
#############################################################
def quagpa(F,para):
U = para[0]
A = para[1]
G = para[2]
if A <= 0:
print("Invalid parameters")
return
if F <= 0 or F >= 1:
if F == 0:
QUAGPA = U
return(QUAGPA)
elif F == 1 and G > 0:
QUAGPA = U + A/G
return(QUAGPA)
else:
print("F Value Invalid")
return
Y = -sp.log(1-F)
if G !=0:
Y = (1-sp.exp(-G*Y))/G
QUAGPA = U+A*Y
return(QUAGPA)
#############################################################
def quagum(F,para):
U = para[0]
A = para[1]
if A <= 0:
print("Parameters Invalid")
return
if F <= 0 or F >= 1:
print("F Value Invalid")
return
QUAGUM = U-A*sp.log(-sp.log(F))
return(QUAGUM)
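# Worked example (illustrative): quagum(0.5, [0.0, 1.0]) = -log(-log(0.5)) ~ 0.3665,
# the median of a standard Gumbel distribution.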
#############################################################
def quakap(F,para):
U = para[0]
A = para[1]
G = para[2]
H = para[3]
if A <= 0:
print("Invalid Parameters")
return
if F <= 0 or F>= 1:
if F==0:
if H<=0 and G < 0:
QUAKAP = U+A/G
if H<= 0 and G>= 0:
print("F Value Invalid")
return
if H > 0 and G!= 0:
QUAKAP = U+A/G*(1-H**(-G))
if H > 0 and G == 0:
QUAKAP = U+A*sp.log(H)
return(QUAKAP)
if F == 1:
if G <= 0:
print("F Value Invalid")
return
else:
QUAKAP = U+A/G
return(QUAKAP)
else:
Y = -sp.log(F)
if H!=0:
Y = (1-sp.exp(-H*Y))/H
Y = -sp.log(Y)
if G!= 0:
Y = (1-sp.exp(-G*Y))/G
QUAKAP = U+A*Y
return(QUAKAP)
#############################################################
def quanor(F,para):
if para[1] <= 0:
print("Parameters Invalid")
return
if F <= 0 or F >= 1:
print("F Value Invalid")
return
QUANOR = para[0]+para[1]*quastn.quastn(F)
return(QUANOR)
#############################################################
def quape3(F,para):
SMALL = 1e-6
if para[1]<= 0:
print("Paremters Invalid")
return
Gamma = para[2]
if F <= 0 or F >= 1:
if F == 0 and Gamma >0:
QUAPE3 = para[0]-2*para[1]/Gamma
return(QUAPE3)
elif F == 1 and Gamma < 0:
QUAPE3 = para[0]-2*para[1]/Gamma
return(QUAPE3)
else:
print("F Value Invalid")
return
if abs(Gamma) < SMALL:
        QUAPE3 = para[0] + para[1]*quastn.quastn(F)
return(QUAPE3)
Alpha = 4/(Gamma*Gamma)
Beta = abs(0.5*para[1]*Gamma)
par = [Alpha,Beta]
if Gamma > 0:
QUAPE3 = para[0]-Alpha*Beta+quagam.quagam(F,par)
if Gamma < 0:
QUAPE3 = para[0]+Alpha*Beta-quagam.quagam(1-F,par)
return(QUAPE3)
#############################################################
def quastn(F):
split1 = 0.425
split2 = 5
const1 = 0.180625
const2 = 1.6
[A0,A1,A2,A3,A4,A5,A6,A7,B1,B2,B3,B4,B5,B6,B7] = [0.338713287279636661e1,
0.133141667891784377e3, 0.197159095030655144e4,
0.137316937655094611e5, 0.459219539315498715e5,
0.672657709270087009e5, 0.334305755835881281e5,
0.250908092873012267e4, 0.423133307016009113e2,
0.687187007492057908e3, 0.539419602142475111e4,
0.212137943015865959e5, 0.393078958000927106e5,
0.287290857357219427e5, 0.522649527885285456e4]
[C0,C1,C2,C3,C4,C5,C6,C7,D1,D2,D3,D4,D5,D6,D7] = [0.142343711074968358e1,
0.463033784615654530e1, 0.576949722146069141e1,
0.364784832476320461e1, 0.127045825245236838e1,
0.241780725177450612e0, 0.227238449892691846e-1,
0.774545014278341408e-3, 0.205319162663775882e1,
0.167638483018380385e1, 0.689767334985100005e0,
0.148103976427480075e0, 0.151986665636164572e-1,
0.547593808499534495e-3, 0.105075007164441684e-8]
[E0,E1,E2,E3,E4,E5,E6,E7,F1,F2,F3,F4,F5,F6,F7] = [0.665790464350110378e1,
0.546378491116411437e1, 0.178482653991729133e1,
0.296560571828504891e0, 0.265321895265761230e-1,
0.124266094738807844e-2, 0.271155556874348758e-4,
0.201033439929228813e-6, 0.599832206555887938e0,
0.136929880922735805e0, 0.148753612908506149e-1,
0.786869131145613259e-3, 0.184631831751005468e-4,
0.142151175831644589e-6, 0.204426310338993979e-14]
Q = F-0.5
if abs(Q) > split1:
R=F
if Q >= 0:
R = 1-F
        if R <= 0:
            print("F Value Invalid")
            return
R = sp.sqrt(-sp.log(R))
if R > split2:
R = R - split2
QUASTN=((((((((E7*R+E6)*R+E5)*R+E4)*R+E3)*R+E2)*R+E1)*R+E0)/
(((((((F7*R+F6)*R+F5)*R+F4)*R+F3)*R+F2)*R+F1)*R+1))
if Q < 0:
QUASTN = -QUASTN
return(QUASTN)
else:
R=R-const2
QUASTN=((((((((C7*R+C6)*R+C5)*R+C4)*R+C3)*R+C2)*R+C1)*R+C0)/
(((((((D7*R+D6)*R+D5)*R+D4)*R+D3)*R+D2)*R+D1)*R+1))
if Q < 0:
QUASTN = -QUASTN
return(QUASTN)
else:
R = const1-Q*Q
QUASTN = Q*((((((((A7*R+A6)*R+A5)*R+A4)*R+A3)*R+A2)*R+A1)*R+A0)/
(((((((B7*R+B6)*R+B5)*R+B4)*R+B3)*R+B2)*R+B1)*R+1))
return(QUASTN)
#############################################################
def quawak(F,para):
ufl = -170
    XI = para[0]
# -*- coding: UTF-8 -*-
import os
import json
import csv
from utils import parse_conf_args, Configuration, path, mysql, log
class trans_goldinfo:
def __init__(self, context, configs):
log_conf = None if context.get("log") is None else context.get("log").get(configs.get("logId"))
        # initialize the logger
        self.logger = log.get_logger(category="trans_gold", configs=log_conf)
        if log_conf is None:
            self.logger.warning("trans_goldinfo: no Log configuration provided")
        # initialize the database connection
        self.mysqlDB = mysql(configs=context.get("mysql")[configs.get("mysqlId")])
        # initialize the template path
        self.initTemplate = context.get("init")[configs.get("initId")]
        self.tradesystemid = configs.get("tradesystemid")
        self.SettlementGroupID = configs.get("settlementGroupID")
        self.file_instrument = "gold_instrument.csv"
        self.file_marketdata = "gold_depthmarketdata.csv"
        # mapping between the exchange and its settlement group
self.__transform()
def __transform(self):
mysqlDB = self.mysqlDB
        # query the current trading day
sql = """SELECT tradingday FROM siminfo.t_tradesystemtradingday WHERE tradesystemid = %s"""
fc = mysqlDB.select(sql, (self.tradesystemid,))
current_trading_day = fc[0][0]
self.TradingDay = current_trading_day
self.logger.info("[trans_goldinfo] current_trading_day = %s" % current_trading_day)
        # read the csv files
csvs = self.__check_file()
if csvs is None:
return
if csvs[0] is not None:
            # =========== process instrument.csv and write t_Instrument ==============
            self.__t_Instrument(mysqlDB=mysqlDB, csv_file=csvs[0])
            # =========== process instrument.csv and write t_TradingSegmentAttr ==============
            self.__t_TradingSegmentAttr(mysqlDB=mysqlDB, csv_file=csvs[0])
            # =========== process instrument.csv and write t_MarginRate ==============
            self.__t_MarginRate(mysqlDB=mysqlDB, csv_file=csvs[0])
            # =========== process instrument.csv and write t_MarginRateDetail ==============
            self.__t_MarginRateDetail(mysqlDB=mysqlDB, csv_file=csvs[0])
            # =========== check and write t_InstrumentProperty ==============
            self.__t_InstrumentProperty(mysqlDB=mysqlDB, csv_file=csvs[0])
            # =========== check and write t_TransFeeRateDetail ==============
            self.__t_TransFeeRateDetail(mysqlDB=mysqlDB, csv_file=csvs[0])
            # =========== check and write t_PriceBanding ==============
            self.__t_PriceBanding(mysqlDB=mysqlDB, csv_file=csvs[0])
        if csvs[1] is not None:
            # =========== write t_MarketData ==============
            self.__t_MarketData(mysqlDB=mysqlDB, csv_file=csvs[1])
def __t_Instrument(self, mysqlDB, csv_file):
mysql_conn = mysqlDB.get_cnx()
mysql_conn.start_transaction()
try:
cursor = mysql_conn.cursor()
            # delete all existing rows for the gold exchange settlement group
cursor.execute("delete from siminfo.t_Instrument where SettlementGroupID = %s", (self.SettlementGroupID,))
sql_insert_golds = """INSERT INTO siminfo.t_Instrument(
SettlementGroupID,ProductID,
ProductGroupID,UnderlyingInstrID,
ProductClass,PositionType,PositionDateType,
StrikePrice,OptionsType,
VolumeMultiple,UnderlyingMultiple,
InstrumentID,InstrumentName,
DeliveryYear,DeliveryMonth,AdvanceMonth
)VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s, %s)"""
sql_insert_params = []
for gold in csv_file:
sql_insert_params.append((self.SettlementGroupID, gold["ProductID"],
gold["ProductID"], gold["UnderlyingInstrID"],
gold["ProductClass"], gold["PositionType"], 2,
"0", gold["OptionsType"],
gold["VolumeMultiple"],
"0",
gold["InstrumentID"],
gold["InstrumentName"].decode(encoding='gbk', errors='ignore').encode(
encoding='utf8'),
gold["DeliveryYear"], gold["DeliveryMonth"], "012"))
cursor.executemany(sql_insert_golds, sql_insert_params)
mysql_conn.commit()
finally:
mysql_conn.close()
self.logger.info("写入t_Instrument完成")
# 导入完成后写入产品表
self.__init_product()
def __init_product(self):
mysql_conn = self.mysqlDB.get_cnx()
mysql_conn.start_transaction()
try:
cursor = mysql_conn.cursor()
cursor.execute("delete from siminfo.t_ClientProductRight where SettlementGroupID = %s", (self.SettlementGroupID,))
cursor.execute("delete from siminfo.t_MarketProduct where SettlementGroupID = %s", (self.SettlementGroupID,))
cursor.execute("delete from siminfo.t_MdPubStatus where SettlementGroupID = %s", (self.SettlementGroupID,))
cursor.execute("delete from siminfo.t_PartProductRight where SettlementGroupID = %s", (self.SettlementGroupID,))
cursor.execute("delete from siminfo.t_PartProductRole where SettlementGroupID = %s", (self.SettlementGroupID,))
cursor.execute("delete from siminfo.t_Product where SettlementGroupID = %s", (self.SettlementGroupID,))
cursor.execute("delete from siminfo.t_ProductGroup where SettlementGroupID = %s", (self.SettlementGroupID,))
# t_ClientProductRight
self.logger.info("产品类型导入t_ClientProductRight")
cursor.execute("")
sql = """INSERT into siminfo.t_ClientProductRight(
SELECT SettlementGroupID,ProductID,'00000000' AS ClientID,'0' AS TradingRight
FROM siminfo.t_instrument
WHERE SettlementGroupID = %s
GROUP BY SettlementGroupID,ProductID)"""
cursor.execute(sql, (self.SettlementGroupID,))
# t_MarketProduct
self.logger.info("产品类型导入t_MarketProduct")
sql = """INSERT into siminfo.t_MarketProduct(
SELECT t.SettlementGroupID, t1.MarketID, t.ProductID
FROM siminfo.t_instrument t,siminfo.t_market t1
WHERE t.SettlementGroupID = t1.SettlementGroupID
AND t.SettlementGroupID = %s
GROUP BY t.SettlementGroupID,t.ProductID,t1.MarketID)"""
cursor.execute(sql, (self.SettlementGroupID,))
# t_MdPubStatus
self.logger.info("产品类型导入t_MdPubStatus")
sql = """INSERT into siminfo.t_MdPubStatus(
SELECT SettlementGroupID,ProductID,'3' AS InstrumentStatus,'0' AS MdPubStatus
FROM siminfo.t_instrument
WHERE SettlementGroupID = %s
GROUP BY SettlementGroupID,ProductID)"""
cursor.execute(sql, (self.SettlementGroupID,))
# t_PartProductRight
self.logger.info("产品类型导入t_PartProductRight")
sql = """INSERT INTO siminfo.t_PartProductRight(
SELECT SettlementGroupID,ProductID,'00000000' AS ParticipantID,'0' AS TradingRight
FROM siminfo.t_instrument
WHERE SettlementGroupID = %s
GROUP BY SettlementGroupID,ProductID)"""
cursor.execute(sql, (self.SettlementGroupID,))
# t_PartProductRole
self.logger.info("产品类型导入t_PartProductRole")
sql = """INSERT INTO siminfo.t_PartProductRole(
SELECT SettlementGroupID,'00000000' AS ParticipantID,ProductID,'1' AS TradingRole
FROM siminfo.t_instrument
WHERE SettlementGroupID = %s
GROUP BY SettlementGroupID,ProductID)"""
cursor.execute(sql, (self.SettlementGroupID,))
# t_Product
self.logger.info("产品类型导入t_Product")
sql = """INSERT INTO siminfo.t_Product(
SELECT SettlementGroupID, ProductID, ProductGroupID, '' AS ProductName,'' AS ProductClass
FROM siminfo.t_instrument
WHERE SettlementGroupID = %s
GROUP BY SettlementGroupID,ProductID,ProductGroupID)"""
cursor.execute(sql, (self.SettlementGroupID,))
# t_ProductGroup
self.logger.info("产品类型导入t_ProductGroup")
sql = """INSERT INTO siminfo.t_ProductGroup(
SELECT SettlementGroupID,ProductGroupID,'' AS ProductGroupName,ProductGroupID as CommodityID
FROM siminfo.t_instrument
WHERE SettlementGroupID = %s
GROUP BY SettlementGroupID,ProductGroupID,ProductGroupID)"""
cursor.execute(sql, (self.SettlementGroupID,))
mysql_conn.commit()
finally:
mysql_conn.close()
def __t_TradingSegmentAttr(self, mysqlDB, csv_file):
mysql_conn = mysqlDB.get_cnx()
mysql_conn.start_transaction()
try:
cursor = mysql_conn.cursor()
            # delete all existing rows for the gold exchange settlement group
cursor.execute("delete from siminfo.t_TradingSegmentAttr where SettlementGroupID = %s", (self.SettlementGroupID,))
sql_insert_segment = """INSERT INTO siminfo.t_TradingSegmentAttr (
SettlementGroupID,TradingSegmentSN,
TradingSegmentName,StartTime,
InstrumentStatus,DayOffset,InstrumentID
) VALUES (%s,%s,%s,%s,%s,%s,%s)"""
sql_insert_params = []
            # load the trading segment template data
            segment_attr = self.__loadJSON(tableName='t_TradingSegmentAttr')
            if segment_attr is None:
                self.logger.error("t_TradingSegmentAttr template does not exist")
return
SGID = self.SettlementGroupID
for gold in csv_file:
                # check whether the settlement group exists in the template
if SGID in segment_attr:
params = self.__get_segment_attr(attr=segment_attr[SGID],
instrument=gold["InstrumentID"])
sql_insert_params += params
cursor.executemany(sql_insert_segment, sql_insert_params)
mysql_conn.commit()
finally:
mysql_conn.close()
self.logger.info("写入t_TradingSegmentAttr完成")
# 通过产品代码生成目标合约的交易时间段
def __get_segment_attr(self, attr, instrument):
gold = attr['gold']
all_trading_time = attr['tradingTime']
exist_trading_time = []
        # collect the product codes present in the current template
for segment in gold:
if str(instrument) in str(gold[segment]):
exist_trading_time.append(segment)
        # if the product is not in the template, fall back to the settlement group's daytime trading segments
params = []
if len(exist_trading_time) == 0:
for segment in all_trading_time["day"]:
params.append((segment[0], segment[1], segment[2], segment[3], segment[4], segment[5], instrument))
else:
segment_list = []
for exist in exist_trading_time:
segment_list += all_trading_time[exist]
for segment in segment_list:
params.append((segment[0], segment[1], segment[2], segment[3], segment[4], segment[5], instrument))
return params
def __t_MarginRate(self, mysqlDB, csv_file):
mysql_conn = mysqlDB.get_cnx()
mysql_conn.start_transaction()
try:
            # load the template file
template = self.__loadJSON(tableName='t_MarginRate')
if template is None:
self.logger.error("t_MarginRate template is None")
return
cursor = mysql_conn.cursor()
            # delete all existing rows for the gold exchange settlement group
cursor.execute("delete from siminfo.t_MarginRate where SettlementGroupID = %s", (self.SettlementGroupID,))
sql_insert_rate = """INSERT INTO siminfo.t_MarginRate (
SettlementGroupID,
MarginCalcID,
InstrumentID,
ParticipantID
) VALUES (%s,%s,%s,%s)"""
sql_insert_params = []
for gold in csv_file:
SGID = self.SettlementGroupID
if SGID in template:
sql_insert_params.append((SGID, template[SGID][1], gold["InstrumentID"], template[SGID][3]))
cursor.executemany(sql_insert_rate, sql_insert_params)
mysql_conn.commit()
finally:
mysql_conn.close()
self.logger.info("写入t_MarginRate完成")
def __t_MarginRateDetail(self, mysqlDB, csv_file):
mysql_conn = mysqlDB.get_cnx()
mysql_conn.start_transaction()
try:
            # load the template file
template = self.__loadJSON(tableName='t_MarginRateDetail')
if template is None:
self.logger.error("t_MarginRateDetail template is None")
return
cursor = mysql_conn.cursor()
            # delete all existing rows for the gold exchange settlement group
cursor.execute("delete from siminfo.t_MarginRateDetail where SettlementGroupID = %s", (self.SettlementGroupID,))
sql_insert_detail = """INSERT INTO siminfo.t_MarginRateDetail (
SettlementGroupID,TradingRole,HedgeFlag,
ValueMode,LongMarginRatio,ShortMarginRatio,
InstrumentID,ParticipantID,ClientID
) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)"""
sql_insert_params = []
for gold in csv_file:
SGID = self.SettlementGroupID
if SGID in template:
sql_insert_params.append(
self.__get_margin_rate_detail(attr=template[SGID],
instrument=gold["InstrumentID"]))
cursor.executemany(sql_insert_detail, sql_insert_params)
mysql_conn.commit()
finally:
mysql_conn.close()
self.logger.info("写入t_MarginRateDetail完成")
# 通过产品代码生成目标合约的保证金率
def __get_margin_rate_detail(self, attr, instrument):
template = attr["template"]
margin_ratio = attr["marginRatio"]
        # check whether the product code exists in the template
if instrument in margin_ratio.keys():
params = (template[0], template[1], template[2], template[3], margin_ratio[instrument][0],
margin_ratio[instrument][1], instrument, template[9], template[10])
else:
params = (template[0], template[1], template[2], template[3], template[4], template[5], instrument,
template[9], template[10])
return params
def __t_InstrumentProperty(self, mysqlDB, csv_file):
mysql_conn = mysqlDB.get_cnx()
mysql_conn.start_transaction()
try:
cursor = mysql_conn.cursor()
cursor.execute("delete from siminfo.t_InstrumentProperty where SettlementGroupID = %s", (self.SettlementGroupID,))
sql_Property = """INSERT INTO siminfo.t_InstrumentProperty (
SettlementGroupID,CreateDate,OpenDate,ExpireDate,StartDelivDate,
EndDelivDate,BasisPrice,MaxMarketOrderVolume,MinMarketOrderVolume,
MaxLimitOrderVolume,MinLimitOrderVolume,PriceTick,
AllowDelivPersonOpen,InstrumentID,InstLifePhase
)VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"""
sql_params = []
for gold in csv_file:
SGID = self.SettlementGroupID
sql_params.append((SGID, gold["CreateDate"], gold["OpenDate"], gold["ExpireDate"],
gold["StartDelivDate"], gold["EndDelivDate"], 0,
gold["MaxMarketOrderVolume"],
gold["MinMarketOrderVolume"],
gold["MaxLimitOrderVolume"],
gold["MinLimitOrderVolume"],
gold["PriceTick"],
0, gold["InstrumentID"], 1))
cursor.executemany(sql_Property, sql_params)
mysql_conn.commit()
finally:
mysql_conn.close()
self.logger.info("写入t_InstrumentProperty完成")
def __t_TransFeeRateDetail(self, mysqlDB, csv_file):
mysql_conn = mysqlDB.get_cnx()
mysql_conn.start_transaction()
try:
            # load the template file
template = self.__loadJSON(tableName='t_TransFeeRateDetail')
if template is None:
self.logger.error("t_TransFeeRateDetail template is None")
return
cursor = mysql_conn.cursor()
            # delete all existing rows for the gold exchange settlement group
cursor.execute("delete from siminfo.t_transfeeratedetail where SettlementGroupID = %s", (self.SettlementGroupID,))
sql_insert_detail = """insert into siminfo.t_transfeeratedetail(
SettlementGroupID,TradingRole,HedgeFlag,ValueMode,OpenFeeRatio,
CloseYesterdayFeeRatio,CloseTodayFeeRatio,MinOpenFee,MinCloseFee,
MaxOpenFee,MaxCloseFee,InstrumentID,ParticipantID,
ClientID) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"""
sql_insert_params = []
for gold in csv_file:
SGID = self.SettlementGroupID
if SGID in template:
sql_insert_params.append(
self.__get_trans_fee_rate_detail(attr=template[SGID],
instrument=gold["InstrumentID"]))
cursor.executemany(sql_insert_detail, sql_insert_params)
mysql_conn.commit()
finally:
mysql_conn.close()
self.logger.info("写入t_TransFeeRateDetail完成")
# 通过产品代码生成目标合约的保证金率
def __get_trans_fee_rate_detail(self, attr, instrument):
template = attr["template"]
trans_fee = attr["transFee"]
        # check whether the product code exists in the template
if instrument in trans_fee.keys():
params = (template[0], template[1], template[2], trans_fee[instrument][1],
trans_fee[instrument][0], trans_fee[instrument][0], trans_fee[instrument][0],
template[7], template[8], template[9], template[10], instrument, template[12],
template[13])
else:
params = (template[0], template[1], template[2], template[3], template[4], template[5], template[6],
template[7], template[8], template[9], template[10], instrument, template[12],
template[13])
return params
def __t_PriceBanding(self, mysqlDB, csv_file):
mysql_conn = mysqlDB.get_cnx()
mysql_conn.start_transaction()
try:
            # load the template file
template = self.__loadJSON(tableName='t_PriceBanding')
if template is None:
self.logger.error("t_PriceBanding template is None")
return
cursor = mysql_conn.cursor()
            # delete all existing rows for the gold exchange settlement group
cursor.execute("delete from siminfo.t_PriceBanding where SettlementGroupID = %s", (self.SettlementGroupID,))
sql_insert_price = """INSERT INTO siminfo.t_PriceBanding (
SettlementGroupID,PriceLimitType,ValueMode,RoundingMode,
UpperValue,LowerValue,InstrumentID,TradingSegmentSN
) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)"""
sql_insert_params = []
for gold in csv_file:
SGID = self.SettlementGroupID
if SGID in template:
sql_insert_params.append((SGID, template[SGID][1], template[SGID][2], template[SGID][3],
template[SGID][4], template[SGID][5], gold["InstrumentID"],
template[SGID][7]))
cursor.executemany(sql_insert_price, sql_insert_params)
mysql_conn.commit()
finally:
mysql_conn.close()
self.logger.info("写入t_PriceBanding完成")
def __t_MarketData(self, mysqlDB, csv_file):
mysql_conn = mysqlDB.get_cnx()
mysql_conn.start_transaction()
try:
cursor = mysql_conn.cursor()
cursor.execute("delete from siminfo.t_MarketData where SettlementGroupID = %s", (self.SettlementGroupID,))
sql_insert = """INSERT INTO siminfo.t_MarketData (
TradingDay,SettlementGroupID,LastPrice,PreSettlementPrice,
PreClosePrice,PreOpenInterest,OpenPrice,
HighestPrice,LowestPrice,Volume,Turnover,
OpenInterest,ClosePrice,SettlementPrice,
UpperLimitPrice,LowerLimitPrice,PreDelta,
CurrDelta,UpdateTime,UpdateMillisec,InstrumentID
)VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"""
sql_params = []
for gold in csv_file:
SGID = self.SettlementGroupID
sql_params.append(
(self.TradingDay, SGID, None, gold["PreSettlementPrice"], gold["PreClosePrice"],
gold["PreOpenInterest"], None,
None, None, None, None,
None, None, None,
None, None, None,
None, "15:15:00", "100", gold["InstrumentID"]))
cursor.executemany(sql_insert, sql_params)
mysql_conn.commit()
finally:
mysql_conn.close()
self.logger.info("写入t_MarketData完成")
def __check_file(self, file_name=None):
env_dist = os.environ
        # check whether the HOME environment variable is set
if 'HOME' not in env_dist:
self.logger.error("HOME not in environment variable")
return None
        # get the file paths
        catalog = env_dist['HOME']
        catalog = '%s%s%s%s%s' % (catalog, os.path.sep, 'sim_data', os.path.sep, self.TradingDay)
        # instrument information
        instrument = '%s%s%s' % (catalog, os.path.sep, self.file_instrument)
        # market data information
        depthmarketdata = '%s%s%s' % (catalog, os.path.sep, self.file_marketdata)
        # check whether instrument.csv exists; if not, set it to None
        if not os.path.exists(instrument):
            self.logger.error("%s%s" % (instrument, " does not exist"))
            instrument = None
        # check whether depthmarketdata.csv exists; if not, set it to None
        if not os.path.exists(depthmarketdata):
            self.logger.error("%s%s" % (depthmarketdata, " does not exist"))
            depthmarketdata = None
        # read the CSV files
if file_name is None:
return self.__loadCSV(instrument), self.__loadCSV(depthmarketdata)
elif file_name == 'instrument':
return self.__loadCSV(instrument)
elif file_name == 'depthmarketdata':
return self.__loadCSV(depthmarketdata)
def __loadCSV(self, csv_file):
if csv_file is None:
return None
else:
return [row for row in csv.DictReader(open(csv_file))]
    # mainly used to read the template data
def __loadJSON(self, tableName):
_output = path.convert(self.initTemplate['initTemplate'])
_path = "%s%s%s%s" % (_output, | |
kubernetes_asyncio.client.V1EnvVar(name="OPSANI_ENVOY_PROXY_SERVICE_PORT", value=str(service_port)),
kubernetes_asyncio.client.V1EnvVar(name="OPSANI_ENVOY_PROXIED_CONTAINER_PORT", value=str(container_port)),
kubernetes_asyncio.client.V1EnvVar(name="OPSANI_ENVOY_PROXY_METRICS_PORT", value="9901")
],
ports=[
kubernetes_asyncio.client.V1ContainerPort(name="opsani-proxy", container_port=service_port),
kubernetes_asyncio.client.V1ContainerPort(name="opsani-metrics", container_port=9901),
]
)
# add the sidecar to the Deployment
if index is None:
self.obj.spec.template.spec.containers.append(container)
else:
self.obj.spec.template.spec.containers.insert(index, container)
# patch the deployment
await self.patch()
async def eject_sidecar(self, name: str) -> bool:
"""Eject an Envoy sidecar from the Deployment.
Returns True if the sidecar was ejected.
"""
await self.refresh()
container = self.remove_container(name)
if container:
await self.replace()
return True
return False
@contextlib.asynccontextmanager
async def rollout(self, *, timeout: Optional[servo.DurationDescriptor] = None) -> None:
"""Asynchronously wait for changes to a deployment to roll out to the cluster."""
# NOTE: The timeout_seconds argument must be an int or the request will fail
timeout_seconds = int(servo.Duration(timeout).total_seconds()) if timeout else None
# Resource version lets us track any change. Observed generation only increments
# when the deployment controller sees a significant change that requires rollout
resource_version = self.resource_version
observed_generation = self.status.observed_generation
desired_replicas = self.replicas
self.logger.info(f"applying adjustments to Deployment '{self.name}' and rolling out to cluster")
# Yield to let the changes be made
yield self
# Return fast if nothing was changed
if self.resource_version == resource_version:
self.logger.info(
f"adjustments applied to Deployment '{self.name}' made no changes, continuing"
)
return
# Create a Kubernetes watch against the deployment under optimization to track changes
self.logger.debug(
f"watching deployment Using label_selector={self.label_selector}, resource_version={resource_version}"
)
async with kubernetes_asyncio.client.api_client.ApiClient() as api:
v1 = kubernetes_asyncio.client.AppsV1Api(api)
async with kubernetes_asyncio.watch.Watch().stream(
v1.list_namespaced_deployment,
self.namespace,
label_selector=self.label_selector,
timeout_seconds=timeout_seconds,
) as stream:
async for event in stream:
# NOTE: Event types are ADDED, DELETED, MODIFIED, ERROR
# TODO: Create an enum...
event_type, deployment = event["type"], event["object"]
status:kubernetes_asyncio.client.V1DeploymentStatus = deployment.status
self.logger.debug(
f"deployment watch yielded event: {event_type} {deployment.kind} {deployment.metadata.name} in {deployment.metadata.namespace}: {status}"
)
if event_type == "ERROR":
stream.stop()
# FIXME: Not sure what types we expect here
raise servo.AdjustmentRejectedError(str(deployment), reason="start-failed")
# Check that the conditions aren't reporting a failure
if status.conditions:
self._check_conditions(status.conditions)
# Early events in the watch may be against previous generation
if status.observed_generation == observed_generation:
self.logger.debug(
"observed generation has not changed, continuing watch"
)
continue
# Check the replica counts. Once available, updated, and ready match
# our expected count and the unavailable count is zero we are rolled out
if status.unavailable_replicas:
self.logger.debug(
"found unavailable replicas, continuing watch",
status.unavailable_replicas,
)
continue
replica_counts = [
status.replicas,
status.available_replicas,
status.ready_replicas,
status.updated_replicas,
]
if replica_counts.count(desired_replicas) == len(replica_counts):
# We are done: all the counts match. Stop the watch and return
self.logger.success(f"adjustments to Deployment '{self.name}' rolled out successfully", status)
stream.stop()
return
            # the watch doesn't raise a TimeoutError when the timeout elapses; treat falling through as a timeout
raise WatchTimeoutError()
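    # Illustrative usage sketch (an assumed calling convention, not taken from the
    # original code): the caller applies its change inside the block, e.g.
    #
    #     async with deployment.rollout(timeout="10m"):
    #         # apply the adjusted pod template (e.g. new resource requests), then patch
    #         await deployment.patch()
    #
    # and the context manager then watches the Deployment until the replica counts
    # (available/ready/updated) match the desired count or the watch times out.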
def _check_conditions(self, conditions: List[kubernetes_asyncio.client.V1DeploymentCondition]) -> None:
for condition in conditions:
if condition.type == "Available":
if condition.status == "True":
# If we hit on this and have not raised yet we are good to go
break
elif condition.status in ("False", "Unknown"):
# Condition has not yet been met, log status and continue monitoring
self.logger.debug(
f"Condition({condition.type}).status == '{condition.status}' ({condition.reason}): {condition.message}"
)
else:
raise servo.AdjustmentFailedError(
f"encountered unexpected Condition status '{condition.status}'"
)
elif condition.type == "ReplicaFailure":
# TODO: Check what this error looks like
raise servo.AdjustmentRejectedError(
f"ReplicaFailure: message='{condition.status.message}', reason='{condition.status.reason}'",
reason="start-failed"
)
elif condition.type == "Progressing":
if condition.status in ("True", "Unknown"):
# Still working
self.logger.debug("Deployment update is progressing", condition)
break
elif condition.status == "False":
raise servo.AdjustmentRejectedError(
f"ProgressionFailure: message='{condition.status.message}', reason='{condition.status.reason}'",
reason="start-failed"
)
else:
raise servo.AdjustmentFailedError(
f"unknown deployment status condition: {condition.status}"
)
async def raise_for_status(self, adjustments: List[servo.Adjustment]) -> None:
# NOTE: operate off of current state, assuming you have checked is_ready()
status = self.obj.status
self.logger.trace(f"current deployment status is {status}")
if status is None:
raise RuntimeError(f'No such deployment: {self.name}')
if not status.conditions:
raise RuntimeError(f'Deployment is not running: {self.name}')
# Check for failure conditions
self._check_conditions(status.conditions)
await self.raise_for_failed_pod_adjustments(adjustments=adjustments)
# Catchall
self.logger.trace(f"unable to map deployment status to exception. Deployment: {self.obj}")
raise RuntimeError(f"Unknown Deployment status for '{self.name}': {status}")
async def raise_for_failed_pod_adjustments(self, adjustments: List[servo.Adjustment]):
pods = await self.get_latest_pods()
self.logger.trace(f"latest pod(s) status {list(map(lambda p: p.obj.status, pods))}")
unschedulable_pods = [
pod for pod in pods
if pod.obj.status.conditions and any(
cond.reason == "Unschedulable" for cond in pod.obj.status.conditions
)
]
if unschedulable_pods:
pod_messages = []
for pod in unschedulable_pods:
cond_msgs = []
for unschedulable_condition in filter(lambda cond: cond.reason == "Unschedulable", pod.obj.status.conditions):
unschedulable_adjustments = list(filter(lambda a: a.setting_name in unschedulable_condition.message, adjustments))
cond_msgs.append(
f"Requested adjustment(s) ({', '.join(map(str, unschedulable_adjustments))}) cannot be scheduled due to \"{unschedulable_condition.message}\""
)
pod_messages.append(f"{pod.obj.metadata.name} - {'; '.join(cond_msgs)}")
raise servo.AdjustmentRejectedError(
f"{len(unschedulable_pods)} pod(s) could not be scheduled for deployment {self.name}: {', '.join(pod_messages)}",
reason="unschedulable"
)
image_pull_failed_pods = [
pod for pod in pods
if pod.obj.status.container_statuses and any(
cont_stat.state and cont_stat.state.waiting and cont_stat.state.waiting.reason in ["ImagePullBackOff", "ErrImagePull"]
for cont_stat in pod.obj.status.container_statuses
)
]
if image_pull_failed_pods:
raise servo.AdjustmentFailedError(
f"Container image pull failure detected on {len(image_pull_failed_pods)} pods: {', '.join(map(lambda pod: pod.obj.metadata.name, pods))}",
reason="image-pull-failed"
)
restarted_pods_container_statuses = [
(pod, cont_stat) for pod in pods for cont_stat in (pod.obj.status.container_statuses or [])
if cont_stat.restart_count > 0
]
if restarted_pods_container_statuses:
pod_to_counts = collections.defaultdict(list)
for pod_cont_stat in restarted_pods_container_statuses:
pod_to_counts[pod_cont_stat[0].obj.metadata.name].append(f"{pod_cont_stat[1].name} x{pod_cont_stat[1].restart_count}")
pod_message = ", ".join(map(
lambda kv_tup: f"{kv_tup[0]} - {'; '.join(kv_tup[1])}",
list(pod_to_counts.items())
))
raise servo.AdjustmentRejectedError(
f"Deployment {self.name} pod(s) crash restart detected: {pod_message}",
reason="unstable"
)
# Unready pod catchall
unready_pod_conds = [
(pod, cond) for pod in pods for cond in (pod.obj.status.conditions or [])
if cond.type == "Ready" and cond.status == "False"
]
if unready_pod_conds:
pod_message = ", ".join(map(
lambda pod_cond: f"{pod_cond[0].obj.metadata.name} - (reason {pod_cond[1].reason}) {pod_cond[1].message}",
unready_pod_conds
))
raise servo.AdjustmentRejectedError(
f"Found {len(unready_pod_conds)} unready pod(s) for deployment {self.name}: {pod_message}",
reason="start-failed"
)
async def get_restart_count(self) -> int:
count = 0
for pod in await self.get_latest_pods():
try:
count += await pod.get_restart_count()
except kubernetes_asyncio.client.exceptions.ApiException as error:
if error.status == 404:
# Pod no longer exists, move on
pass
else:
raise error
return count
# Workarounds to allow use of api_client.deserialize() public method instead of private api_client._ApiClient__deserialize
# TODO: is this workaround worth it just to avoid using the private method?
# fix for https://github.com/kubernetes-client/python/issues/977#issuecomment-594045477
def default_kubernetes_json_serializer(o: Any) -> Any:
if isinstance(o, (datetime.datetime, datetime.date)):
return o.isoformat()
raise TypeError(f'Object of type {o.__class__.__name__} '
f'is not JSON serializable')
# https://github.com/kubernetes-client/python/issues/977#issuecomment-592030030
class FakeKubeResponse:
"""Mocks the RESTResponse object as a workaround for kubernetes python api_client deserialization"""
def __init__(self, obj):
self.data = json.dumps(obj, default=default_kubernetes_json_serializer)
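# Illustrative usage sketch (assumed, following the workaround linked above): wrapping a
# plain dict in FakeKubeResponse lets the public ApiClient.deserialize() turn it into a
# typed model, e.g.
#     api_client = kubernetes_asyncio.client.ApiClient()
#     deployment = api_client.deserialize(FakeKubeResponse(raw_dict), "V1Deployment")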
# Use alias generator so that dromedary case can be parsed to snake case properties to match k8s python client behaviour
def to_dromedary_case(string: str) -> str:
split = string.split('_')
return split[0] + ''.join(word.capitalize() for word in split[1:])
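# Example: to_dromedary_case("min_ready_seconds") returns "minReadySeconds", which is how
# the alias generator maps the snake_case fields below onto the camelCase keys used in
# the Rollout manifests.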
class RolloutBaseModel(pydantic.BaseModel):
class Config:
# arbitrary_types_allowed = True
alias_generator = to_dromedary_case
allow_population_by_field_name = True
# Pydantic type models for argo rollout spec: https://argoproj.github.io/argo-rollouts/features/specification/
# https://github.com/argoproj/argo-rollouts/blob/master/manifests/crds/rollout-crd.yaml
# NOTE/TODO: fields typed with Any should maintain the same form when dumped as when they are parsed. Should the need
# arise to interact with such fields, they will need to have an explicit type defined so the alias_generator is applied
class RolloutV1LabelSelector(RolloutBaseModel): # must type out k8s models as well to allow parse_obj to work
match_expressions: Any
match_labels: Optional[Dict[str, str]]
class RolloutV1ObjectMeta(RolloutBaseModel):
annotations: Optional[Dict[str, str]]
cluster_name: Optional[str]
creation_timestamp: Optional[datetime.datetime]
deletion_grace_period_seconds: Optional[int]
deletion_timestamp: Optional[datetime.datetime]
finalizers: Optional[List[str]]
generate_name: Optional[str]
generation: Optional[int]
labels: Optional[Dict[str, str]]
managed_fields: Any
name: Optional[str]
namespace: Optional[str]
owner_references: Any
resource_version: Optional[str]
self_link: Optional[str]
uid: Optional[str]
class RolloutV1EnvVar(RolloutBaseModel):
name: str
value: Optional[str]
value_from: Any
class RolloutV1ContainerPort(RolloutBaseModel):
container_port: int
host_ip: Optional[str]
host_port: Optional[int]
name: Optional[str]
protocol: Optional[str]
class RolloutV1ResourceRequirements(RolloutBaseModel):
limits: Optional[Dict[str, str]]
requests: Optional[Dict[str, str]]
class RolloutV1Container(RolloutBaseModel):
args: Optional[List[str]]
command: Optional[List[str]]
env: Optional[List[RolloutV1EnvVar]]
env_from: Any
image: str
image_pull_policy: Optional[str]
lifecycle: Any
liveness_probe: Any
name: str
ports: Optional[List[RolloutV1ContainerPort]]
readiness_probe: Any
resources: Optional[RolloutV1ResourceRequirements]
security_context: Any
startup_probe: Any
stdin: Optional[bool]
stdin_once: Optional[bool]
termination_message_path: Optional[str]
termination_message_policy: Optional[str]
tty: Optional[bool]
volume_devices: Any
volume_mounts: Any
working_dir: Optional[str]
class RolloutV1PodSpec(RolloutBaseModel):
active_deadline_seconds: Optional[int]
affinity: Any
automount_service_account_token: Optional[bool]
containers: List[RolloutV1Container]
dns_config: Any
dns_policy: Optional[str]
enable_service_links: Optional[bool]
ephemeral_containers: Any
host_aliases: Any
host_ipc: Optional[bool]
host_network: Optional[bool]
host_pid: Optional[bool]
hostname: Optional[str]
image_pull_secrets: Any
init_containers: Optional[List[RolloutV1Container]]
node_name: Optional[str]
node_selector: Optional[Dict[str, str]]
overhead: Optional[Dict[str, str]]
preemption_policy: Optional[str]
priority: Optional[int]
priority_class_name: Optional[str]
readiness_gates: Any
restart_policy: Optional[str]
runtime_class_name: Optional[str]
scheduler_name: Optional[str]
security_context: Any
service_account: Optional[str]
service_account_name: Optional[str]
share_process_namespace: Optional[bool]
subdomain: Optional[str]
termination_grace_period_seconds: Optional[int]
tolerations: Any
topology_spread_constraints: Any
volumes: Any
class RolloutV1PodTemplateSpec(RolloutBaseModel):
metadata: RolloutV1ObjectMeta
spec: RolloutV1PodSpec
class RolloutV1WorkloadRef(RolloutBaseModel):
api_version: str
kind: str
name: str
class RolloutSpec(RolloutBaseModel):
replicas: int
selector: Optional[RolloutV1LabelSelector]
template: Optional[RolloutV1PodTemplateSpec]
workload_ref: Optional[RolloutV1WorkloadRef]
min_ready_seconds: Optional[int]
| |
# This file is automatically generated. Do not edit.
glyph2tile = [
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 26, 28, 29, 30, 31, 32, 34, 35, 36, 37,
38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85,
86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97,
98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109,
110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133,
134, 135, 137, 138, 139, 140, 141, 142, 143, 144, 145, 147,
148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171,
172, 173, 174, 175, 176, 177, 178, 179, 181, 182, 183, 184,
185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196,
197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208,
209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220,
221, 222, 223, 224, 225, 226, 227, 228, 230, 231, 232, 233,
234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245,
246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257,
258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269,
270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281,
282, 283, 284, 285, 286, 287, 288, 290, 291, 292, 293, 294,
295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306,
307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 319,
320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331,
332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343,
344, 345, 346, 347, 348, 349, 350, 351, 352, 355, 356, 357,
358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 369, 370,
371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 382, 383,
384, 385, 386, 387, 388, 389, 390, 391, 392, 0, 1, 2,
3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
28, 29, 30, 31, 32, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76,
77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88,
89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112,
113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124,
125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 137,
138, 139, 140, 141, 142, 143, 144, 145, 147, 148, 149, 150,
151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162,
163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174,
175, 176, 177, 178, 179, 181, 182, 183, 184, 185, 186, 187,
188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199,
200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211,
212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223,
224, 225, 226, 227, 228, 230, 231, 232, 233, 234, 235, 236,
237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248,
249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260,
261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272,
273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
285, 286, 287, 288, 290, 291, 292, 293, 294, 295, 296, 297,
298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309,
310, 311, 312, 313, 314, 315, 316, 317, 319, 320, 321, 322,
323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334,
335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346,
347, 348, 349, 350, 351, 352, 355, 356, 357, 358, 359, 360,
361, 362, 363, 364, 365, 366, 367, 369, 370, 371, 372, 373,
374, 375, 376, 377, 378, 379, 380, 382, 383, 384, 385, 386,
387, 388, 389, 390, 391, 392, 393, 0, 1, 2, 3, 4,
5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 28, 29,
30, 31, 32, 34, 35, 36, 37, 38, 39, 40, 41, 42,
43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66,
67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78,
79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102,
103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114,
115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126,
127, 128, 129, 130, 131, 132, 133, 134, 135, 137, 138, 139,
140, 141, 142, 143, 144, 145, 147, 148, 149, 150, 151, 152,
153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164,
165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176,
177, 178, 179, 181, 182, 183, 184, 185, 186, 187, 188, 189,
190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201,
202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213,
214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225,
226, 227, 228, 230, 231, 232, 233, 234, 235, 236, 237, 238,
239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250,
251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262,
263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286,
287, 288, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299,
300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311,
312, 313, 314, 315, 316, 317, 319, 320, 321, 322, 323, 324,
325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336,
337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348,
349, 350, 351, 352, 355, 356, 357, 358, 359, 360, 361, 362,
363, 364, 365, 366, 367, 369, 370, 371, 372, 373, 374, 375,
376, 377, 378, 379, 380, 382, 383, 384, 385, 386, 387, 388,
389, 390, 391, 392, 636, 636, 636, 636, 636, 636, 636, 636,
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636,
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636,
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636,
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636,
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636,
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636,
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636,
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636,
636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636, 636,
    636, 636, 636, 636, 636, 636, 636, 636, 636,
'include_tags': self.include_tags, 'exclude_tags': self.exclude_tags,
'C_ACC_Version': self.CC_ACC_Version, 'CPP_ACC_Version': self.CPP_ACC_Version,
'FC_ACC_Version': self.FC_ACC_Version, 'PreCompileCommands': self.PreCompileCommands,
'PostCompileCommands': self.PostCompileCommands, 'PreRunCommands': self.PreRunCommands,
'PostRunCommands': self.PostRunCommands}
return returned
class tag_evaluation:
def __init__(self, tag_string):
self.tag_order = get_all_tags(tag_string)
self.lookup = self.recursive_build(self.tag_order, [], tag_string)
def eval_fast(self, tags):
temp_reference = self.lookup
for x in self.tag_order:
if x in tags:
temp_reference = temp_reference[True]
else:
temp_reference = temp_reference[False]
return temp_reference
def recursive_build(self, remaining_tags, values, tag_string):
if len(remaining_tags) == 0:
return self.eval_slow(values, tag_string)
else:
lookup = {False: self.recursive_build(remaining_tags[1:], values + [False], tag_string),
True: self.recursive_build(remaining_tags[1:], values + [True], tag_string)}
return lookup
def eval_slow(self, values, tag_string):
depth = 0
passed_str = ""
start = 0
for x in list(range(len(tag_string))):
if tag_string[x] == "(":
if depth > 0:
passed_str = passed_str + tag_string[x]
else:
start = x
depth += 1
elif tag_string[x] == ")":
if depth == 1:
tag_string = tag_string[:start] + str(self.eval_slow(values, passed_str)) + tag_string[x + 1:]
passed_str = ""
depth -= 1
depth -= 1
else:
if depth > 0:
passed_str = passed_str + tag_string[x]
internal_tag_order = []
internal_values = []
for x in self.tag_order:
internal_tag_order.append(x)
for x in values:
internal_values.append(x)
for x in list(range(len(self.tag_order))):
internal_tag_order.append("!" + self.tag_order[x])
internal_values.append(not values[x])
internal_tag_order.append("!False")
internal_values.append(True)
internal_tag_order.append("!True")
internal_values.append(False)
ops = []
for x in list(range(len(internal_tag_order))):
tag_string = tag_string.replace(internal_tag_order[x], str(internal_values[x]))
for x in tag_string:
if x == "|":
ops.append("|")
elif x == "&":
ops.append("&")
tag_string = tag_string.replace("|", "&")
bools = tag_string.split("&")
result = None
if len(ops) == 0:
return bo(bools[0])
if ops[0] == "|":
result = bo(bools[0]) or bo(bools[1])
if ops[0] == "&":
result = bo(bools[0]) and bo(bools[1])
if len(ops) == 1:
return result
        for x in list(range(1, len(ops))):
            if ops[x] == "|":
                result = result or bo(bools[x + 1])
            if ops[x] == "&":
                result = result and bo(bools[x + 1])
return result
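# A minimal usage sketch for tag_evaluation (illustrative only; the tag names
# "openacc" and "serial" are invented for the example, not taken from a real
# configuration):
#
#     evaluator = tag_evaluation("openacc&!serial")
#     evaluator.eval_fast(["openacc"])            # True: "openacc" present, "serial" absent
#     evaluator.eval_fast(["openacc", "serial"])  # False: "serial" excludes the test
#
# eval_fast() only walks the pre-built lookup dict, so the boolean expression is
# parsed once (in recursive_build) no matter how many tag sets are checked.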
def process_conditionals(command, res, err):
    # Strips ONPASS:/ONERROR:/ONFAIL: prefixes from a command, checking each
    # condition against the previous step's return code (res) and stderr (err).
    # Returns the remaining command string if every condition held, else False.
    found_pass = False
    while command.strip().upper().startswith("ONPASS:") or command.strip().upper().startswith("ONERROR:") or command.strip().upper().startswith("ONFAIL:"):
        if command.strip().upper().startswith("ONPASS:"):
            if res == 0:
                command = command[7:]
                found_pass = True
                continue
        if command.strip().upper().startswith("ONERROR:"):
            if err != "":
                command = command[8:]
                found_pass = True
                continue
        if command.strip().upper().startswith("ONFAIL:"):
            if res != 0:
                command = command[7:]
                found_pass = True
                continue
        # A conditional prefix was present but its condition did not hold.
        return False
    if found_pass:
        return command
    return False
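# Illustrative sketch of how a conditional command line is resolved (the command
# text below is invented for the example, not taken from a real config):
#
#     process_conditionals("ONPASS: make run", 0, "")   # -> " make run"
#     process_conditionals("ONPASS: make run", 1, "")   # -> False (previous step failed)
#     process_conditionals("echo hello", 0, "")         # -> False (no conditional prefix)
#
# A False return tells the caller to skip the command rather than execute it.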
def get_all_tags(tag_string):
round0 = tag_string.split("|")
round1 = []
round2 = []
round3 = []
final = []
for x in list(range(len(round0))):
round1 = round1 + round0[x].split("&")
for x in list(range(len(round1))):
round2 = round2 + round1[x].split("(")
for x in list(range(len(round2))):
round3 = round3 + round2[x].split(")")
    for x in round3:
        cleaned = x.replace("!", "")
        if cleaned != "" and cleaned not in final:
            final.append(cleaned)
return final
def bo(text):
    # Convert the string form of a boolean back to bool; anything other than
    # "False" (ignoring surrounding whitespace) is treated as True.
    if text.strip() == "False":
        return False
    else:
        return True
def command_replace(command, find, replace):
    # Replaces occurrences of `find` in `command`, skipping occurrences that are
    # escaped with a leading '$'. The search text is treated literally, and the
    # replacements are applied right-to-left so that earlier match offsets stay
    # valid when the replacement length differs from the search length.
    matches = re.finditer(re.escape(find), command)
    false_matches = re.finditer(re.escape("$" + find), command)
    match_inds = []
    for x in matches:
        match_inds.append(x.start())
    false_match_inds = []
    for x in false_matches:
        false_match_inds.append(x.start())
    for ind in reversed(match_inds):
        if ind - 1 not in false_match_inds:
            command = command[0:ind] + replace + command[ind + len(find):]
    return command
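# Example of the '$'-escape behaviour (the placeholder name "FLAGS" is invented
# for the illustration and does not correspond to a real config key):
#
#     command_replace("cc FLAGS $FLAGS", "FLAGS", "-O2")
#     # -> "cc -O2 $FLAGS"   (the '$FLAGS' occurrence is left untouched)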
def translate(data, skipDict=False):
if isinstance(data, dict) and not skipDict:
returned = {}
for key in data.keys():
returned[translate(key, skipDict=True)] = translate(data[key], skipDict=True)
return returned
elif isinstance(data, list):
return[translate(key, skipDict=True) for key in data]
elif isinstance(data, unicode):
return data.encode('utf-8')
else:
return data
def jsonLoadWrapper(f):
if sys.version_info[0] == 3:
return json.load(f)
else:
return translate(json.load(f, object_hook=translate), skipDict=True)
def isFortran(testname):
    # Fortran source extensions recognised by the test suite.
    return testname[-4:] in (".F90", ".f90", ".for", ".f95", ".f03", ".F03")
def isC(testname):
    # C sources are identified solely by the '.c' extension.
    return testname[-2:] == ".c"
def isCPP(testname):
    # C++ source extensions recognised by the test suite.
    return testname.endswith((".cc", ".C", ".cxx", ".c++", ".cpp", ".CPP", ".cp"))
def testsuite_compare(ts1, ts2):
if len(list(set(ts1.keys()) ^ set(ts2.keys()))) > 0:
return False
for key in ts1.keys():
if key == "id":
continue
if not complex_compare(ts1[key], ts2[key]):
return False
return True
def complex_compare(obj1, obj2):
if isinstance(obj1, list):
if isinstance(obj2, list):
return list_compare(obj1, obj2)
else:
return False
if isinstance(obj1, dict):
if isinstance(obj2, dict):
if len(list(set(obj1.keys()) ^ set(obj2.keys()))) > 0:
return False
same = True
for key in obj1.keys():
                same = same and complex_compare(obj1[key], obj2[key])
return same
else:
return False
return obj1 == obj2
def list_compare(list1, list2):
if len(list1) != len(list2):
return False
for x in list1:
if not x in list2:
return False
for x in list2:
if not x in list1:
return False
return True
def run_command(command):
if g_shell is None:
t_shell = shellInterface()
return t_shell.runCommand(command)
return g_shell.runCommand(command)
def passed(results_array):
if results_array[0] != 0 or len(results_array[2]) > 0:
return False
return True
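# The result of run_command() is assumed to be a (return_code, stdout, stderr)
# triple -- passed() checks the return code at index 0 and the stderr text at
# index 2, so index 1 is taken here to be stdout. A usage sketch:
#
#     res = run_command("echo hello")
#     if passed(res):
#         print(res[1])   # command output (assumed stdout slot)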
def assert_created_directory(path):
if not isdir(path):
res = run_command("mkdir " + path)
if not passed(res):
print("There was an issue creating the directory: " + path)
if g_verbose['oserrors']:
print(res[2])
print(res[0])
sys.exit()
def generatePathListFromString(string):
homedir = dirname(realpath(__file__))
parts = string.split(',')
curfilename = ""
paths = []
for part in parts:
if curfilename != "":
curfilename = curfilename + ',' + part
else:
curfilename = part
if isfile(curfilename):
paths.append(curfilename)
curfilename = ""
elif isfile(join(homedir, curfilename)):
paths.append(join(homedir, curfilename))
curfilename = ""
if curfilename != "":
print("Could not parse given config paths. Please check to make sure they exist")
return paths
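# The comma re-joining above lets a single config path itself contain commas.
# A sketch with hypothetical file names (shown only to illustrate the parsing):
#
#     generatePathListFromString("gcc,offload.json,laptop.json")
#     # -> ["gcc,offload.json", "laptop.json"], provided "gcc,offload.json"
#     #    exists as a file while "gcc" alone does not.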
def print_warnings():
if sys.version_info[0] == 2 or (sys.version_info[0] == 3 and sys.version_info[1] < 3):
print("Will not be able to terminate processing running past timeout.")
print("To use this feature, please use python 3.3 or greater")
def export_env(outpath):
temp_shell = shellInterface()
current_env = temp_shell.env
f = open(outpath, 'w')
json.dump(current_env, f, indent=4, sort_keys=True)
f.close()
print("Exported current environment to: " + outpath)
def clean_build_dir(dirname):
if isdir(dirname):
for filename in listdir(dirname):
if isfile(join(dirname, filename)):
remove(join(dirname, filename))
elif isdir(join(dirname, filename)):
clean_build_dir(join(dirname, filename))
rmdir(join(dirname, filename))
def copy_headers_to_dir(tl, dirname):
header_prefix = "acc_testsuite"
for x in [(tl.FortranTestLocation, ".Fh"), (tl.CPPTestLocation, ".h"), (tl.CTestLocation, ".h")]:
if isfile(join(x[0], header_prefix + x[1])):
try:
shutil.copyfile(join(x[0], header_prefix + x[1]), join(dirname, header_prefix + x[1]))
except OSError:
if g_verbose['oserrors'] or g_verbose['debug']:
print("Error copying header file to mutated test directory")
if g_verbose['debug']:
traceback.print_exc()
print("H")
elif g_verbose['oserrors'] or g_verbose['debug'] or g_verbose['info']:
print("Could not find header file. If linking is handled in compilation flags, this can be ignored")
OpenACCVersions = ["1.0", "2.0", "2.5", "2.6", "2.7"]
g_config = None # type: Optional[config]
g_system = None # type: Optional[system]
g_results = None # type: Optional[results]
g_shell = None # type: Optional[shellInterface]
g_testsuite = None # type: Optional[TestList]
g_subprocess_runtime = 0
g_verbose = {'commands': False, 'results': False, 'errors': False, 'output': False, 'oserrors': False, 'debug': False, 'info': False}
def main():
start = time()
print_warnings()
global g_system
global g_config
global g_results
global g_testsuite
global g_shell
infiles = [] # type: List[str]
outfile = None # type: Optional[str]
config_path = [] # type: Optional[List[str]]
temp_system_name = None # type: str
clean = False
specific_tests = []
run_configs = []
for x in list(range(1, len(sys.argv))):
if sys.argv[x].startswith("-c="):
if config_path != []:
print("Please have only one '-c=' flag. If you want to run multiple configuations, please concatinate them as a comma deliniated list following the '-c='")
sys.exit()
config_path = '='.join(sys.argv[x].split("=")[1:]).strip()
config_path = generatePathListFromString(config_path)
elif sys.argv[x].startswith("-o="):
if outfile is not None:
print("Please have only one '-o= flag. Can only output one results file.")
sys.exit()
outfile = '='.join(sys.argv[x].split("=")[1:]).strip()
elif sys.argv[x].startswith("-in="):
if infiles != []:
print("Please have only one '-in=' flag. If you want to merge multiple results files, please concatinate them as a comma deliniated list following the 'in='")
sys.exit()
infiles = ('='.join(sys.argv[x].split("=")[1:]).strip()).split(",")
elif sys.argv[x].lower() == "verbose":
g_verbose['commands'] = True
g_verbose['results'] = True
g_verbose['errors'] = True
g_verbose['output'] = True
g_verbose['oserrors'] = True
g_verbose['info'] = True
elif sys.argv[x].lower().startswith('-system='):
temp_system_name = sys.argv[x].split('=')[-1]
elif sys.argv[x].lower().startswith("-env="):
export_env('='.join(sys.argv[x].split('=')[1:]))
elif sys.argv[x].lower() == "clean":
clean = True
else:
specific_tests.append(sys.argv[x])
#print("Did not understand arg: " + sys.argv[x])
if outfile is None and (config_path != [] or infiles != []) :
print("Please specify an output file with the argument '-o=<output_file>'")
sys.exit()
for config_index in list(range(len(config_path))):
g_config = config()
g_config.include_config(config_path[config_index])
g_config.finalize_config()
if clean:
clean_build_dir(g_config.build_dir)
clean_build_dir(g_config.partial_results_dir)
clean_build_dir(g_config.mutated_test_dir)
if not isdir(g_config.build_dir):
mkdir(g_config.build_dir)
if not isdir(g_config.partial_results_dir):
mkdir(g_config.partial_results_dir)
if not isdir(g_config.mutated_test_dir):
mkdir(g_config.mutated_test_dir)
if temp_system_name is not None:
if g_config.system_name | |
"""DistributedObjectAI module: contains the DistributedObjectAI class"""
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.distributed.DistributedObjectBase import DistributedObjectBase
from direct.showbase import PythonUtil
from pandac.PandaModules import *
#from PyDatagram import PyDatagram
#from PyDatagramIterator import PyDatagramIterator
class DistributedObjectAI(DistributedObjectBase):
notify = directNotify.newCategory("DistributedObjectAI")
QuietZone = 1
def __init__(self, air):
try:
self.DistributedObjectAI_initialized
except:
self.DistributedObjectAI_initialized = 1
DistributedObjectBase.__init__(self, air)
self.accountName=''
# Record the repository
self.air = air
# Record our distributed class
className = self.__class__.__name__
self.dclass = self.air.dclassesByName[className]
# init doId pre-allocated flag
self.__preallocDoId = 0
# used to track zone changes across the quiet zone
# NOTE: the quiet zone is defined in OTP, but we need it
# here.
self.lastNonQuietZone = None
self._DOAI_requestedDelete = False
# These are used to implement beginBarrier().
self.__nextBarrierContext = 0
self.__barriers = {}
self.__generated = False
# reference count for multiple inheritance
self.__generates = 0
self._zoneData = None
# Uncomment if you want to debug DO leaks
#def __del__(self):
# """
# For debugging purposes, this just prints out what got deleted
# """
# print ("Destructing: " + self.__class__.__name__)
if __debug__:
def status(self, indent=0):
"""
print out doId(parentId, zoneId) className
and conditionally show generated, disabled, neverDisable,
or cachable
"""
spaces=' '*(indent+2)
try:
print "%s%s:"%(
' '*indent, self.__class__.__name__)
print "%sfrom DistributedObject doId:%s, parent:%s, zone:%s"%(
spaces,
self.doId, self.parentId, self.zoneId),
flags=[]
if self.__generated:
flags.append("generated")
if self.air == None:
flags.append("deleted")
if len(flags):
print "(%s)"%(" ".join(flags),),
print
except Exception, e: print "%serror printing status"%(spaces,), e
def getDeleteEvent(self):
# this is sent just before we get deleted
if hasattr(self, 'doId'):
return 'distObjDelete-%s' % self.doId
return None
def sendDeleteEvent(self):
# this is called just before we get deleted
delEvent = self.getDeleteEvent()
if delEvent:
messenger.send(delEvent)
def getCacheable(self):
""" This method exists only to mirror the similar method on
DistributedObject. AI objects aren't cacheable. """
return False
def deleteOrDelay(self):
""" This method exists only to mirror the similar method on
DistributedObject. AI objects don't have delayDelete, they
just get deleted immediately. """
self.delete()
def getDelayDeleteCount(self):
return 0
def delete(self):
"""
Inheritors should redefine this to take appropriate action on delete
Note that this may be called multiple times if a class inherits
from DistributedObjectAI more than once.
"""
self.__generates -= 1
if self.__generates < 0:
self.notify.debug('DistributedObjectAI: delete() called more times than generate()')
if self.__generates == 0:
# prevent this code from executing multiple times
if self.air is not None:
# self.doId may not exist. The __dict__ syntax works around that.
assert self.notify.debug('delete(): %s' % (self.__dict__.get("doId")))
if not self._DOAI_requestedDelete:
# this logs every delete that was not requested by us.
# TODO: this currently prints warnings for deletes of objects
# that we did not create. We need to add a 'locally created'
# flag to every object to filter these out.
"""
DistributedObjectAI.notify.warning(
'delete() called but requestDelete never called for %s: %s'
% (self.__dict__.get('doId'), self.__class__.__name__))
"""
"""
# print a stack trace so we can detect whether this is the
# result of a network msg.
# this is slow.
from direct.showbase.PythonUtil import StackTrace
DistributedObjectAI.notify.warning(
'stack trace: %s' % StackTrace())
"""
self._DOAI_requestedDelete = False
self.releaseZoneData()
# Clean up all the pending barriers.
for barrier in self.__barriers.values():
barrier.cleanup()
self.__barriers = {}
self.air.stopTrackRequestDeletedDO(self)
# DCR: I've re-enabled this block of code so that Toontown's
# AI won't leak channels.
# Let me know if it causes trouble.
### Asad: As per Roger's suggestion, turn off the following
### block until a solution is thought out of how to prevent
### this delete message or to handle this message better
# TODO: do we still need this check?
if not hasattr(self, "doNotDeallocateChannel"):
if self.air and not hasattr(self.air, "doNotDeallocateChannel"):
if self.air.minChannel <= self.doId <= self.air.maxChannel:
self.air.deallocateChannel(self.doId)
self.air = None
self.parentId = None
self.zoneId = None
self.__generated = False
def isDeleted(self):
"""
Returns true if the object has been deleted,
        or if it is brand new and hasn't yet been generated.
"""
return self.air == None
def isGenerated(self):
"""
Returns true if the object has been generated
"""
return self.__generated
def getDoId(self):
"""
Return the distributed object id
"""
return self.doId
def preAllocateDoId(self):
"""
objects that need to have a doId before they are generated
can call this to pre-allocate a doId for the object
"""
assert not self.__preallocDoId
self.doId = self.air.allocateChannel()
self.__preallocDoId = 1
def announceGenerate(self):
"""
Called after the object has been generated and all
of its required fields filled in. Overwrite when needed.
"""
pass
def addInterest(self, zoneId, note="", event=None):
self.air.addInterest(self.doId, zoneId, note, event)
def b_setLocation(self, parentId, zoneId):
self.d_setLocation(parentId, zoneId)
self.setLocation(parentId, zoneId)
def d_setLocation(self, parentId, zoneId):
self.air.sendSetLocation(self, parentId, zoneId)
def setLocation(self, parentId, zoneId):
        # Prevent duplicate setLocation calls.
if (self.parentId == parentId) and (self.zoneId == zoneId):
return
oldParentId = self.parentId
oldZoneId = self.zoneId
self.air.storeObjectLocation(self, parentId, zoneId)
if ((oldParentId != parentId) or
(oldZoneId != zoneId)):
self.releaseZoneData()
messenger.send(self.getZoneChangeEvent(), [zoneId, oldZoneId])
# if we are not going into the quiet zone, send a 'logical' zone
# change message
if zoneId != DistributedObjectAI.QuietZone:
lastLogicalZone = oldZoneId
if oldZoneId == DistributedObjectAI.QuietZone:
lastLogicalZone = self.lastNonQuietZone
self.handleLogicalZoneChange(zoneId, lastLogicalZone)
self.lastNonQuietZone = zoneId
def getLocation(self):
try:
if self.parentId <= 0 and self.zoneId <= 0:
return None
# This is a -1 stuffed into a uint32
if self.parentId == 0xffffffff and self.zoneId == 0xffffffff:
return None
return (self.parentId, self.zoneId)
except AttributeError:
return None
def postGenerateMessage(self):
self.__generated = True
messenger.send(self.uniqueName("generate"), [self])
def updateRequiredFields(self, dclass, di):
dclass.receiveUpdateBroadcastRequired(self, di)
self.announceGenerate()
self.postGenerateMessage()
def updateAllRequiredFields(self, dclass, di):
dclass.receiveUpdateAllRequired(self, di)
self.announceGenerate()
self.postGenerateMessage()
def updateRequiredOtherFields(self, dclass, di):
dclass.receiveUpdateBroadcastRequired(self, di)
# Announce generate after updating all the required fields,
# but before we update the non-required fields.
self.announceGenerate()
self.postGenerateMessage()
dclass.receiveUpdateOther(self, di)
def updateAllRequiredOtherFields(self, dclass, di):
dclass.receiveUpdateAllRequired(self, di)
# Announce generate after updating all the required fields,
# but before we update the non-required fields.
self.announceGenerate()
self.postGenerateMessage()
dclass.receiveUpdateOther(self, di)
def sendSetZone(self, zoneId):
self.air.sendSetZone(self, zoneId)
def startMessageBundle(self, name):
self.air.startMessageBundle(name)
def sendMessageBundle(self):
self.air.sendMessageBundle(self.doId)
def getZoneChangeEvent(self):
# this event is generated whenever this object changes zones.
# arguments are newZoneId, oldZoneId
# includes the quiet zone.
return DistributedObjectAI.staticGetZoneChangeEvent(self.doId)
def getLogicalZoneChangeEvent(self):
# this event is generated whenever this object changes to a
# non-quiet-zone zone.
# arguments are newZoneId, oldZoneId
# does not include the quiet zone.
return DistributedObjectAI.staticGetLogicalZoneChangeEvent(self.doId)
@staticmethod
def staticGetZoneChangeEvent(doId):
return 'DOChangeZone-%s' % doId
@staticmethod
def staticGetLogicalZoneChangeEvent(doId):
return 'DOLogicalChangeZone-%s' % doId
def handleLogicalZoneChange(self, newZoneId, oldZoneId):
"""this function gets called as if we never go through the
quiet zone. Note that it is called once you reach the newZone,
and not at the time that you leave the oldZone."""
messenger.send(self.getLogicalZoneChangeEvent(),
[newZoneId, oldZoneId])
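    # Other AI objects typically listen for these events through the standard
    # messenger/DirectObject interface; a sketch (the listener object and the
    # handler name here are hypothetical):
    #
    #     self.accept(distObjAI.getLogicalZoneChangeEvent(), self.handleAvatarZoneChange)
    #
    # where handleAvatarZoneChange(newZoneId, oldZoneId) receives the arguments
    # sent by handleLogicalZoneChange above.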
def getZoneData(self):
# Call this to get an AIZoneData object for the current zone.
# This class will hold onto it as self._zoneData
# setLocation destroys self._zoneData if we move away to
# a different zone
if self._zoneData is None:
from otp.ai.AIZoneData import AIZoneData
self._zoneData = AIZoneData(self.air, self.parentId, self.zoneId)
return self._zoneData
def releaseZoneData(self):
# You can call this to release any AIZoneData object that we might be
# holding onto. If we're the last one for the current zone, the data
# will be destroyed (render, collision traverser, etc.)
# Note that the AIZoneData object that we're holding will be destroyed
# automatically when we move away or are destroyed.
if self._zoneData is not None:
self._zoneData.destroy()
self._zoneData = None
def getRender(self):
# note that this will return a different node if we change zones
#return self.air.getRender(self.zoneId)
return self.getZoneData().getRender()
def getNonCollidableParent(self):
return self.getZoneData().getNonCollidableParent()
def getParentMgr(self):
#return self.air.getParentMgr(self.zoneId)
return self.getZoneData().getParentMgr()
def getCollTrav(self, *args, **kArgs):
return self.getZoneData().getCollTrav(*args, **kArgs)
def sendUpdate(self, fieldName, args = []):
assert self.notify.debugStateCall(self)
if self.air:
self.air.sendUpdate(self, fieldName, args)
def GetPuppetConnectionChannel(self, doId):
return doId + (1L << 32)
def GetAccountConnectionChannel(self, doId):
return doId + (3L << 32)
def GetAccountIDFromChannelCode(self, channel):
return channel >> 32
def GetAvatarIDFromChannelCode(self, channel):
return channel & 0xffffffffL
def sendUpdateToAvatarId(self, avId, fieldName, args):
assert self.notify.debugStateCall(self)
channelId = self.GetPuppetConnectionChannel(avId)
self.sendUpdateToChannel(channelId, fieldName, args)
def sendUpdateToAccountId(self, accountId, fieldName, args):
assert self.notify.debugStateCall(self)
channelId = self.GetAccountConnectionChannel(accountId)
self.sendUpdateToChannel(channelId, fieldName, args)
def sendUpdateToChannel(self, channelId, fieldName, args):
assert self.notify.debugStateCall(self)
if self.air:
self.air.sendUpdateToChannel(self, channelId, fieldName, | |
#!/usr/bin/env python
import json
import os
import os.path
import sys
from argparse import ArgumentParser
from pathlib import Path
import requests
import urllib3
from api.Account import Account
from api.RestApi import RestApi
from api.System import System
from api.Validation import Validation
from github.github import latest_release
from monitoring import Monitoring
from utils.utils import run_shell_command
from utils.utils import Helpers
from utils.utils import bcolors
from version import __version__
from env_vars import COMPOSE_FILE_OVERIDE, NODE_BINARY_OVERIDE, NGINX_BINARY_OVERIDE, NODE_END_POINT, \
DISABLE_VERSION_CHECK
from setup import Base, Docker, SystemD
urllib3.disable_warnings()
cli = ArgumentParser()
cli.add_argument('subcommand', help='Subcommand to run',
choices=["docker", "systemd", "api", "monitoring", "version", "optimise-node", "auth"])
apicli = ArgumentParser(
description='API commands')
api_parser = apicli.add_argument(dest="apicommand",
choices=["validation", "account", "health", "version", "universe", "metrics",
"system"])
cwd = os.getcwd()
def get_decorator(args, parent):
def decorator(func):
parser = parent.add_parser(func.__name__.replace("_", "-"), description=func.__doc__)
for arg in args:
parser.add_argument(*arg[0], **arg[1])
parser.set_defaults(func=func)
return decorator
def argument(*name_or_flags, **kwargs):
return list(name_or_flags), kwargs
dockercli = ArgumentParser(
description='Docker commands')
docker_parser = dockercli.add_subparsers(dest="dockercommand")
def dockercommand(args=[], parent=docker_parser):
return get_decorator(args, parent)
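# Together, argument() and the *command() decorator factories let each CLI
# subcommand be declared as a decorated function. A hypothetical sketch (not one
# of the real commands defined below):
#
#     @dockercommand([argument("-f", "--composefile", required=True, action="store")])
#     def logs(args):
#         print(f"would tail logs for {args.composefile}")
#
# The decorator registers a "docker logs" sub-parser (the function name with
# underscores turned into dashes) and stores the function via set_defaults, so
# the handle_* dispatchers further down can call args.func(args).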
systemdcli = ArgumentParser(
description='Systemd commands')
systemd_parser = systemdcli.add_subparsers(dest="systemdcommand")
def systemdcommand(args=[], parent=systemd_parser):
return get_decorator(args, parent)
validationcli = ArgumentParser(
description='validation commands')
validation_parser = validationcli.add_subparsers(dest="validationcommand")
def validationcommand(args=[], parent=validation_parser):
return get_decorator(args, parent)
accountcli = ArgumentParser(
description='account commands')
account_parser = accountcli.add_subparsers(dest="accountcommand")
def accountcommand(args=[], parent=account_parser):
return get_decorator(args, parent)
systemapicli = ArgumentParser(
description='systemapi commands')
systemapi_parser = systemapicli.add_subparsers(dest="systemapicommand")
def systemapicommand(args=[], parent=systemapi_parser):
return get_decorator(args, parent)
monitoringcli = ArgumentParser(
description='API command')
monitoring_parser = monitoringcli.add_subparsers(dest="monitoringcommand")
def monitoringcommand(args=[], parent=monitoring_parser):
return get_decorator(args, parent)
authcli = ArgumentParser(
description='API command')
auth_parser = authcli.add_subparsers(dest="authcommand")
def authcommand(args=[], parent=auth_parser):
return get_decorator(args, parent)
def cli_version():
return __version__
def version():
print(f"Cli - Version : {cli_version()}")
@dockercommand([
argument("-n", "--nodetype", required=True, default="fullnode", help="Type of node fullnode or archivenode",
action="store", choices=["fullnode", "archivenode"]),
argument("-t", "--trustednode", required=True,
help="Trusted node on radix network. Example format: radix//[email protected]",
action="store"),
argument("-u", "--update", help="Update the node to new version of composefile", action="store_false"),
])
def setup(args):
release = latest_release()
composefileurl = os.getenv(COMPOSE_FILE_OVERIDE,
f"https://raw.githubusercontent.com/radixdlt/node-runner/{cli_version()}/node-runner-cli/release_ymls/radix-{args.nodetype}-compose.yml")
print(f"Going to setup node type {args.nodetype} from location {composefileurl}.\n")
# TODO autoapprove
continue_setup = input(
"Do you want to continue [Y/n]?:")
if not Helpers.check_Yes(continue_setup):
print(" Quitting ....")
sys.exit()
keystore_password, file_location = Base.generatekey(keyfile_path=Helpers.get_keyfile_path(), keygen_tag=release)
Docker.setup_compose_file(composefileurl, file_location)
trustednode_ip = Helpers.parse_trustednode(args.trustednode)
compose_file_name = composefileurl.rsplit('/', 1)[-1]
action = "update" if args.update else "start"
print(f"About to {action} the node using docker-compose file {compose_file_name}, which is as below")
run_shell_command(f"cat {compose_file_name}", shell=True)
# TODO AutoApprove
should_start = input(f"\nOkay to start the node [Y/n]?:")
if Helpers.check_Yes(should_start):
if action == "update":
print(f"For update, bringing down the node using compose file {compose_file_name}")
Docker.run_docker_compose_down(compose_file_name)
Docker.run_docker_compose_up(keystore_password, compose_file_name, args.trustednode)
else:
print(f"""
---------------------------------------------------------------
Bring up node by updating the file {compose_file_name}
You can do it through cli using below command
radixnode docker stop -f {compose_file_name}
radixnode docker start -f {compose_file_name} -t {args.trustednode}
----------------------------------------------------------------
""")
@systemdcommand([
argument("-r", "--release",
help="Version of node software to install",
action="store"),
argument("-x", "--nginxrelease", help="Version of radixdlt nginx release ",action="store"),
argument("-t", "--trustednode", required=True, help="Trusted node on radix network", action="store"),
argument("-n", "--nodetype", required=True, default="fullnode", help="Type of node fullnode or archivenode",
action="store", choices=["fullnode", "archivenode"]),
argument("-i", "--hostip", required=True, help="Static Public IP of the node", action="store"),
argument("-u", "--update", help="Update the node to new version of node", action="store_false"),
])
def setup(args):
if not args.release:
release = latest_release()
else:
release = args.release
if not args.nginxrelease:
nginx_release = latest_release("radixdlt/radixdlt-nginx")
else:
nginx_release = args.nginxrelease
if args.nodetype == "archivenode":
node_type_name = 'archive'
elif args.nodetype == "fullnode":
node_type_name = 'fullnode'
else:
print(f"Node type - {args.nodetype} specificed should be either archivenode or fullnode")
sys.exit()
node_dir = '/etc/radixdlt/node'
nginx_dir = '/etc/nginx'
nginx_secrets_dir = f"{nginx_dir}/secrets"
node_secrets_dir = f"{node_dir}/secrets"
nodebinaryUrl = os.getenv(NODE_BINARY_OVERIDE,
f"https://github.com/radixdlt/radixdlt/releases/download/{release}/radixdlt-dist-{release}.zip")
# TODO add method to fetch latest nginx release
nginxconfigUrl = os.getenv(NGINX_BINARY_OVERIDE,
f"https://github.com/radixdlt/radixdlt-nginx/releases/download/{nginx_release}/radixdlt-nginx-{node_type_name}-conf.zip")
# TODO AutoApprove
continue_setup = input(
f"Going to setup node type {args.nodetype} for version {release} from location {nodebinaryUrl} and {nginxconfigUrl}. \n Do you want to continue Y/n:")
if not Helpers.check_Yes(continue_setup):
print(" Quitting ....")
sys.exit()
backup_time = Helpers.get_current_date_time()
SystemD.checkUser()
keystore_password, keyfile_location = SystemD.generatekey(node_secrets_dir, keygen_tag=release)
trustednode_ip = Helpers.parse_trustednode(args.trustednode)
SystemD.backup_file(node_secrets_dir, f"environment", backup_time)
SystemD.set_environment_variables(keystore_password, node_secrets_dir)
SystemD.backup_file(node_dir, f"default.config", backup_time)
SystemD.setup_default_config(trustednode=args.trustednode, hostip=args.hostip, node_dir=node_dir,
node_type=args.nodetype)
node_version = nodebinaryUrl.rsplit('/', 2)[-2]
SystemD.backup_file("/etc/systemd/system", "radixdlt-node.service", backup_time)
SystemD.setup_service_file(node_version, node_dir=node_dir, node_secrets_path=node_secrets_dir)
SystemD.download_binaries(nodebinaryUrl, node_dir=node_dir, node_version=node_version)
SystemD.backup_file("/lib/systemd/system", f"nginx.service", backup_time)
nginx_configured = SystemD.setup_nginx_config(nginx_config_location_Url=nginxconfigUrl,
node_type=args.nodetype,
nginx_etc_dir=nginx_dir, backup_time=backup_time)
SystemD.create_ssl_certs(nginx_secrets_dir)
if not args.update:
SystemD.start_node_service()
else:
SystemD.restart_node_service()
    if nginx_configured:
        # Both fresh setup and update bring nginx up with the new configuration.
        SystemD.start_nginx_service()
    else:
        print("Nginx not configured or not updated")
@systemdcommand([
argument("-s", "--services", default="all",
help="Name of the service either to be stopped. Valid values nginx or radixdlt-node",
choices=["all", "nginx", "radixdlt-node"], action="store")
])
def stop(args):
if args.services == "all":
SystemD.stop_nginx_service()
SystemD.stop_node_service()
elif args.services == "nginx":
SystemD.stop_nginx_service()
elif args.services == "radixdlt-node":
SystemD.stop_node_service()
else:
print(f"Invalid service name {args.services}")
sys.exit()
@systemdcommand([
argument("-s", "--services", default="all",
help="Name of the service either to be started. Valid values nginx or radixdlt-node",
choices=["all", "nginx", "radixdlt-node"], action="store")
])
def restart(args):
if args.services == "all":
SystemD.restart_node_service()
SystemD.restart_nginx_service()
elif args.services == "nginx":
SystemD.restart_nginx_service()
elif args.services == "radixdlt-node":
SystemD.restart_node_service()
else:
print(f"Invalid service name {args.services}")
sys.exit()
@dockercommand([
argument("-f", "--composefile", required=True, help="The name of compose file ", action="store"),
argument("-t", "--trustednode", required=True, help="Trusted node on radix network", action="store")
])
def start(args):
release = latest_release()
keystore_password, keyfile_location = Base.generatekey(keyfile_path=Helpers.get_keyfile_path(), keygen_tag=release)
Docker.run_docker_compose_up(keystore_password, args.composefile, args.trustednode)
@dockercommand([
argument("-f", "--composefile", required=True, help="The name of compose file ", action="store"),
argument("-v", "--removevolumes", help="Remove the volumes ", action="store_true"),
])
def stop(args):
if args.removevolumes:
        print(
            """
            Removing volumes, including the Nginx volume. The Nginx password will need to be recreated when you bring the node up again.
            """)
Docker.run_docker_compose_down(args.composefile, args.removevolumes)
@dockercommand([])
def configure(args):
Base.install_dependecies()
Base.add_user_docker_group()
@systemdcommand([])
def configure(args):
Base.install_dependecies()
SystemD.install_java()
SystemD.setup_user()
SystemD.make_etc_directory()
SystemD.make_data_directory()
SystemD.create_service_user_password()
SystemD.create_initial_service_file()
SystemD.sudoers_instructions()
@authcommand(
[
argument("-m", "--setupmode", required=True, help="Setup type whether it is DOCKER or SYSTEMD",
choices=["DOCKER", "SYSTEMD"], action="store"),
argument("-u", "--username", default="admin", help="Name of admin user", action="store")
])
def set_admin_password(args):
set_auth(args, usertype="admin")
@authcommand(
[
argument("-m", "--setupmode", required=True, help="Setup type whether it is DOCKER or SYSTEMD",
choices=["DOCKER", "SYSTEMD"], action="store"),
argument("-u", "--username", default="metrics", help="Name of metrics user", action="store")
])
def set_metrics_password(args):
set_auth(args, usertype="metrics")
@authcommand(
[
argument("-m", "--setupmode", required=True, help="Setup type whether it is DOCKER or SYSTEMD",
choices=["DOCKER", "SYSTEMD"], action="store"),
argument("-u", "--username", default="superadmin", help="Name of metrics user", action="store")
])
def set_superadmin_password(args):
set_auth(args, usertype="superadmin")
def set_auth(args, usertype):
if args.setupmode == "DOCKER":
Docker.setup_nginx_Password(usertype, args.username)
elif args.setupmode == "SYSTEMD":
SystemD.checkUser()
SystemD.install_nginx()
SystemD.setup_nginx_password("/etc/nginx/secrets", usertype, args.username)
else:
print("Invalid setupmode specified. It should be either DOCKER or SYSTEMD.")
"""
Below is the list of API commands
"""
@validationcommand()
def get_node_info(args):
Validation.get_node_info()
@validationcommand()
def get_current_epoch_data(args):
Validation.get_current_epoch_data()
@accountcommand()
def update_validator_config(args):
request_data = {
"jsonrpc": "2.0",
"method": "account.submit_transaction_single_step",
"params": {
"actions": []
},
"id": 1
}
RestApi.check_health()
validator_info = Validation.get_validator_info_json()
user = Helpers.get_nginx_user(usertype="superadmin", default_username="superadmin")
request_data = Account.register_steps(request_data, validator_info)
request_data = Account.update_steps(request_data, validator_info)
request_data = Account.add_validation_fee(request_data, validator_info)
request_data = Account.setup_update_delegation(request_data, validator_info)
request_data = Account.add_change_ownerid(request_data, validator_info)
print(f"{bcolors.WARNING}\nAbout to update node account with following{bcolors.ENDC}")
print(f"")
print(f"{bcolors.BOLD}{json.dumps(request_data, indent=4, sort_keys=True)}{bcolors.ENDC}")
submit_changes = input(f"{bcolors.BOLD}\nDo you want to continue [Y/n]{bcolors.ENDC}")
if Helpers.check_Yes(submit_changes) and len(request_data["params"]["actions"]) != 0:
Account.post_on_account(json.dumps(request_data))
else:
print(f"{bcolors.WARNING} Changes were not submitted.{bcolors.ENDC} or there are no actions to submit")
# @accountcommand()
# def unregister_validator(args):
# RestApi.check_health()
# Account.un_register_validator()
@accountcommand()
def get_info(args):
Account.get_info()
@systemapicommand()
def api_get_configuration(args):
System.api_get_configuration()
@systemapicommand()
def api_get_data(args):
System.api_get_data()
@systemapicommand()
def bft_get_configuration(args):
System.bft_get_configuration()
@systemapicommand()
def bft_get_data(args):
System.bft_get_data()
@systemapicommand()
def mempool_get_configuration(args):
System.mempool_get_configuration()
@systemapicommand()
def mempool_get_data(args):
System.mempool_get_data()
@systemapicommand()
def ledger_get_latest_proof(args):
System.ledger_get_latest_proof()
@systemapicommand()
def ledger_get_latest_epoch_proof(args):
System.ledger_get_latest_epoch_proof()
@systemapicommand()
def radix_engine_get_configuration(args):
System.radix_engine_get_configuration()
@systemapicommand()
def radix_engine_get_data(args):
System.radix_engine_get_data()
@systemapicommand()
def sync_get_configuration(args):
System.sync_get_configuration()
@systemapicommand()
def sync_get_data(args):
System.sync_get_data()
@systemapicommand()
def networking_get_configuration(args):
System.networking_get_configuration()
@systemapicommand()
def networking_get_peers(args):
System.networking_get_peers()
@systemapicommand()
def networking_get_data(args):
System.networking_get_data()
@systemapicommand()
def checkpoints_get_checkpoints(args):
System.checkpoints_get_checkpoints()
@monitoringcommand(
[argument("-m", "--setupmode", default="QUICK_SETUP_MODE",
help="Setup type whether it is QUICK_SETUP_MODE or PRODUCTION_MODE",
action="store")])
def setup(args):
if args.setupmode == "QUICK_SETUP_MODE":
monitor_url_dir = f'https://raw.githubusercontent.com/radixdlt/node-runner/{cli_version()}/monitoring'
print(f"Downloading artifacts from {monitor_url_dir}\n")
Monitoring.setup_prometheus_yml(f"{monitor_url_dir}/prometheus/prometheus.yml")
Monitoring.setup_datasource(f"{monitor_url_dir}/grafana/provisioning/datasources/datasource.yml")
Monitoring.setup_dashboard(f"{monitor_url_dir}/grafana/provisioning/dashboards/",
["dashboard.yml", "sample-node-dashboard.json"])
Monitoring.setup_monitoring_containers(f"{monitor_url_dir}/node-monitoring.yml")
Monitoring.setup_external_volumes()
monitoring_file_location = "monitoring/node-monitoring.yml"
Monitoring.start_monitoring(f"{monitoring_file_location}")
elif args.setupmode == "PRODUCTION_MODE":
print(" PRODUCTION_MODE not supported yet ")
sys.exit()
else:
print("Invalid setup mode . It should be either QUICK_SETUP_MODE or PRODUCTION_MODE")
@monitoringcommand(
[
argument("-f", "--composefile", default="monitoring/node-monitoring.yml", action="store"),
argument("-m", "--setupmode", default="QUICK_SETUP_MODE",
help="Setup type whether it is QUICK_SETUP_MODE or PRODUCTION_MODE",
action="store")
]
)
def start(args):
if args.setupmode == "QUICK_SETUP_MODE":
Monitoring.start_monitoring(f"monitoring/node-monitoring.yml")
elif args.setupmode == "PRODUCTION_MODE":
print(" PRODUCTION_MODE not supported yet ")
sys.exit()
else:
print("Invalid setup mode . It should be either QUICK_SETUP_MODE or PRODUCTION_MODE")
@monitoringcommand([
argument("-m", "--setupmode", default="QUICK_SETUP_MODE",
help="Setup type whether it is QUICK_SETUP_MODE or PRODUCTION_MODE",
action="store"),
argument("-v", "--removevolumes", help="Remove the volumes ", action="store_true")])
def stop(args):
if args.setupmode == "QUICK_SETUP_MODE":
Monitoring.stop_monitoring(f"monitoring/node-monitoring.yml", args.removevolumes)
elif args.setupmode == "PRODUCTION_MODE":
print(" PRODUCTION_MODE not supported yet ")
sys.exit()
else:
print("Invalid setup mode . It should be either QUICK_SETUP_MODE or PRODUCTION_MODE")
def optimise_node():
Base.setup_node_optimisation_config(cli_version())
def check_latest_cli():
cli_latest_version = latest_release("radixdlt/node-runner")
if os.getenv(DISABLE_VERSION_CHECK, "False").lower() not in ("true", "yes"):
if cli_version() != cli_latest_version:
os_name = "ubuntu-20.04"
print(
f"Radixnode CLI latest version is {cli_latest_version} and current version of the binary is {cli_version()}.\n.")
print(f"""
---------------------------------------------------------------
Update the CLI by running these commands
wget -O radixnode https://github.com/radixdlt/node-runner/releases/download/{cli_latest_version}/radixnode-{os_name}
chmod +x radixnode
sudo mv radixnode /usr/local/bin
""")
abort = input("Do you want to ABORT the command now to update the cli Y/n?:")
if Helpers.check_Yes(abort):
sys.exit()
def handle_validation():
args = validationcli.parse_args(sys.argv[3:])
if args.validationcommand is None:
validationcli.print_help()
else:
args.func(args)
def handle_account():
args = accountcli.parse_args(sys.argv[3:])
if args.accountcommand is None:
accountcli.print_help()
else:
args.func(args)
def handle_systemapi():
args = systemapicli.parse_args(sys.argv[3:])
if | |
num=nfft_half)
m_ns_mag[~vb_voi,:] = m_ns_mag[~vb_voi,:] * v_slope
# Merge data:--------------------------------------------------------------
cf_mag = 5000 #5000
bw_mag = 2000 #2000
cf_cmpx = cf_mag #5000
bw_cmpx = bw_mag #2000
# Alloc:
m_mag_ap = np.zeros((nfrms, nfft_half))
m_mag_det = np.zeros((nfrms, nfft_half))
# Working:
m_ph = np.angle(m_real + m_imag *1j)
m_ph_ns = np.angle(m_ns_real + m_ns_imag *1j)
m_ph_ap = m_ph_ns
m_ph_det = m_ph
m_mag_zeros = np.zeros((nfrms, nfft_half))
if b_use_ap_voi:
# Mag - ap:
m_mag_ap[vb_voi,:] = la.spectral_crossfade(m_mag_zeros[vb_voi,:], m_mag[vb_voi,:] * m_ns_mag[vb_voi,:], cf_mag, bw_mag, fs, freq_scale='hz')
m_mag_ap[~vb_voi,:] = m_mag[~vb_voi,:] * m_ns_mag[~vb_voi,:]
#-------------------------------------------------------------------------------
# Mag - det:
m_mag_det[vb_voi,:] = la.spectral_crossfade(m_mag[vb_voi,:], m_mag_zeros[vb_voi,:], cf_mag, bw_mag, fs, freq_scale='hz')
else: # Check:
# Mag - ap:
m_mag_ap[~vb_voi,:] = m_mag[~vb_voi,:] * m_ns_mag[~vb_voi,:]
# Mag - det:
m_mag_det[vb_voi,:] = m_mag[vb_voi,:]
# Debug:
m_syn_cmplx = m_mag_ap * np.exp(m_ph_ap * 1j) + m_mag_det * np.exp(m_ph_det * 1j)
m_syn_cmplx = la.add_hermitian_half(m_syn_cmplx , data_type='complex')
# bin width: bw=11.71875 Hz
# Final synth:-------------------------------------------------------------
m_syn_td = np.fft.ifft(m_syn_cmplx).real
m_syn_td = np.fft.fftshift(m_syn_td, axes=1)
v_syn_sig = la.ola(m_syn_td, v_pm, win_func=None)
# HPF:---------------------------------------------------------------------
fc = 60
order = 4
fc_norm = fc / (fs / 2.0)
bc, ac = signal.ellip(order,0.5 , 80, fc_norm, btype='highpass')
v_syn_sig = signal.lfilter(bc, ac, v_syn_sig)
# la.write_audio_file(out_dir + '/' + filename + suffix + '.wav', v_sig_syn, fs)
return v_syn_sig
'''
'''
def synthesis_with_del_comp_and_ph_encoding5(m_mag_mel_log, m_real_mel, m_imag_mel, v_f0, nfft, fs, mvf, f0_type='lf0', hf_slope_coeff=1.0, b_use_ap_voi=True, b_voi_ap_win=True):
if f0_type=='lf0':
v_f0 = np.exp(v_f0)
nfrms, ncoeffs_mag = m_mag_mel_log.shape
ncoeffs_comp = m_real_mel.shape[1]
nfft_half = nfft / 2 + 1
# Magnitude mel-unwarp:----------------------------------------------------
m_mag = np.exp(la.sp_mel_unwarp(m_mag_mel_log, nfft_half, alpha=0.77, in_type='log'))
# Complex mel-unwarp:------------------------------------------------------
f_intrp_real = interpolate.interp1d(np.arange(ncoeffs_comp), m_real_mel, kind='nearest', fill_value='extrapolate')
f_intrp_imag = interpolate.interp1d(np.arange(ncoeffs_comp), m_imag_mel, kind='nearest', fill_value='extrapolate')
m_real_mel = f_intrp_real(np.arange(ncoeffs_mag))
m_imag_mel = f_intrp_imag(np.arange(ncoeffs_mag))
# Debug:-------------------------------------------------------------------
#m_real_mel = np.pad(m_real_mel, ((0,0),(0,ncoeffs_mag-ncoeffs_comp)), 'constant', constant_values=0)
#m_imag_mel = np.pad(m_imag_mel, ((0,0),(0,ncoeffs_mag-ncoeffs_comp)), 'constant', constant_values=0)
m_real = la.sp_mel_unwarp(m_real_mel, nfft_half, alpha=0.77, in_type='log')
m_imag = la.sp_mel_unwarp(m_imag_mel, nfft_half, alpha=0.77, in_type='log')
# Debug:-------------------------------------------------------------------
#m_cmpx_orig_mag = np.absolute(m_real + m_imag * 1j)
#m_real = m_real / m_cmpx_orig_mag
#m_imag = m_imag / m_cmpx_orig_mag
# Noise Gen:---------------------------------------------------------------
v_shift = f0_to_shift(v_f0, fs, unv_frm_rate_ms=5).astype(int)
v_pm = la.shift_to_pm(v_shift)
ns_len = v_pm[-1] + (v_pm[-1] - v_pm[-2])
v_ns = np.random.uniform(-1, 1, ns_len)
# Noise Windowing:---------------------------------------------------------
l_ns_win_funcs = [ np.hanning ] * nfrms
vb_voi = v_f0 > 1 # case voiced (1 is used for safety)
if b_voi_ap_win:
for i in xrange(nfrms):
if vb_voi[i]:
l_ns_win_funcs[i] = voi_noise_window
    l_frm_ns, v_lens, v_pm_plus, v_shift_dummy, v_rights = windowing(v_ns, v_pm, win_func=l_ns_win_funcs) # Check this!!
m_frm_ns = la.frm_list_to_matrix(l_frm_ns, v_shift, nfft)
m_frm_ns = np.fft.fftshift(m_frm_ns, axes=1)
m_ns_cmplx = la.remove_hermitian_half(np.fft.fft(m_frm_ns))
m_ns_mag, m_ns_real, m_ns_imag = get_fft_params_from_complex_data(m_ns_cmplx)
# Norm:
    rms_noise = np.sqrt(np.mean(m_ns_mag**2)) # check this!!
m_ns_mag = m_ns_mag / rms_noise
# HF - enhancement:
v_slope = np.linspace(1, hf_slope_coeff, num=nfft_half)
m_ns_mag[~vb_voi,:] = m_ns_mag[~vb_voi,:] * v_slope
# Merge data:--------------------------------------------------------------
#cf_mag = 5000 #5000
#bw_mag = 2000 #2000
cf_mag = 6000 #5000
bw_mag = 4000 #2000
cf_cmpx = cf_mag #5000
bw_cmpx = bw_mag #2000
# Alloc:
m_mag_syn = np.ones((nfrms, nfft_half))
m_real_syn = np.zeros((nfrms, nfft_half))
m_imag_syn = np.zeros((nfrms, nfft_half))
if b_use_ap_voi:
# Mag:
m_mag_syn[vb_voi,:] = la.spectral_crossfade(m_mag[vb_voi,:], m_mag[vb_voi,:] * m_ns_mag[vb_voi,:], cf_mag, bw_mag, fs, freq_scale='hz')
m_mag_syn[~vb_voi,:] = m_mag[~vb_voi,:] * m_ns_mag[~vb_voi,:]
#Compx - Voi:
m_real_syn[vb_voi,:] = la.spectral_crossfade(m_real[vb_voi,:], m_ns_real[vb_voi,:], cf_cmpx, bw_cmpx, fs, freq_scale='hz')
m_imag_syn[vb_voi,:] = la.spectral_crossfade(m_imag[vb_voi,:], m_ns_imag[vb_voi,:], cf_cmpx, bw_cmpx, fs, freq_scale='hz')
#Compx - Unv:
m_real_syn[~vb_voi,:] = m_ns_real[~vb_voi,:]
m_imag_syn[~vb_voi,:] = m_ns_imag[~vb_voi,:]
else:
# Mag:
m_mag_syn[vb_voi,:] = m_mag[vb_voi,:]
m_mag_syn[~vb_voi,:] = m_mag[~vb_voi,:] * m_ns_mag[~vb_voi,:]
# Compx - Voi:
m_real_syn[vb_voi,:] = m_real[vb_voi,:]
m_imag_syn[vb_voi,:] = m_imag[vb_voi,:]
# Compx - Unv:
m_real_syn[~vb_voi,:] = m_ns_real[~vb_voi,:]
m_imag_syn[~vb_voi,:] = m_ns_imag[~vb_voi,:]
# Final synth:-------------------------------------------------------------
# Debug:--------------------------------------------------
g = (m_mag_syn * m_real_syn + m_mag_syn * m_imag_syn * 1j) / m_cmpx_mag
m_g_mag = np.absolute(g)
m_g_ph = np.angle(g)
#m_ph = np.angle(m_real_syn + m_imag_syn *1j)
#m_syn = m_mag_syn * np.exp(m_ph * 1j)
#m_syn = la.add_hermitian_half(m_syn, data_type='complex')
#m_syn = la.add_hermitian_half(m_mag_syn * m_real_syn + m_mag_syn * m_imag_syn * 1j, data_type='complex')
#------------------------------------------------------------------------
m_cmpx_mag = np.absolute(m_real_syn + m_imag_syn * 1j)
m_syn = la.add_hermitian_half((m_mag_syn * m_real_syn + m_mag_syn * m_imag_syn * 1j) / m_cmpx_mag, data_type='complex')
m_syn = np.fft.ifft(m_syn).real
m_syn = np.fft.fftshift(m_syn, axes=1)
v_sig_syn = la.ola(m_syn, v_pm, win_func=None)
# HPF:---------------------------------------------------------------------
fc = 60
order = 4
fc_norm = fc / (fs / 2.0)
bc, ac = signal.ellip(order,0.5 , 80, fc_norm, btype='highpass')
v_sig_syn = signal.lfilter(bc, ac, v_sig_syn)
return v_sig_syn, m_syn, m_mag_syn, m_real_syn, m_imag_syn
'''
#==============================================================================
# v2: Improved phase generation.
# v3: specific window handling for aperiodic spectrum in voiced segments.
# v4: Splitted window support
# If ph_hf_gen=='rand', generates random numbers for the phase above mvf
# If ph_hf_gen=='template_mask', uses a phase template to fill the gaps given by the aperiodic mask.
# If ph_hf_gen=='rand_mask' The same as above, but it uses random numbers instead of a template.
# The aperiodic mask is computed (estimated) according to the total phase energy per frame.
# v_voi: Used to construct the ap mask:
# if v_voi[n] > 0, frame is voiced. If v_voi[n] == 0, frame is unvoiced.
# If v_voi=='estim', the mask is estimated from phase data.
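# A hedged call sketch for the function below (the array contents, sampling
# rate, FFT size, and maximum voiced frequency are illustrative assumptions,
# not values taken from a real recipe):
#
#     v_syn = synthesis_with_del_comp_and_ph_encoding4(m_spmgc, m_phs_mgc, m_phc_mgc,
#                                                      v_shift, 4096, 48000, 4500.0, v_voi)
#
# which is expected to return the time-domain synthesised waveform built from
# the MGC-encoded magnitude spectrum and the sine/cosine phase streams.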
def synthesis_with_del_comp_and_ph_encoding4(m_spmgc, m_phs_mgc, m_phc_mgc, v_shift, nFFT, fs, mvf, v_voi, b_medfilt=False, win_func=None):
#Protection:
v_shift = v_shift.astype(int)
# Ph and MVF:
mvf_bin = lu.round_to_int(mvf * nFFT / np.float(fs))
nFFThalf_ph = la.next_pow_of_two(mvf_bin) + 1
# MGC to Ph up to MVF:
m_phs_shrt_intrp_syn = la.mcep_to_sp_sptk(m_phs_mgc, 2*(nFFThalf_ph-1), out_type=0)
m_phc_shrt_intrp_syn = la.mcep_to_sp_sptk(m_phc_mgc, 2*(nFFThalf_ph-1), out_type=0)
f_interps_syn = interpolate.interp1d(np.arange(nFFThalf_ph), m_phs_shrt_intrp_syn, kind='cubic')
f_interpc_syn = interpolate.interp1d(np.arange(nFFThalf_ph), m_phc_shrt_intrp_syn, kind='cubic')
m_phs_shrt_syn = f_interps_syn(np.linspace(0,nFFThalf_ph-1,mvf_bin))
m_phc_shrt_syn = f_interpc_syn(np.linspace(0,nFFThalf_ph-1,mvf_bin))
# Deterministic Phase decoding:----------------------
nfrms = np.size(m_phs_shrt_syn,0)
nFFThalf = nFFT / 2 + 1
m_phs_shrt_syn = np.clip(m_phs_shrt_syn, -1, 1)
m_phc_shrt_syn = np.clip(m_phc_shrt_syn, -1, 1)
m_ph_deter = ph_dec(m_phs_shrt_syn, m_phc_shrt_syn, mode='angle')
#m_ph_deter = np.hstack((m_ph_deter, np.zeros((nfrms,nFFThalf-mvf_bin))))
# Debug:
f = interpolate.interp1d(np.arange(mvf_bin), m_ph_deter, kind='nearest', fill_value='extrapolate')
m_ph_deter = f(np.arange(nFFThalf))
# TD Noise Gen:---------------------------------------
v_pm = la.shift_to_pm(v_shift)
sig_len = v_pm[-1] + (v_pm[-1] - v_pm[-2])
v_noise = np.random.uniform(-1, 1, sig_len)
#v_noise = np.random.normal(size=sig_len)
# Extract noise magnitude and phase for unvoiced segments: (TODO: make it more efficient!)-------------------------------
win_func_unv = np.hanning
if win_func is la.cos_win:
win_func_unv = la.cos_win
l_frm_noise, v_lens, v_pm_plus, v_shift_dummy, v_rights = windowing(v_noise, v_pm, win_func=win_func_unv)
m_frm_noise = la.frm_list_to_matrix(l_frm_noise, v_shift, nFFT)
m_frm_noise = np.fft.fftshift(m_frm_noise, axes=1)
'''
    # Debug - randomise sequence of noise frames (DO NOT DELETE!):
v_new_nx = np.random.randint(nfrms, size=nfrms)
m_frm_noise = m_frm_noise[v_new_nx,:]
#------------------------------------------
'''
m_noise_sp = la.remove_hermitian_half(np.fft.fft(m_frm_noise))
m_noise_ph = np.angle(m_noise_sp)
m_noise_mag = np.absolute(m_noise_sp)
m_noise_mag_log = np.log(m_noise_mag)
# Noise amp-normalisation:
rms_noise = np.sqrt(np.mean(m_noise_mag**2))
m_noise_mag_log = m_noise_mag_log - np.log(rms_noise)
# Extract noise magnitude and phase for voiced segments: (TODO: make it more efficient!)-------------------------------------
l_frm_voi_noise, v_lens, v_pm_plus, v_shift_dummy, v_rights = windowing(v_noise, v_pm, win_func=voi_noise_window)
m_frm_voi_noise = la.frm_list_to_matrix(l_frm_voi_noise, v_shift, nFFT)
m_frm_voi_noise = np.fft.fftshift(m_frm_voi_noise, axes=1)
m_voi_noise_sp = la.remove_hermitian_half(np.fft.fft(m_frm_voi_noise))
m_voi_noise_ph = np.angle(m_voi_noise_sp)
m_voi_noise_mag = np.absolute(m_voi_noise_sp)
m_voi_noise_mag_log = np.log(m_voi_noise_mag)
# Noise amp-normalisation:
rms_voi_noise = np.sqrt(np.mean(m_voi_noise_mag**2))
m_voi_noise_mag_log = m_voi_noise_mag_log - np.log(rms_voi_noise)
#------------------------------------------------------------------------------------------------------------------------------
# ap mask:
v_voi_mask = np.clip(v_voi, 0, 1)
# target sp from mgc:
m_sp_targ = la.mcep_to_sp_sptk(m_spmgc, nFFT)
# medfilt:
if b_medfilt:
m_sp_targ = signal.medfilt(m_sp_targ, kernel_size=[3,1])
'''
    # Debug - Minimum phase filter for ap signal (DO NOT DELETE!):
m_sp_comp_mph = la.sp_to_min_phase(m_sp_targ, in_type='sp')
m_sp_ph_mph = np.angle(m_sp_comp_mph)
m_noise_ph = m_noise_ph + m_sp_ph_mph
m_voi_noise_ph = m_voi_noise_ph + m_sp_ph_mph
'''
# Alloc:
m_frm_syn = np.zeros((nfrms, nFFT))
m_mag_syn = np.zeros((nfrms, nFFThalf)) # just for debug
m_mag = np.zeros((nfrms, nFFThalf)) # just for debug
# Spectral crossfade constants (TODO: Improve this):
muf = 3500 # "minimum unvoiced freq."
bw = (mvf - muf) - 20 # values found empirically. assuming mvf > 4000
cut_off = (mvf + muf) / 2
v_zeros = np.zeros((1,nFFThalf))
# Iterates through frames:
for i in | |
hlp = {
# Main Menu
"Initial Setup": """
The initial setup required to proceed with the rest of the migration. This
should be configured first, before anything else. The information in this menu
allows the tool to connect to both Nexus and Artifactory, so that it can
properly configure and execute the migration.""",
"Repository Migration Setup": """
Set up migration for repositories. Choose which repositories are to be migrated
and which are not, and modify those repositories' details as necessary.""",
"Security Migration Setup": """
Set up migration for security settings, including users, groups, permissions,
and LDAP settings.""",
"Options Migration Setup": """
Set up migration for miscellaneous options. None are implemented yet.""",
"Save Config JSON File": """
Save the migration configuration to a JSON file. The save and load options are
useful if you need to exit the tool and resume migration later, or if you would
like to back up your configuration to revert to in the future.""",
"Load Config JSON File": """
Load the migration configuration from a JSON file. The save and load options are
useful if you need to exit the tool and resume migration later, or if you would
like to back up your configuration to revert to in the future.""",
"Verify Configuration": """
Verify that the migration configuration is valid. This refreshes the connections
to Artifactory and Nexus, so if a change in either instance has invalidated the
configuration, the new verification statuses will reflect that.""",
"Run Migration": """
Execute the configured migration. This will run everything in the configuration,
migrating all settings and artifacts from Nexus to Artifactory. Artifacts will
only be migrated if they don't already exist in Artifactory, so running this
multiple times in a row will only migrate new artifacts each time.""",
# Initial Setup Menu
"Nexus Data Directory": """
The local file path to the Nexus instance. For efficiency reasons, this tool
requires file system access to the Nexus server, and so must be run on the same
server, or on a computer with access to the file system where Nexus is
installed, and must be run by a user with read access to the Nexus
installation's directory tree. The correct path to put in this field is a
directory containing the folders: 'conf', 'indexer', 'logs', 'storage',
'timeline', and others (or in Nexus 3: 'blobs', 'cache', 'db', 'etc', 'log', and
others).""",
"Nexus URL": """
The URL of the Nexus server. This allows the tool to access the Nexus instance
via its REST and Integrations API.""",
"Nexus Username": """
The username of an administrative user on the Nexus server. The tool uses this
user to log in to Nexus.""",
"Nexus Password": """
The password of an administrative user on the Nexus server. The tool uses this
user to log in to Nexus.""",
"Artifactory URL": """
The URL of the Artifactory server. This allows the tool to access the
Artifactory instance via its REST API.""",
"Artifactory Username": """
The username of an administrative user on the Artifactory server. The tool uses
this user to log in to Artifactory.""",
"Artifactory Password": """
The password, or an API key, of an administrative user on the Artifactory
server. The tool uses this user to log in to Artifactory.""",
# Repository Migration Setup Menu
"Edit Repository": """
Press 'e' followed by a number key to edit a repository migration in detail.
Pressing the number key on its own will simply toggle whether or not the
repository will be migrated.""",
# Edit Repository Menu
"Repo Name (Nexus)": """
The name of the Nexus repository, prior to migration. This field is not
editable, as it simply shows the name of the repository as it is on the Nexus
server. To change the repository name, modify the "Repo Name (Artifactory)"
field.""",
"Repo Name (Artifactory)": """
The name that will be given to the repository when it is migrated to
Artifactory. This defaults to the name of the Nexus repository, but can be
changed.""",
"Migrate This Repo": """
Whether to migrate this repository. If this is unchecked, this repository will
not be migrated.""",
"Repo Class": """
The repository's class type: local (hosted), remote (proxy), virtual (group), or
shadow (virtual, in Nexus).""",
"Repo Type": """
The repository's package type (e.g. maven1, maven2, yum, nuget, etc).""",
"Repo Description": """
The repository's description attribute. Defaults to the Nexus repository's
"display name" attribute.""",
"Repo Layout": """
The repository's layout. This is automatically set to the default layout for the
repository's package type, but it can be set to another layout if necessary.""",
"Handles Releases": """
Whether the repository handles release builds.""",
"Handles Snapshots": """
Whether the repository handles snapshot builds.""",
"Suppresses Pom Consistency Checks": """
Whether the repository should suppress checking if artifacts are consistent with
their pom files.""",
"Maven Snapshot Version Behavior": """
Defines the Maven Snapshot Version Behavior (non-unique, unique or
deployer).""",
"Max Unique Snapshots": """
Number of retained versions of unique snapshots (irrelevant when choosing
non-unique version behavior).""",
"Remote URL": """
The remote URL of the repository to proxy.""",
# Security Migration Setup Menu
"Users Migration Setup": """
Set up migration for users. Choose which users are to be migrated and which are
not, and modify those users' details as necessary.""",
"Groups Migration Setup": """
Set up migration for groups. Choose which groups are to be migrated and which
are not, and modify those groups' details as necessary.""",
"Permissions Migration Setup": """
Set up migration for permissions. Choose which permissions are to be migrated
and which are not, and modify those permissions' details as necessary.""",
"LDAP Migration Setup": """
Set up migration for LDAP. Choose which LDAP configurations are to be migrated
and which are not, and modify those configurations' details as necessary.""",
# Users Migration Setup Menu
"Default Password": """
The default temporary password assigned to migrated users. Since Nexus hashes
passwords, they cannot be migrated like other data. They must be manually set,
either during configuration, or during the user's first successful login. All
users in the latter category can use this password to log into Artifactory, at
which point they will be prompted to set a new password.""",
"Edit User": """
Press 'e' followed by a number key to edit a user migration in detail. Pressing
the number key on its own will simply toggle whether or not the user will be
migrated.""",
# Edit User Menu
"User Name (Nexus)": """
The name of the Nexus user, prior to migration. This field is not editable, as
it simply shows the name of the user as it is on the Nexus server. To change the
user name, modify the "User Name (Artifactory)" field.""",
"User Name (Artifactory)": """
The name that will be given to the user when it is migrated to Artifactory. This
defaults to the name of the Nexus user, but can be changed.""",
"Migrate This User": """
Whether to migrate this user. If this is unchecked, this user will not be
migrated.""",
"Realm": """
This user's security realm. This is generally either 'internal' or 'ldap'.""",
"Email Address": """
The email address associated with this user.""",
"Password": """
The password assigned to this user. Since Nexus hashes passwords, they cannot be
migrated like other data. They must be manually set, either during
configuration, or during the user's first successful login. All users in the
former category can use this value as their password. For security reasons,
admin users are required to set their passwords in this way.""",
"Groups": """
The groups this user belongs to.""",
"Is An Administrator": """
Whether the user has administrative privileges. This differs from the Nexus
model, which has special 'application' privileges that grant various
administrative abilities to users; in Artifactory, admin users have all of these
abilities, and all other users do not. For security reasons, passwords must be
explicitly set for admin users, as opposed to using a temporary password.""",
"Is Enabled": """
Whether this user is enabled. If this is unchecked, this user will be unable to
log in to Artifactory or use their account in any way, until an administrator
re-enables the account.""",
# Groups Migration Setup Menu
"Edit Group": """
Press 'e' followed by a number key to edit a group migration in detail. Pressing
the number key on its own will simply toggle whether or not the group will be
migrated.""",
# Edit Group Menu
"Group Name (Nexus)": """
The name of the Nexus role, prior to migration. This field
import dataclasses
from abc import abstractmethod, ABC
from typing import Optional, Tuple
import torch
import falkon
from falkon.options import FalkonOptions
__all__ = ("Loss", "LogisticLoss")
class Loss(ABC):
r"""Abstract generalized self-concordant loss function class.
Such loss functions must be three times differentiable; but for the logistic Falkon algorithm
only the first two derivatives are used.
Subclasses must implement the :meth:`__call__` method which calculates the loss function
given two input vectors (the inputs could also be matrices e.g. for the softmax loss),
the :meth:`df` method which calculates the first derivative of the function and :meth:`ddf`
which calculates the second derivative.
Additionally, this class provides two methods (:meth:`knmp_grad` and :meth:`knmp_hess`) which
calculate kernel-vector products using the loss derivatives for vectors. These functions are
specific to the logistic Falkon algorithm.
Parameters
-----------
name
A descriptive name for the loss function (e.g. "logistic", "softmax")
kernel
The kernel function used for training a LogFalkon model
opt
Falkon options container. Will be passed to the kernel when computing kernel-vector
products.
See Also
--------
:class:`LogisticLoss` : a concrete implementation of this class for the logistic loss.
:class:`falkon.models.LogisticFalkon` : the logistic Falkon model which uses GSC losses.
"""
def __init__(self,
name: str,
kernel: falkon.kernels.kernel.Kernel,
opt: Optional[FalkonOptions] = None):
self.name = name
self.kernel = kernel
self.params = opt or FalkonOptions()
def _update_opt(self, opt: Optional[FalkonOptions]):
new_opt = self.params
if opt is not None:
new_opt = dataclasses.replace(self.params, **dataclasses.asdict(opt))
return new_opt
@abstractmethod
def __call__(self, y1: torch.Tensor, y2: torch.Tensor) -> torch.Tensor:
"""Abstract method. Should return the loss for predicting `y2` with true labels `y1`.
Parameters
----------
y1 : torch.Tensor
One of the two inputs to the loss. This should be interpreted as the `true` labels.
y2 : torch.Tensor
The other loss input. Should be interpreted as the predicted labels.
Returns
-------
torch.Tensor
The loss calculated for the two inputs.
"""
pass
@abstractmethod
def df(self, y1: torch.Tensor, y2: torch.Tensor) -> torch.Tensor:
"""Abstract method. Should return the derivative of the loss wrt `y2`.
Parameters
----------
y1 : torch.Tensor
One of the two inputs to the loss. This should be interpreted as the `true` labels.
y2 : torch.Tensor
The other loss input. Should be interpreted as the predicted labels. The derivative
should be computed with respect to this tensor.
Returns
-------
torch.Tensor
The derivative of the loss with respect to `y2`. It will be a tensor of the same shape
as the two inputs.
"""
pass
@abstractmethod
def ddf(self, y1: torch.Tensor, y2: torch.Tensor) -> torch.Tensor:
"""Abstract method. Should return the second derivative of the loss wrt `y2`.
Parameters
----------
y1 : torch.Tensor
One of the two inputs to the loss. This should be interpreted as the `true` labels.
y2 : torch.Tensor
The other loss input. Should be interpreted as the predicted labels. The derivative
should be computed with respect to this tensor.
Returns
-------
torch.Tensor
The second derivative of the loss with respect to `y2`. It will be a tensor of the
same shape as the two inputs.
"""
pass
def knmp_grad(self,
X: torch.Tensor,
Xc: torch.Tensor,
Y: torch.Tensor,
u: torch.Tensor,
opt: Optional[FalkonOptions] = None) -> Tuple[torch.Tensor, torch.Tensor]:
r"""Computes a kernel vector product where the vector is the first derivative of this loss
Given kernel function :math:`K`, the loss represented by this class :math:`\mathcal{l}`,
number of samples :math:`n`, this function follows equation
.. math::
\dfrac{1}{n} K(X_c, X) @ (\mathcal{l}'(Y, K(X, X_c) @ u))
Parameters
----------
X : torch.Tensor
Data matrix of shape (n x d) with `n` samples in `d` dimensions.
Xc : torch.Tensor
Center matrix of shape (m x d) with `m` centers in `d` dimensions.
Y : torch.Tensor
Label matrix of shape (n x t) with `n` samples. Depending on the loss, the labels may or may not
have more than one dimension.
u : torch.Tensor
A vector (or matrix if the labels are multi-dimensional) of weights of shape (m x t).
The product `K(X, Xc) @ u`, where `K` is the kernel associated to this loss, should
produce label predictions.
opt : FalkonOptions or None
Options to be passed to the mmv function for the kernel associated to this loss.
Options passed as an argument take precedence over the options used to build this
class instance.
Returns
-------
grad_mul : torch.Tensor
A tensor of shape (m x 1) coming from the multiplication of the kernel matrix
`K(Xc, X)` and the loss calculated on predictions with weights `u`.
The formula followed is: `(1/n) * K(Xc, X) @ df(Y, K(X, Xc) @ u)`.
func_val : torch.Tensor
A tensor of shape (n x t) of predictions obtained with weights `u`.
"""
opt = self._update_opt(opt)
func_val = self.kernel.mmv(X, Xc, u, opt=opt)
grad = self.df(Y, func_val)
out = self.kernel.mmv(Xc, X, grad, opt=opt)
out.mul_(1 / X.shape[0])
return out, func_val
def knmp_hess(self,
X: torch.Tensor,
Xc: torch.Tensor,
Y: torch.Tensor,
f: torch.Tensor,
u: torch.Tensor,
opt: Optional[FalkonOptions] = None) -> torch.Tensor:
r"""Compute a kernel-vector product with a rescaling with the second derivative
Given kernel function :math:`K`, the loss represented by this class :math:`\mathcal{l}`,
number of samples :math:`n`, this function follows equation
.. math::
\dfrac{1}{n} K(X_c, X) @ (\mathcal{l}''(Y, f) * K(X, X_c) @ u)
Parameters
----------
X : torch.Tensor
Data matrix of shape (n x d) with `n` samples in `d` dimensions.
Xc : torch.Tensor
Center matrix of shape (m x d) with `m` centers in `d` dimensions.
Y : torch.Tensor
Label matrix of shape (n x t) with `n` samples. Depending on the loss, the labels may
or may not have more than one dimension.
f : torch.Tensor
Tensor of shape (n x t) of predictions. Typically this will be the second output of
the :meth:`knmp_grad` method.
u : torch.Tensor
A vector (or matrix if the labels are multi-dimensional) of weights of shape (m x t).
The product `K(X, Xc) @ u`, where `K` is the kernel associated to this loss, should
produce label predictions.
opt : FalkonOptions or None
Options to be passed to the mmv function for the kernel associated to this loss.
Options passed as an argument take precedence over the options used to build this
class instance.
Returns
-------
A tensor of shape (m x t), the output of the computation.
"""
opt = self._update_opt(opt)
inner = self.kernel.mmv(X, Xc, u, opt=opt)
inner.mul_(self.ddf(Y, f))
outer = self.kernel.mmv(Xc, X, inner, opt=opt)
outer.mul_(1 / X.shape[0])
return outer
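# Sketch of how the two kernel products are typically combined downstream
# (hypothetical variable names; the actual update lives in the LogisticFalkon
# solver, not in this class):
#
#   grad, preds = loss.knmp_grad(X, Xc, Y, beta)        # right-hand side term
#   hess_p      = loss.knmp_hess(X, Xc, Y, preds, p)    # Hessian-vector product
#
# so knmp_grad provides the gradient side and knmp_hess the operator applied
# inside a Newton / conjugate-gradient step on the weights `beta`.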
def __str__(self):
return self.name
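# A minimal sketch of a concrete GSC loss built on the abstract class above.
# This squared loss is only an illustration and is not part of falkon; any
# kernel instance (e.g. falkon.kernels.GaussianKernel(3)) could be passed in.
class _SquaredLossSketch(Loss):
    def __init__(self, kernel: falkon.kernels.kernel.Kernel, opt: Optional[FalkonOptions] = None):
        super().__init__(name="SquaredLossSketch", kernel=kernel, opt=opt)
    def __call__(self, y1: torch.Tensor, y2: torch.Tensor) -> torch.Tensor:
        # 0.5 * (y2 - y1)^2, evaluated element-wise
        return 0.5 * (y2 - y1) ** 2
    def df(self, y1: torch.Tensor, y2: torch.Tensor) -> torch.Tensor:
        # first derivative with respect to y2
        return y2 - y1
    def ddf(self, y1: torch.Tensor, y2: torch.Tensor) -> torch.Tensor:
        # second derivative with respect to y2 is identically 1
        return torch.ones_like(y2)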
class LogisticLoss(Loss):
"""Wrapper for the logistic loss, to be used in conjunction with the :class:`~falkon.models.LogisticFalkon` estimator.
Parameters
-----------
kernel : falkon.kernels.kernel.Kernel
The kernel function used for training a :class:`~falkon.models.LogisticFalkon` model
opt : FalkonOptions
Falkon options container. Will be passed to the kernel when computing kernel-vector
products.
Examples
--------
>>> k = falkon.kernels.GaussianKernel(3)
>>> log_loss = LogisticLoss(k)
>>> estimator = falkon.LogisticFalkon(k, [1e-4, 1e-4, 1e-4], [3, 3, 3], loss=log_loss, M=100)
"""
def __init__(self, kernel: falkon.kernels.kernel.Kernel, opt: Optional[FalkonOptions] = None):
super().__init__(name="LogisticLoss", kernel=kernel, opt=opt)
def __call__(self, y1: torch.Tensor, y2: torch.Tensor) -> torch.Tensor:
r"""Compute the logistic loss between two 1-dimensional tensors
The formula used is :math:`\log(1 + \exp(-y_1 * y_2))`
Parameters
----------
y1
The first input tensor. Must be 1D
y2
The second input tensor. Must be 1D
Returns
-------
loss
The logistic loss between the two input vectors.
"""
return torch.log(1 + torch.exp(-y1 * y2))
def df(self, y1: torch.Tensor, y2: torch.Tensor) -> torch.Tensor:
r"""Compute the derivative of the logistic loss with respect to `y2`
The formula used is
.. math::
-y_1 / (1 + \exp{y_1 * y_2})
Parameters
----------
y1
The first input tensor. Must be 1D
y2
The second input tensor. Must be 1D
Returns
-------
d_loss
The derivative of the logistic loss, calculated between the two input vectors.
"""
out = -y1
div = y1 * y2
div.exp_().add_(1)
out.div_(div)
return out
def ddf(self, y1: torch.Tensor, y2: torch.Tensor) -> torch.Tensor:
r"""Compute the second derivative of the logistic loss with respect to `y2`
The formula used is
.. math::
y_1^2 \dfrac{1}{1 + \exp{-y_1 * y_2}} \dfrac{1}{1 + \exp{y_1 * y_2}}
Parameters
----------
y1
The first
setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PortalTemplateFolder',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def portals_id_template_folders_fk_put(self, id, fk, **kwargs):
"""
Update a related item by id for templateFolders.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_template_folders_fk_put(id, fk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str fk: Foreign key for templateFolders (required)
:param PortalTemplateFolder data:
:return: PortalTemplateFolder
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portals_id_template_folders_fk_put_with_http_info(id, fk, **kwargs)
else:
(data) = self.portals_id_template_folders_fk_put_with_http_info(id, fk, **kwargs)
return data
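# Usage sketch for the wrapper above (identifiers and payload are assumptions,
# not taken from this file): the same generated method runs synchronously by
# default, or asynchronously when a callback is supplied, e.g.
#
#   folder = api.portals_id_template_folders_fk_put('portal-1', 'folder-9', data=payload)
#   api.portals_id_template_folders_fk_put('portal-1', 'folder-9', data=payload,
#                                          callback=lambda resp: pprint(resp))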
def portals_id_template_folders_fk_put_with_http_info(self, id, fk, **kwargs):
"""
Update a related item by id for templateFolders.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_template_folders_fk_put_with_http_info(id, fk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str fk: Foreign key for templateFolders (required)
:param PortalTemplateFolder data:
:return: PortalTemplateFolder
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'fk', 'data']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method portals_id_template_folders_fk_put" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `portals_id_template_folders_fk_put`")
# verify the required parameter 'fk' is set
if ('fk' not in params) or (params['fk'] is None):
raise ValueError("Missing the required parameter `fk` when calling `portals_id_template_folders_fk_put`")
collection_formats = {}
resource_path = '/Portals/{id}/templateFolders/{fk}'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
if 'fk' in params:
path_params['fk'] = params['fk']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'data' in params:
body_params = params['data']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PortalTemplateFolder',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def portals_id_template_folders_get(self, id, **kwargs):
"""
Queries templateFolders of Portal.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_template_folders_get(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str filter:
:return: list[PortalTemplateFolder]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portals_id_template_folders_get_with_http_info(id, **kwargs)
else:
(data) = self.portals_id_template_folders_get_with_http_info(id, **kwargs)
return data
def portals_id_template_folders_get_with_http_info(self, id, **kwargs):
"""
Queries templateFolders of Portal.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_template_folders_get_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str filter:
:return: list[PortalTemplateFolder]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'filter']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method portals_id_template_folders_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `portals_id_template_folders_get`")
collection_formats = {}
resource_path = '/Portals/{id}/templateFolders'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
if 'filter' in params:
query_params['filter'] = params['filter']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[PortalTemplateFolder]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def portals_id_template_folders_nk_templates_fk_rel_delete(self, id, id2, nk, fk, **kwargs):
"""
Unlink folder with Template and Portal
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_template_folders_nk_templates_fk_rel_delete(id, id2, nk, fk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str id2: Portal id (required)
:param str nk: PortalTemplateFolder id (required)
:param str fk: Template id (required)
:return: PortalTemplate
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portals_id_template_folders_nk_templates_fk_rel_delete_with_http_info(id, id2, nk, fk, **kwargs)
else:
(data) = self.portals_id_template_folders_nk_templates_fk_rel_delete_with_http_info(id, id2, nk, fk, **kwargs)
return data
def portals_id_template_folders_nk_templates_fk_rel_delete_with_http_info(self, id, id2, nk, fk, **kwargs):
"""
Unlink folder with Template and Portal
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_template_folders_nk_templates_fk_rel_delete_with_http_info(id, id2, nk, fk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str id2: Portal id (required)
:param str nk: PortalTemplateFolder id (required)
:param str fk: Template id (required)
:return: PortalTemplate
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'id2', 'nk', 'fk']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method portals_id_template_folders_nk_templates_fk_rel_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `portals_id_template_folders_nk_templates_fk_rel_delete`")
# verify the required parameter 'id2' is set
if ('id2' not in params) or (params['id2'] is None):
raise ValueError("Missing the required parameter `id2` when calling `portals_id_template_folders_nk_templates_fk_rel_delete`")
# verify the required parameter 'nk' is set
if ('nk' not in params) or (params['nk'] is None):
raise ValueError("Missing the required parameter `nk` when calling `portals_id_template_folders_nk_templates_fk_rel_delete`")
# verify the required parameter 'fk' is set
if ('fk' not in params) or (params['fk'] is None):
raise ValueError("Missing the required parameter `fk` when calling `portals_id_template_folders_nk_templates_fk_rel_delete`")
collection_formats = {}
resource_path = '/Portals/{id}/templateFolders/{nk}/templates/{fk}/rel'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
if 'id2' in params:
path_params['id'] = params['id2']
if 'nk' in params:
path_params['nk'] = params['nk']
if 'fk' in params:
path_params['fk'] = params['fk']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PortalTemplate',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def portals_id_template_folders_nk_templates_fk_rel_put(self, id, id2, nk, fk, **kwargs):
"""
Link folder with Template and Portal
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_template_folders_nk_templates_fk_rel_put(id, id2, nk, fk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 17 15:52:24 2013
@author: hlampesberger
"""
from base import Result, mapping, epsilon, write_pdf, write_png
import ops
import nfa
import operator
import itertools
import collections
from functools import reduce  # built-in on Python 2; explicit import keeps parse() working on Python 3
__all__ = ['DFA']
class DFA(object):
def __init__(self, alphabet, states,
start, accept, reject,
delta, transitions=None, sink=None):
self.alphabet = alphabet
self.states = states
self.start = start
self.accept = accept
self.reject = reject
self.delta = delta
self.transitions = transitions
self.sink = sink
# properties
self._complete = None
def __len__(self):
return len(self.states)
def __repr__(self):
return "<DFA states:%d complete:%d>" % (len(self), self.is_complete())
@staticmethod
def build(**kwargs):
alp = kwargs.get('alphabet')
if not isinstance(alp, set):
alp = set(alp)
assert(epsilon not in alp)
states = set(kwargs.get('states'))
acc = set(kwargs.get('accept_states'))
rej = set(kwargs.get('reject_states'))
start_state = kwargs.get('start_state')
transitions = kwargs.get('transitions')
sink = kwargs.get('sink', None)
if sink is not None:
if not (sink in states and sink in rej):
raise RuntimeError("Invalid sink state!")
tr = collections.defaultdict(lambda: sink)
# tr = {(s, sym) : ns for s, sym, ns in transitions}
for q, a, qn in transitions:
tr[q, a] = qn
# delta = lambda s, sym: tr.get((s, sym), sink)
delta = lambda q, a: tr[q, a]
return DFA(alp, states, start_state, acc, rej, delta, tr, sink)
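# Example of the expected build() input (a sketch with made-up values): a
# two-state DFA over {'a'} that accepts exactly the strings of even length.
#
#   dfa = DFA.build(alphabet={'a'}, states={0, 1}, start_state=0,
#                   accept_states={0}, reject_states={1},
#                   transitions=[(0, 'a', 1), (1, 'a', 0)])
#   dfa.membership('aa')   # -> Result.accept
#   dfa.membership('a')    # -> Result.reject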
def rename(self, state_func=None, alp_func=None):
if state_func is None:
state_func = mapping()
alp = None
if alp_func is None:
# identity
alp_func = lambda x: x
alp = self.alphabet
else:
alp = { alp_func(a) for a in self.alphabet}
start = state_func(self.start)
states = { state_func(s) for s in self.states }
accept = { state_func(s) for s in self.accept }
reject = { state_func(s) for s in self.reject }
newsink = None
if self.sink is not None:
newsink = state_func(self.sink)
tr = collections.defaultdict(lambda: newsink)
for q, a, qn in self.itertransitions():
tr[state_func(q), alp_func(a)] = state_func(qn)
delta = lambda q, a: tr[q, a]
return DFA(alp, states, start, accept, reject,
delta, tr, newsink)
def parse(self, iterable):
final = reduce(self.delta, iterable, self.start)
if final in self.accept:
return Result.accept, final
elif final is None or final in self.reject:
return Result.reject, final
else:
return Result.neutral, final
def membership(self, iterable):
return self.parse(iterable)[0]
def successor(self, state):
return {self.delta(state, sym) for sym in self.alphabet if \
self.delta(state, sym) is not None}
def predecessor(self, state):
return {s for s in self.states for sym in self.alphabet \
if self.delta(s, sym) == state}
def generate_example(self):
frontier = collections.deque()
frontier.append((self.start, []))
visited = set()
while frontier:
state, buf = frontier.popleft()
visited.add(state)
if state in self.accept:
return buf
else:
# add neighbors
for a in self.alphabet:
ns = self.delta(state, a)
if ns is not None and ns not in visited:
frontier.append((ns, buf + [a]))
def reachable_states(self):
visited = set()
frontier = [ self.start ]
while frontier:
s = frontier.pop()
visited.add(s)
for ns in self.successor(s):
if ns not in visited:
frontier.append(ns)
return visited
def dead_states(self):
return {s for s in self.reject \
if all(self.delta(s, sym) == s for sym in self.alphabet)}
def is_complete(self):
if self._complete is None:
if any(self.delta(s, sym) is None \
for s in self.states for sym in self.alphabet):
self._complete = False
else:
self._complete = True
return self._complete
def is_empty(self):
# depth first search from start to check whether an accepting state
# is reachable or not
frontier = [self.start]
visited = set()
while frontier:
s = frontier.pop()
if s in self.accept:
# early exit
return False
visited.add(s)
for ns in self.successor(s):
if ns not in visited:
frontier.append(ns)
return True
def is_universal(self):
# depth first search from start to check whether every reachable state
# is accepting
frontier = [self.start]
visited = set()
while frontier:
s = frontier.pop()
if s not in self.accept:
# early exit
return False
visited.add(s)
for ns in self.successor(s):
if ns not in visited:
frontier.append(ns)
return True
# TODO: implement test for infiniteness
def is_infinite(self):
raise NotImplementedError
def complete(self, sink= -1):
if not self.is_complete():
self.states.add(sink)
self.reject.add(sink)
oldsink = self.sink
self.sink = sink
if self.transitions is not None:
# change default if transitions are still available
for (q, a), qn in self.transitions.items():
if qn is oldsink:
self.transitions[q, a] = sink
self.transitions.default_factory = lambda: sink
else:
d = self.delta
# compose new delta function
def new_delta(s, sym):
ns = d(s, sym)
if ns is not None:
return ns
else:
return sink
self.delta = new_delta
self._complete = True
def del_states(self, states):
assert(self.start not in states)
if self.states & states:
self.states -= states
self.accept -= states
self.reject -= states
if self.transitions is not None:
for (s, sym), ns in self.transitions.items():
if (s in states) or (ns in states):
del(self.transitions[s, sym])
# repair delta default
if self.sink in states:
self.sink = None
self.delta = lambda s, sym: \
self.transitions.get((s, sym), None)
else:
d = self.delta
def new_delta(s, sym):
ns = d(s, sym)
if s in states or ns in states:
return None
else:
return ns
self.delta = new_delta
self._complete = None
return self
def del_unreachable_states(self):
self.del_states(self.states - self.reachable_states())
return self
def del_dead_states(self):
self.del_states(self.dead_states())
return self
def itertransitions(self):
for s in self.states:
for sym in self.alphabet:
ns = self.delta(s, sym)
if ns is not None:
yield s, sym, ns
def invert(self):
tmp = self.accept
self.accept = self.reject
self.reject = tmp
return self
def reverse(self):
A = nfa.NFA.viewDFA(self)
return A.reverse().determinize()
def myhill_nerode_equiv_classes(self):
not_accepting_states = self.states - self.accept
P = [set(self.accept), not_accepting_states]
W = [set(self.accept), not_accepting_states]
while W:
S = W.pop()
for a in self.alphabet:
inv_states = {s for s in self.states if self.delta(s, a) in S}
Pnew = []
for R in P:
R1 = R & inv_states
if R1 and not (R.issubset(inv_states)):
R2 = R - R1
Pnew.append(R1)
Pnew.append(R2)
if R in W:
W.remove(R)
W.append(R1)
W.append(R2)
else:
W.append(min(R1, R2, key=len))
else:
Pnew.append(R)
P = Pnew
return {frozenset(s) for s in P}
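# Worked sketch of the refinement above (hypothetical DFA): with states
# {0, 1, 2} over {'a'}, transitions 0->1->2->1 and accepting states {1, 2},
# the initial split {accepting, non-accepting} is never refined further, so
#
#   dfa.myhill_nerode_equiv_classes()   # -> {frozenset({0}), frozenset({1, 2})}
#   len(dfa.minimize())                 # -> 2, states 1 and 2 are merged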
def minimize(self):
self.complete()
self.del_unreachable_states()
eq_classes = self.myhill_nerode_equiv_classes()
# construct new dfa with minimal states
tr = collections.defaultdict(lambda: None)
new_start = None
new_accept = set()
new_reject = set()
# set_ref : States --> EquivClasses
set_ref = dict()
for c in eq_classes:
for state in c:
set_ref[state] = c
# then add transitions
for c in eq_classes:
if self.start in c:
if new_start is not None:
raise RuntimeError("Start state in multiple classes!")
else:
new_start = c
if c & self.accept:
new_accept.add(c)
elif c & self.reject:
new_reject.add(c)
# choose transitions from one representative
for a in self.alphabet:
for s in c:
tr[c, a] = set_ref[self.delta(s, a)]
# do it only for the first representative in c
break
# delta = lambda s, sym: tr.get((s, sym), None)
delta = lambda q, a: tr[q, a]
return DFA(self.alphabet, eq_classes, new_start, new_accept,
new_reject, delta, tr)
def _product(self, other, f_acc, f_rej):
if self.alphabet != other.alphabet:
raise RuntimeError("Incompatible alphabets")
start = (self.start, other.start)
alp = self.alphabet
states = set()
accept = set()
reject = set()
for s1, s2 in itertools.product(self.states, other.states):
s = (s1, s2)
states.add(s)
if f_acc(s1 in self.accept, s2 in other.accept):
accept.add(s)
if f_rej(s1 in self.reject, s2 in other.reject):
reject.add(s)
# dereference from self
d1 = self.delta
d2 = other.delta
delta = lambda s, sym: (d1(s[0], sym), d2(s[1], sym))
return DFA(alp, states, start, accept, reject, delta)
def __and__(self, other):
# intersection
return self._product(other, operator.__and__, operator.__or__)
def __or__(self, other):
# union
return self._product(other, operator.__or__, operator.__and__)
def __xor__(self, other):
# symmetric difference
return self._product(other, operator.__xor__, operator.__eq__)
def __le__(self, other):
# subset
pdfa = self._product(other, lambda x, y: x and not y,
lambda x, y: not x or y)
return pdfa.is_empty()
def __ge__(self, other):
# superset
return other <= self
def __eq__(self, other):
# equality
return self.__xor__(other).is_empty()
def __ne__(self, other):
# not equal
return not self.__xor__(other).is_empty()
def __sub__(self, other):
# difference
return self.__and__(other.complement())
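# The operator overloads above lift set operations to the recognized languages,
# so common decision problems reduce to emptiness checks (usage sketch):
#
#   (A & B).is_empty()   # True iff L(A) and L(B) are disjoint
#   (A - B).is_empty()   # True iff L(A) is contained in L(B)
#   A == B               # True iff the symmetric difference is empty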
def __copy__(self):
tr = collections.defaultdict(lambda: self.sink)
for q, a, qn in self.itertransitions():
if qn is not self.sink:
tr[q, a] = qn
delta = lambda q, a: tr[q, a]
return DFA(self.alphabet, set(self.states), self.start,
set(self.accept), set(self.reject),
delta, tr, self.sink)
def __mul__(self, other):
n1 = nfa.NFA.viewDFA(self)
n2 = nfa.NFA.viewDFA(other)
return (n1 * n2).determinize()
# hopcroft karp naive algorithm
def equivalent_states(self, other):
def out(dfa, state):
if state in dfa.accept:
return Result.accept
elif state in
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 23 12:19:34 2020
@author: Mark
"""
import numpy as np
import json
import xlsxwriter
from model.algorithms.algorithm1 import Algorithm1
from model.algorithms.algorithm4 import Algorithm4
from model.algorithms.algorithm5 import Algorithm5
from model.algorithms.algorithm6 import Algorithm6
import datetime
from model.base_model import (Spectrum, Gauss, Lorentz, VacuumExcitation,
MeasuredSpectrum, ScatteringMedium, Calculation,
Tougaard, Voigt, SyntheticSpectrum)
from converters.data_converter import DataConverter
from converters.vamas import Vamas
import time
class Model():
def __init__(self):
self.start = 1200
self.stop = 1400
self.step = 0.1
self.loaded_spectra = []
self.unscattered_spectrum = Spectrum(self.start,self.stop,self.step)
self.scattered_spectrum = Spectrum(self.start,self.stop,self.step)
self.simulated_spectrum = Spectrum(self.start,self.stop,self.step)
self.scattering_medium = ScatteringMedium()
self.component_spectra = []
self.scatterers = {}
self.loss_component_kinds = ['Gauss', 'Lorentz', 'VacuumExcitation',
'Tougaard']
self.peak_kinds = ['Gauss', 'Lorentz', 'Voigt']
self.calculation = Calculation()
self.calculation.n_iter = int(100)
''' The following attributes are for the synthetic spectrum builder
'''
self.default_spectrum_fields = [{'name':'position',
'label':'Position',
'value':0.0},
{'name':'width',
'label':'Width',
'value':0.0},
{'name':'Intensity',
'label':'Intensity',
'value':0.0}]
self.spec_builder_columns = [{'name':'Nr.','width':30},
{'name':'Type', 'width':50},
{'name':'Position','width':60},
{'name':'Width', 'width':60},
{'name':'Intensity', 'width':60}]
self.spec_builder_column_attributes = ['position', 'width',
'intensity']
self.algorithm_option = ''
self.algorithm_id = 0 # this is the default algorithm to use
self.algorithms = {0:Algorithm5,1:Algorithm1,2:Algorithm6}
self.algorithmInputs()
self.selector_table_params = {'table_name':'selector',
'height':14,
'on_double_click':'visibility',
'selectmode':'extended',
'colour_keys':False,
'columns':[{'name':'Nr.',
'width':30},
{'name':'Type',
'width':75},
{'name':'Name',
'width':150}
]
}
self.selector_fig_params = {'xlabel':'Energy [eV]',
'ylabel': 'Intensity [cts./sec.]',
'axis_label_fontsize':8,
'title':'',
'size':(4,3)}
self.export_formats = ['Vamas', 'Excel']
''' The variable called 'variable_mapping' is used to store a
collection of objects and their attribute names for the parameters that
are exchanged with the controller for the algorithm parameter inputs'''
self.variable_mapping = {
'inelastic_xsect': self.scattering_medium.scatterer,
'distance': self.scattering_medium,
'pressure': self.scattering_medium,
'norm_factor': self.scattering_medium.scatterer,
'n_iter': self.calculation,
'inelastic_prob': self.scattering_medium.scatterer,
}
def loadFile(self, filename):
"""
This function instantiates a Converter object to parse a file.
The parsed data is stored inside the Converter's data attribute.
Parameters
----------
filename : STRING
The name of the file to be parsed
Returns
-------
INT
The number of items in the converter's data list (i.e. the number
of spectra in the file).
"""
self.converter = DataConverter()
self.converter.load(filename)
if filename.rsplit('.')[-1] == 'vms':
for data in self.converter.data:
if data['settings']['y_units'] == 'counts':
y = np.array(data['data']['y0'])
dwell = data['settings']['dwell_time']
scans = data['scans']
y = y / dwell / scans
data['data']['y0'] = list(y)
data['settings']['y_units'] = 'counts_per_second'
return len(self.converter.data)
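# Worked example of the normalization above (made-up numbers): a point recorded
# as 5000 counts with dwell_time = 0.5 s over 10 scans becomes
# 5000 / 0.5 / 10 = 1000 counts per second, and 'y_units' is relabelled so that
# downstream code always sees 'counts_per_second'.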
def returnSelectorParams(self):
return self.selector_table_params, self.selector_fig_params
def returnFileContents(self):
data = self.converter.data
contents = [[idx, d['spectrum_type'], d['group_name']]
for idx, d in enumerate(data)]
return contents
def loadSpectra(self, selection):
"""
This function loads the selected spectra from the file into the
loaded_spectra attribute
Parameters
----------
selection : LIST of integers
The indices of the Converter object's data attribute (a list),
representing the items in the data attribute that should be
stored in the Model's loaded_spectra attribute.
Returns
-------
None.
"""
for idx in selection:
x = self.converter.data[idx]['data']['x']
y = self.converter.data[idx]['data']['y0']
self.loaded_spectra += [MeasuredSpectrum(x,y)]
self.converter = None
self.start = self.loaded_spectra[-1].start # The step width must be defined by the measured spectrum
self.stop = self.loaded_spectra[-1].stop # All synthetic spectra need to have their step widths redefined
self.step = self.loaded_spectra[-1].step # and their lineshapes rebuilt
self.scattering_medium.scatterer.loss_function.step = self.step # Redefine step width of loss function
def loadSpectrum(self, filename):
"""
This function loads a spectrum from an external file.
Parameters
----------
filename : STRING
The name of the file (with its extension) to be read.
Returns
-------
None.
"""
selection = [self.loadFile(filename) - 1]
self.loadSpectra(selection)
def updateSpectrumKinds(self):
for spec in self.loaded_spectra:
if spec.kind == 'Scattered':
self.scattered_spectrum = spec
if spec.kind == 'Unscattered':
self.unscattered_spectrum = spec
def _updateLossFunction(self, step):
"""
This function reconstructs the loss function when the step size of the
spectrum that it should be convolved or deconvolved with changes.
Parameters
----------
step : FLOAT
The new step size.
Returns
-------
None.
"""
loss_function = self.scattering_medium.scatterer.loss_function
loss_function.step = step
loss_function.reBuild()
def setScatteredSpectrum(self,idx):
"""
This function changes which spectrum is the 'scattered spectrum'. The
scattered spectrum is used in the deconvolution algorithm.
Parameters
----------
idx : INT
Index of the spectrum in the list called self.loaded_spectra.
Returns
-------
None.
"""
spectrum = self.loaded_spectra[idx]
self.scattered_spectrum = spectrum
step = spectrum.step
loss_function = self.scattering_medium.scatterer.loss_function
loss_function.step = step
loss_function.reBuild()
def setUnscatteredSpectrum(self,idx):
"""
This function changes which spectrum is the 'unscattered spectrum'. The
unscattered spectrum is used in the convolution algorithm.
Parameters
----------
idx : INT
Index of the spectrum in the list called self.loaded_spectra.
Returns
-------
None.
"""
spectrum = self.loaded_spectra[idx]
self.unscattered_spectrum = spectrum
step = spectrum.step
loss_function = self.scattering_medium.scatterer.loss_function
loss_function.step = step
loss_function.reBuild()
def scatterSpectrum(self):
"""
This function runs one of the calculations that simulates inelastic
scattering. The choice of calculations is determined from the
algorithm_id which is an attribute of the Model. The algorithm is run
and the results are stored in a simulated_spectrum object, then added
to the list of loaded spectra.
Returns
-------
None.
"""
algorithm_id = self.algorithm_id
params = self._getAlgorithmParams(algorithm_id)
algorithm_type = self.algorithms[algorithm_id].algorithm_type
self._prepSpectra(algorithm_type)
if algorithm_id == 0:
self.simulation = self.algorithms[algorithm_id](self.unscattered_spectrum,
self.scattering_medium, params)
simulated = self.simulation.run()
elif algorithm_id == 1:
self.simulation = self.algorithms[algorithm_id](self.unscattered_spectrum,
self.scattering_medium, params)
simulated = self.simulation.run()
self.simulated_spectrum.lineshape = simulated
self.simulated_spectrum.x = self.unscattered_spectrum.x
self.simulated_spectrum.kind = 'Simulated'
self.intermediate_spectra = self.simulation.I
self._onlyOneSimulated()
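# End-to-end sketch of the convolution workflow (file name and parameter values
# are made up; the attribute names follow 'variable_mapping' defined in
# __init__, and the remaining algorithm parameters are set the same way):
#
#   model = Model()
#   model.loadSpectrum('reference.vms')
#   model.setUnscatteredSpectrum(0)
#   model.scattering_medium.pressure = 4.0
#   model.scattering_medium.distance = 0.8
#   model.scatterSpectrum()
#   simulated = model.simulated_spectrum.lineshape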
def unScatterSpectrum(self):
"""
This function runs a deconvolution algorithm.
Returns
-------
None.
"""
algorithm_id = 2
params = self._getAlgorithmParams(algorithm_id)
algorithm_type = self.algorithms[algorithm_id].algorithm_type
self._prepSpectra(algorithm_type)
self.simulation = self.algorithms[algorithm_id](self.scattered_spectrum,
self.scattering_medium, params)
simulated = self.simulation.run()
self.simulated_spectrum.lineshape = simulated
self.simulated_spectrum.x = self.scattered_spectrum.x
self.simulated_spectrum.kind = 'Simulated'
self.intermediate_spectra = self.simulation.I
self._onlyOneSimulated()
def _onlyOneSimulated(self):
''' This condition ensures that there is only one simulated spectrum.'''
if not ('Simulated' in [i.kind for i in self.loaded_spectra]):
self.loaded_spectra += [self.simulated_spectrum]
else:
idx = [i.kind for i in self.loaded_spectra].index('Simulated')
self.loaded_spectra[idx] = self.simulated_spectrum
def _prepSpectra(self, algorithm_type):
if algorithm_type == 'convolution':
step = self.unscattered_spectrum.step
elif algorithm_type == 'deconvolution':
step = self.scattered_spectrum.step
self._updateLossFunction(step)
def algorithmInputs(self):
"""inputs_dict holds all the inputs needed for each algorithm, as well
as their values, the labels to be used in the View, and the variable
name used in the objects in Model"""
self.inputs_dict = {
0:[
{'label': 'P [mbar]', 'value':'', 'variable':'pressure',
'tip':'''P represents the pressure, in mbar, of the scattering
medium'''},
{'label': 'D [mm]', 'value':'','variable':'distance',
'tip':'''D represents the distance from the sample to the
spectrometer nozzle, the is, the distance
electrons travel through the scattering medium.'''},
{'label':'Inelastic X-sect', 'value':'',
'variable':'inelastic_xsect',
'tip':'''Inelastic X-sect is the inelastic scattering cross-
section of the scattering medium, in units of nm^2'''},
{'label': 'f(Norm.)', 'value':'', 'variable':'norm_factor',
'tip': '''f(Norm.) is a normalization factor. It is used to
account for loss of electron signal, due to
energy loss processes not unaccounted for in
the loss function, such as core-level excitations.'''
}],
1:[
{'label':'Inelastic Prob.', 'value':'',
'variable':'inelastic_prob',
'tip':'''Inelastic Prob. is the probability of an inelastic
scattering event for one iteration.'''},
{'label': 'f(Norm.)', 'value':'', 'variable':'norm_factor',
'tip':'''f(Norm.) is a normalization factor. It is used to
account for loss of electron signal, due to energy loss processes not
accounted for in the loss function, such as core-level excitations.'''},
{'label':'Nr. Iter.', 'value':'','variable':'n_iter',
'tip':'''Nr. Iter. represents the number of iterations to
calculate the convolution. It can be thought of as a
small delta-distance that an electron travels through
the scattering medium.'''}]
}
def changeAlgorithm(self,new_id):
self.algorithm_id = int(new_id)
return self._returnInputFields()
def _returnInputFields(self):
"""
Inputs is the list of dictionaries that is exchanged between the
model and the controller to hold the algorithm input parameters.
Inputs_dict stores a dictionary of dictionaries, where one finds the
dictionary of input parameters needed for each algorithm.
The loop iterates through the list of dicts of self.inputs, then sets
the value of the item in self.inputs to the corresponding value from
the object attribute, stored in Model. self.inputs will eventually be
sent to the controller, to be sent to the view.
Returns
-------
inputs : LIST of DICT's'
Keys:
'label' : The label to be displayed in the view.
'value' : The value to be set in the input field of the view.
'variable' : The
# sympy/stats/tests/test_rv.py
from __future__ import unicode_literals
from sympy import (
S,
Symbol,
Interval,
exp,
symbols,
Eq,
cos,
And,
Tuple,
integrate,
oo,
sin,
Sum,
Basic,
DiracDelta,
Lambda,
log,
pi,
FallingFactorial,
Rational,
)
from sympy.stats import (
Die,
Normal,
Exponential,
FiniteRV,
P,
E,
H,
variance,
density,
given,
independent,
dependent,
where,
pspace,
random_symbols,
sample,
Geometric,
factorial_moment,
Binomial,
Hypergeometric,
DiscreteUniform,
Poisson,
characteristic_function,
moment_generating_function,
)
from sympy.stats.rv import (
IndependentProductPSpace,
rs_swap,
Density,
NamedArgsMixin,
RandomSymbol,
sample_iter,
PSpace,
)
from sympy.testing.pytest import raises
from sympy.core.numbers import comp
from sympy.stats.frv_types import BernoulliDistribution
def test_where():
X, Y = Die("X"), Die("Y")
Z = Normal("Z", 0, 1)
assert where(Z ** 2 <= 1).set == Interval(-1, 1)
assert where(Z ** 2 <= 1).as_boolean() == Interval(-1, 1).as_relational(Z.symbol)
assert where(And(X > Y, Y > 4)).as_boolean() == And(
Eq(X.symbol, 6), Eq(Y.symbol, 5)
)
assert len(where(X < 3).set) == 2
assert 1 in where(X < 3).set
X, Y = Normal("X", 0, 1), Normal("Y", 0, 1)
assert where(And(X ** 2 <= 1, X >= 0)).set == Interval(0, 1)
XX = given(X, And(X ** 2 <= 1, X >= 0))
assert XX.pspace.domain.set == Interval(0, 1)
assert XX.pspace.domain.as_boolean() == And(
0 <= X.symbol, X.symbol ** 2 <= 1, -oo < X.symbol, X.symbol < oo
)
with raises(TypeError):
XX = given(X, X + 3)
def test_random_symbols():
X, Y = Normal("X", 0, 1), Normal("Y", 0, 1)
assert set(random_symbols(2 * X + 1)) == set((X,))
assert set(random_symbols(2 * X + Y)) == set((X, Y))
assert set(random_symbols(2 * X + Y.symbol)) == set((X,))
assert set(random_symbols(2)) == set()
def test_characteristic_function():
# Imports I from sympy
from sympy import I
X = Normal("X", 0, 1)
Y = DiscreteUniform("Y", [1, 2, 7])
Z = Poisson("Z", 2)
t = symbols("_t")
P = Lambda(t, exp(-(t ** 2) / 2))
Q = Lambda(t, exp(7 * t * I) / 3 + exp(2 * t * I) / 3 + exp(t * I) / 3)
R = Lambda(t, exp(2 * exp(t * I) - 2))
assert characteristic_function(X) == P
assert characteristic_function(Y) == Q
assert characteristic_function(Z) == R
def test_moment_generating_function():
X = Normal("X", 0, 1)
Y = DiscreteUniform("Y", [1, 2, 7])
Z = Poisson("Z", 2)
t = symbols("_t")
P = Lambda(t, exp(t ** 2 / 2))
Q = Lambda(t, (exp(7 * t) / 3 + exp(2 * t) / 3 + exp(t) / 3))
R = Lambda(t, exp(2 * exp(t) - 2))
assert moment_generating_function(X) == P
assert moment_generating_function(Y) == Q
assert moment_generating_function(Z) == R
def test_sample_iter():
X = Normal("X", 0, 1)
Y = DiscreteUniform("Y", [1, 2, 7])
Z = Poisson("Z", 2)
expr = X ** 2 + 3
iterator = sample_iter(expr)
expr2 = Y ** 2 + 5 * Y + 4
iterator2 = sample_iter(expr2)
expr3 = Z ** 3 + 4
iterator3 = sample_iter(expr3)
def is_iterator(obj):
if (
hasattr(obj, "__iter__")
and (hasattr(obj, "next") or hasattr(obj, "__next__"))
and callable(obj.__iter__)
and obj.__iter__() is obj
):
return True
else:
return False
assert is_iterator(iterator)
assert is_iterator(iterator2)
assert is_iterator(iterator3)
def test_pspace():
X, Y = Normal("X", 0, 1), Normal("Y", 0, 1)
x = Symbol("x")
raises(ValueError, lambda: pspace(5 + 3))
raises(ValueError, lambda: pspace(x < 1))
assert pspace(X) == X.pspace
assert pspace(2 * X + 1) == X.pspace
assert pspace(2 * X + Y) == IndependentProductPSpace(Y.pspace, X.pspace)
def test_rs_swap():
X = Normal("x", 0, 1)
Y = Exponential("y", 1)
XX = Normal("x", 0, 2)
YY = Normal("y", 0, 3)
expr = 2 * X + Y
assert expr.subs(rs_swap((X, Y), (YY, XX))) == 2 * XX + YY
def test_RandomSymbol():
X = Normal("x", 0, 1)
Y = Normal("x", 0, 2)
assert X.symbol == Y.symbol
assert X != Y
assert X.name == X.symbol.name
X = Normal("lambda", 0, 1) # make sure we can use protected terms
X = Normal("Lambda", 0, 1) # make sure we can use SymPy terms
def test_RandomSymbol_diff():
X = Normal("x", 0, 1)
assert (2 * X).diff(X)
def test_random_symbol_no_pspace():
x = RandomSymbol(Symbol("x"))
assert x.pspace == PSpace()
def test_overlap():
X = Normal("x", 0, 1)
Y = Normal("x", 0, 2)
raises(ValueError, lambda: P(X > Y))
def test_IndependentProductPSpace():
X = Normal("X", 0, 1)
Y = Normal("Y", 0, 1)
px = X.pspace
py = Y.pspace
assert pspace(X + Y) == IndependentProductPSpace(px, py)
assert pspace(X + Y) == IndependentProductPSpace(py, px)
def test_E():
assert E(5) == 5
def test_H():
X = Normal("X", 0, 1)
D = Die("D", sides=4)
G = Geometric("G", 0.5)
assert H(X, X > 0) == -log(2) / 2 + S.Half + log(pi) / 2
assert H(D, D > 2) == log(2)
assert comp(H(G).evalf().round(2), 1.39)
def test_Sample():
X = Die("X", 6)
Y = Normal("Y", 0, 1)
z = Symbol("z")
assert sample(X) in [1, 2, 3, 4, 5, 6]
assert sample(X + Y).is_Float
P(X + Y > 0, Y < 0, numsamples=10).is_number
assert E(X + Y, numsamples=10).is_number
assert variance(X + Y, numsamples=10).is_number
raises(ValueError, lambda: P(Y > z, numsamples=5))
assert P(sin(Y) <= 1, numsamples=10) == 1
assert P(sin(Y) <= 1, cos(Y) < 1, numsamples=10) == 1
# Make sure this doesn't raise an error
E(Sum(1 / z ** Y, (z, 1, oo)), Y > 2, numsamples=3)
assert all(i in range(1, 7) for i in density(X, numsamples=10))
assert all(i in range(4, 7) for i in density(X, X > 3, numsamples=10))
def test_given():
X = Normal("X", 0, 1)
Y = Normal("Y", 0, 1)
A = given(X, True)
B = given(X, Y > 2)
assert X == A == B
def test_factorial_moment():
X = Poisson("X", 2)
Y = Binomial("Y", 2, S.Half)
Z = Hypergeometric("Z", 4, 2, 2)
assert factorial_moment(X, 2) == 4
assert factorial_moment(Y, 2) == S.Half
assert factorial_moment(Z, 2) == Rational(1, 3)
x, y, z, l = symbols("x y z l")
Y = Binomial("Y", 2, y)
Z = Hypergeometric("Z", 10, 2, 3)
assert factorial_moment(Y, l) == y ** 2 * FallingFactorial(2, l) + 2 * y * (
1 - y
) * FallingFactorial(1, l) + (1 - y) ** 2 * FallingFactorial(0, l)
assert (
factorial_moment(Z, l)
== 7 * FallingFactorial(0, l) / 15
+ 7 * FallingFactorial(1, l) / 15
+ FallingFactorial(2, l) / 15
)
def test_dependence():
X, Y = Die("X"), Die("Y")
assert independent(X, 2 * Y)
assert not dependent(X, 2 * Y)
X, Y = Normal("X", 0, 1), Normal("Y", 0, 1)
assert independent(X, Y)
assert dependent(X, 2 * X)
# Create a dependency
XX, YY = given(Tuple(X, Y), Eq(X + Y, 3))
assert dependent(XX, YY)
def test_dependent_finite():
X, Y = Die("X"), Die("Y")
# Dependence testing requires symbolic conditions which currently break
# finite random variables
assert dependent(X, Y + X)
XX, YY = given(Tuple(X, Y), X + Y > 5) # Create a dependency
assert dependent(XX, YY)
def test_normality():
X, Y = Normal("X", 0, 1), Normal("Y", 0, 1)
x = Symbol("x", real=True, finite=True)
z = Symbol("z", real=True, finite=True)
dens = density(X - Y, Eq(X + Y, z))
assert integrate(dens(x), (x, -oo, oo)) == 1
def test_Density():
X = Die("X", 6)
d = Density(X)
assert d.doit() == density(X)
def test_NamedArgsMixin():
class Foo(Basic, NamedArgsMixin):
_argnames = "foo", "bar"
a = Foo(1, 2)
assert a.foo == 1
assert a.bar == 2
raises(AttributeError, lambda: a.baz)
class Bar(Basic, NamedArgsMixin):
pass
raises(AttributeError, lambda: Bar(1, 2).foo)
def test_density_constant():
assert density(3)(2) == 0
assert density(3)(3) == DiracDelta(0)
def test_real():
x = Normal("x", 0, 1)
assert x.is_real
def test_issue_10052():
X = Exponential("X", 3)
assert P(X < oo) == 1
assert P(X > oo) == 0
assert P(X < 2, X > oo) == 0
assert P(X < oo, X > oo) == 0
assert P(X < oo, X > 2) == 1
assert P(X < 3, X == 2) == 0
raises(ValueError, lambda: P(1))
raises(ValueError, lambda: P(X < 1, 2))
def test_issue_11934():
density = {0: 0.5, 1: 0.5}
X = FiniteRV("X", density)
assert E(X) == 0.5
assert P(X >= 2) == 0
def test_issue_8129():
X = Exponential("X", 4)
assert P(X >= X) == 1
assert P(X > X) == 0
assert P(X > X + 1) == 0
def test_issue_12237():
X = Normal("X", 0, 1)
Y = Normal("Y", 0, 1)
U = P(X > 0, X)
V = P(Y < 0, X)
W = P(X + Y > 0, X)
assert W == P(X + Y > 0, X)
assert
# omega_miya/utils/omega_plugin_utils/http_fetcher.py (from the rinrini001/omega-miya repository)
import os
import aiohttp
import aiofiles
import nonebot
from urllib.parse import urlparse
from http.cookies import SimpleCookie as SimpleCookie_
from asyncio.exceptions import TimeoutError as TimeoutError_
from dataclasses import dataclass
from typing import Dict, List, Union, Iterable, Optional, Any
from nonebot import logger
from omega_miya.database import DBStatus
global_config = nonebot.get_driver().config
ENABLE_PROXY = global_config.enable_proxy
ENABLE_FORCED_PROXY = global_config.enable_forced_proxy
PROXY_ADDRESS = global_config.proxy_address
PROXY_PORT = global_config.proxy_port
class HttpFetcher(object):
DEFAULT_HEADERS = {'accept': '*/*',
'accept-encoding': 'gzip, deflate',
'accept-language': 'zh-CN,zh;q=0.9',
'dnt': '1',
'sec-ch-ua': '"Google Chrome";v="89", "Chromium";v="89", ";Not A Brand";v="99"',
'sec-ch-ua-mobile': '?0',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'sec-gpc': '1',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/89.0.4389.114 Safari/537.36'}
@dataclass
class __FetcherResult:
error: bool
info: str
status: int
headers: dict
cookies: Optional[SimpleCookie_]
def success(self) -> bool:
if not self.error:
return True
else:
return False
@dataclass
class FetcherJsonResult(__FetcherResult):
result: dict
def __repr__(self):
return f'<FetcherJsonResult(' \
f'error={self.error}, status={self.status}, info={self.info}, result={self.result})>'
@dataclass
class FetcherTextResult(__FetcherResult):
result: str
def __repr__(self):
return f'<FetcherTextResult(' \
f'error={self.error}, status={self.status}, info={self.info}, result={self.result})>'
@dataclass
class FetcherBytesResult(__FetcherResult):
result: bytes
def __repr__(self):
return f'<FetcherBytesResult(' \
f'error={self.error}, status={self.status}, info={self.info}, result={self.result})>'
@dataclass
class FormData(aiohttp.FormData):
def __init__(
self,
fields: Iterable[Any] = (),
*,
is_multipart: bool = False,
is_processed: bool = False,
quote_fields: bool = True,
charset: Optional[str] = None,
boundary: Optional[str] = None
) -> None:
self._writer = aiohttp.multipart.MultipartWriter("form-data", boundary=boundary)
self._fields: List[Any] = []
self._is_multipart = is_multipart
self._is_processed = is_processed
self._quote_fields = quote_fields
self._charset = charset
if isinstance(fields, dict):
fields = list(fields.items())
elif not isinstance(fields, (list, tuple)):
fields = (fields,)
self.add_fields(*fields)
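# Usage sketch (field names are hypothetical): the subclass keeps the normal
# aiohttp.FormData interface, so multipart fields are added the usual way, e.g.
#
#   form = HttpFetcher.FormData()
#   form.add_field('comment', 'hello')
#   form.add_field('image', image_bytes, content_type='image/png',
#                  filename='upload.png')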
@classmethod
async def __get_proxy(cls, always_return_proxy: bool = False) -> Optional[str]:
if always_return_proxy:
return f'http://{PROXY_ADDRESS}:{PROXY_PORT}'
if not all([ENABLE_PROXY, PROXY_ADDRESS, PROXY_PORT]):
return None
if not ENABLE_PROXY:
return None
# check whether a proxy should be used
if ENABLE_FORCED_PROXY:
return f'http://{PROXY_ADDRESS}:{PROXY_PORT}'
else:
proxy_status_res = await DBStatus(name='PROXY_AVAILABLE').get_status()
if proxy_status_res.result == 1:
return f'http://{PROXY_ADDRESS}:{PROXY_PORT}'
else:
return None
def __init__(
self,
timeout: Union[int, float] = 10,
attempt_limit: int = 3,
flag: str = 'aiohttp',
headers: Optional[Dict[str, str]] = None,
cookies: Optional[Dict[str, str]] = None
):
self.__timeout = aiohttp.ClientTimeout(total=timeout)
self.__attempt_limit = attempt_limit
self.__headers = headers
self.__cookies = cookies
self.__flag = flag
async def download_file(
self,
url: str,
path: str,
*,
file_name: Optional[str] = None,
params: Optional[Dict[str, str]] = None,
force_proxy: bool = False,
**kwargs: Any) -> FetcherTextResult:
"""
下载文件
:param url: 链接
:param path: 下载文件夹路径
:param file_name: 文件名
:param params: 请求参数
:param force_proxy: 强制代理
:param kwargs: ...
:return:
"""
# check the folder the file will be saved to, creating it if needed
folder_path = os.path.abspath(path)
if not os.path.exists(folder_path):
os.makedirs(folder_path)
if file_name:
file_path = os.path.abspath(os.path.join(folder_path, file_name))
else:
file_name = os.path.basename(urlparse(url).path) if os.path.basename(urlparse(url).path) else str(hash(url))
file_path = os.path.abspath(os.path.join(folder_path, file_name))
proxy = await self.__get_proxy(always_return_proxy=force_proxy)
num_of_attempts = 0
while num_of_attempts < self.__attempt_limit:
try:
async with aiohttp.ClientSession(timeout=self.__timeout) as session:
async with session.get(
url=url, params=params,
headers=self.__headers, cookies=self.__cookies, proxy=proxy, timeout=self.__timeout,
**kwargs
) as rp:
file_bytes = await rp.read()
status = rp.status
headers = dict(rp.headers)
cookies = rp.cookies
async with aiofiles.open(file_path, 'wb') as f:
await f.write(file_bytes)
result = self.FetcherTextResult(
error=False, info='Success',
status=status, headers=headers, cookies=cookies, result=file_path)
return result
except TimeoutError_:
logger.opt(colors=True).warning(
fr'<Y><lw>HttpFetcher \<{self.__flag}></lw></Y> <lr>TimeoutError</lr> occurred '
f'in <lc>download_file</lc> attempt <y>{num_of_attempts + 1}</y>.')
except Exception as e:
logger.opt(colors=True).warning(
fr'<Y><lw>HttpFetcher \<{self.__flag}></lw></Y> <lr>{str(e.__class__.__name__)}</lr> occurred '
f'in <lc>download_file</lc> attempt <y>{num_of_attempts + 1}</y>.\n<y>Error info</y>: {str(e)}')
finally:
num_of_attempts += 1
else:
logger.opt(colors=True).error(
fr'<Y><lw>HttpFetcher \<{self.__flag}></lw></Y> <lr>ExceededAttemptNumberError</lr> '
f'Failed too many times in <lc>download_file</lc>.\n'
f'<y>url</y>: {url}\n<y>params</y>: {params}')
return self.FetcherTextResult(
error=True, info='Failed too many times in download_file',
status=-1, headers={}, cookies=None, result='')
async def get_json(
self,
url: str,
params: Dict[str, str] = None,
force_proxy: bool = False,
**kwargs: Any) -> FetcherJsonResult:
proxy = await self.__get_proxy(always_return_proxy=force_proxy)
num_of_attempts = 0
while num_of_attempts < self.__attempt_limit:
try:
async with aiohttp.ClientSession(timeout=self.__timeout) as session:
async with session.get(
url=url, params=params,
headers=self.__headers, cookies=self.__cookies, proxy=proxy, timeout=self.__timeout,
**kwargs
) as rp:
result_json = await rp.json()
status = rp.status
headers = dict(rp.headers)
cookies = rp.cookies
result = self.FetcherJsonResult(
error=False, info='Success',
status=status, headers=headers, cookies=cookies, result=result_json)
return result
except TimeoutError_:
logger.opt(colors=True).warning(
fr'<Y><lw>HttpFetcher \<{self.__flag}></lw></Y> <lr>TimeoutError</lr> occurred '
f'in <lc>get_json</lc> attempt <y>{num_of_attempts + 1}</y>.')
except Exception as e:
logger.opt(colors=True).warning(
fr'<Y><lw>HttpFetcher \<{self.__flag}></lw></Y> <lr>{str(e.__class__.__name__)}</lr> occurred '
f'in <lc>get_json</lc> attempt <y>{num_of_attempts + 1}</y>.\n<y>Error info</y>: {str(e)}')
finally:
num_of_attempts += 1
else:
logger.opt(colors=True).error(
fr'<Y><lw>HttpFetcher \<{self.__flag}></lw></Y> <lr>ExceededAttemptNumberError</lr> '
f'Failed too many times in <lc>get_json</lc>.\n'
f'<y>url</y>: {url}\n<y>params</y>: {params}')
return self.FetcherJsonResult(
error=True, info='Failed too many times in get_json',
status=-1, headers={}, cookies=None, result={})
async def get_text(
self,
url: str,
params: Dict[str, str] = None,
force_proxy: bool = False,
**kwargs: Any) -> FetcherTextResult:
proxy = await self.__get_proxy(always_return_proxy=force_proxy)
num_of_attempts = 0
while num_of_attempts < self.__attempt_limit:
try:
async with aiohttp.ClientSession(timeout=self.__timeout) as session:
async with session.get(
url=url, params=params,
headers=self.__headers, cookies=self.__cookies, proxy=proxy, timeout=self.__timeout,
**kwargs
) as rp:
result_text = await rp.text()
status = rp.status
headers = dict(rp.headers)
cookies = rp.cookies
result = self.FetcherTextResult(
error=False, info='Success',
status=status, headers=headers, cookies=cookies, result=result_text)
return result
except TimeoutError_:
logger.opt(colors=True).warning(
fr'<Y><lw>HttpFetcher \<{self.__flag}></lw></Y> <lr>TimeoutError</lr> occurred '
f'in <lc>get_text</lc> attempt <y>{num_of_attempts + 1}</y>.')
except Exception as e:
logger.opt(colors=True).warning(
fr'<Y><lw>HttpFetcher \<{self.__flag}></lw></Y> <lr>{str(e.__class__.__name__)}</lr> occurred '
f'in <lc>get_text</lc> attempt <y>{num_of_attempts + 1}</y>.\n<y>Error info</y>: {str(e)}')
finally:
num_of_attempts += 1
else:
logger.opt(colors=True).error(
fr'<Y><lw>HttpFetcher \<{self.__flag}></lw></Y> <lr>ExceededAttemptNumberError</lr> '
f'Failed too many times in <lc>get_text</lc>.\n'
f'<y>url</y>: {url}\n<y>params</y>: {params}')
return self.FetcherTextResult(
error=True, info='Failed too many times in get_text',
status=-1, headers={}, cookies=None, result='')
async def get_bytes(
self,
url: str,
params: Dict[str, str] = None,
force_proxy: bool = False,
**kwargs: Any) -> FetcherBytesResult:
proxy = await self.__get_proxy(always_return_proxy=force_proxy)
num_of_attempts = 0
while num_of_attempts < self.__attempt_limit:
try:
async with aiohttp.ClientSession(timeout=self.__timeout) as session:
async with session.get(
url=url, params=params,
headers=self.__headers, cookies=self.__cookies, proxy=proxy, timeout=self.__timeout,
**kwargs
) as rp:
result_bytes = await rp.read()
status = rp.status
headers = dict(rp.headers)
cookies = rp.cookies
result = self.FetcherBytesResult(
error=False, info='Success',
status=status, headers=headers, cookies=cookies, result=result_bytes)
return result
except TimeoutError_:
logger.opt(colors=True).warning(
fr'<Y><lw>HttpFetcher \<{self.__flag}></lw></Y> <lr>TimeoutError</lr> occurred '
f'in <lc>get_bytes</lc> attempt <y>{num_of_attempts + 1}</y>.')
except Exception as e:
logger.opt(colors=True).warning(
fr'<Y><lw>HttpFetcher \<{self.__flag}></lw></Y> <lr>{str(e.__class__.__name__)}</lr> occurred '
f'in <lc>get_bytes</lc> attempt <y>{num_of_attempts + 1}</y>.\n<y>Error info</y>: {str(e)}')
finally:
num_of_attempts += 1
else:
logger.opt(colors=True).error(
fr'<Y><lw>HttpFetcher \<{self.__flag}></lw></Y> <lr>ExceededAttemptNumberError</lr> '
f'Failed too many times in <lc>get_bytes</lc>.\n'
f'<y>url</y>: {url}\n<y>params</y>: {params}')
return self.FetcherBytesResult(
error=True, info='Failed too many times in get_bytes',
status=-1, headers={}, cookies=None, result=b'')
async def post_json(
self,
url: str,
params: Dict[str, str] = None,
json: Dict[str, Any] = None,
data: Union[FormData, Dict[str, Any]] = None,
force_proxy: bool = False,
**kwargs: Any) -> FetcherJsonResult:
proxy = await self.__get_proxy(always_return_proxy=force_proxy)
num_of_attempts = 0
while num_of_attempts < self.__attempt_limit:
try:
async with aiohttp.ClientSession(timeout=self.__timeout) as session:
async with session.post(
url=url, params=params, json=json, data=data,
headers=self.__headers, cookies=self.__cookies, proxy=proxy, timeout=self.__timeout,
**kwargs
) as rp:
result_json = await rp.json()
status = rp.status
headers = dict(rp.headers)
cookies = rp.cookies
result = self.FetcherJsonResult(
error=False, info='Success',
status=status, headers=headers, cookies=cookies, result=result_json)
return result
except TimeoutError_:
logger.opt(colors=True).warning(
fr'<Y><lw>HttpFetcher \<{self.__flag}></lw></Y> <lr>TimeoutError</lr> occurred '
f'in <lc>post_json</lc> attempt <y>{num_of_attempts + 1}</y>.')
except Exception as e:
logger.opt(colors=True).warning(
fr'<Y><lw>HttpFetcher \<{self.__flag}></lw></Y> <lr>{str(e.__class__.__name__)}</lr> occurred '
f'in <lc>post_json</lc> attempt <y>{num_of_attempts + 1}</y>.\n<y>Error info</y>: {str(e)}')
finally:
num_of_attempts += 1
else:
logger.opt(colors=True).error(
fr'<Y><lw>HttpFetcher \<{self.__flag}></lw></Y> <lr>ExceededAttemptNumberError</lr> '
f'Failed too many times in <lc>post_json</lc>.\n'
f'<y>url</y>: {url}\n<y>params</y>: {params}\n<y>json</y>: {json}\n<y>data</y>: {data}')
return self.FetcherJsonResult(
error=True, info='Failed too many times in post_json',
status=-1, headers={}, cookies=None, result={})
async def post_text(
self,
url: str,
params: Dict[str, str] = None,
json: Dict[str, Any] = None,
data: Union[FormData, Dict[str, Any]] = None,
force_proxy: bool = False,
**kwargs: Any) -> FetcherTextResult:
proxy = await self.__get_proxy(always_return_proxy=force_proxy)
num_of_attempts = 0
while num_of_attempts < self.__attempt_limit:
try:
async with aiohttp.ClientSession(timeout=self.__timeout) as session:
async with session.post(
url=url, params=params, json=json, data=data,
headers=self.__headers, cookies=self.__cookies, proxy=proxy, timeout=self.__timeout,
**kwargs
) as rp:
result_text = await rp.text()
status = rp.status
headers = dict(rp.headers)
cookies = rp.cookies
result = self.FetcherTextResult(
error=False, info='Success',
status=status, headers=headers, cookies=cookies, result=result_text)
return result
except TimeoutError_:
logger.opt(colors=True).warning(
fr'<Y><lw>HttpFetcher \<{self.__flag}></lw></Y> <lr>TimeoutError</lr> occurred '
f'in <lc>post_text</lc> attempt <y>{num_of_attempts + 1}</y>.')
except Exception as e:
logger.opt(colors=True).warning(
fr'<Y><lw>HttpFetcher \<{self.__flag}></lw></Y> <lr>{str(e.__class__.__name__)}</lr> occurred '
f'in <lc>post_text</lc> attempt <y>{num_of_attempts + 1}</y>.\n<y>Error info</y>: {str(e)}')
finally:
num_of_attempts += 1
else:
logger.opt(colors=True).error(
fr'<Y><lw>HttpFetcher \<{self.__flag}></lw></Y> <lr>ExceededAttemptNumberError</lr> '
f'Failed too many times in <lc>post_text</lc>.\n'
f'<y>url</y>: {url}\n<y>params</y>: {params}\n<y>json</y>: {json}\n<y>data</y>: {data}')
return self.FetcherTextResult(
error=True, info='Failed too many times in post_text',
status=-1, headers={}, cookies=None, result='')
async def post_bytes(
self,
url: str,
params: Dict[str, str] = None,
json: Dict[str, Any] = None,
data: Union[FormData, Dict[str, Any]] = None,
force_proxy: bool = False,
**kwargs: Any) -> FetcherBytesResult:
proxy = await self.__get_proxy(always_return_proxy=force_proxy)
num_of_attempts = 0
while num_of_attempts < self.__attempt_limit:
try:
async with aiohttp.ClientSession(timeout=self.__timeout) as session:
async with session.post(
url=url, params=params,
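# Usage sketch (editor's addition): the excerpt above breaks off inside post_bytes,
# whose remaining body follows the same retry pattern as post_json/post_text.
# A typical call from an async handler might look like the following; the URL,
# flag and path are illustrative only:
#
#   fetcher = HttpFetcher(timeout=10, flag='demo', headers=HttpFetcher.DEFAULT_HEADERS)
#   json_result = await fetcher.get_json(url='https://example.com/api', params={'q': 'x'})
#   if json_result.success():
#       data = json_result.result
#   dl_result = await fetcher.download_file(url='https://example.com/a.jpg', path='/tmp/dl')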
1.0 - 18.0/(np.sqrt(14)*np.sqrt(27)))
def test_correlation(self):
xm = np.array([-1.0, 0, 1.0])
ym = np.array([-4.0/3, -4.0/3, 5.0-7.0/3])
for x, y in self.cases:
dist = correlation(x, y)
assert_almost_equal(dist, 1.0 - np.dot(xm, ym)/(norm(xm)*norm(ym)))
def test_mahalanobis(self):
x = np.array([1.0, 2.0, 3.0])
y = np.array([1.0, 1.0, 5.0])
vi = np.array([[2.0, 1.0, 0.0],[1.0, 2.0, 1.0], [0.0, 1.0, 2.0]])
for x, y in self.cases:
dist = mahalanobis(x, y, vi)
assert_almost_equal(dist, np.sqrt(6.0))
class TestSquareForm(TestCase):
def test_squareform_empty_matrix(self):
A = np.zeros((0,0))
rA = squareform(np.array(A, dtype='double'))
assert_equal(rA.shape, (0,))
def test_squareform_empty_vector(self):
v = np.zeros((0,))
rv = squareform(np.array(v, dtype='double'))
assert_equal(rv.shape, (1,1))
assert_equal(rv[0, 0], 0)
def test_squareform_1by1_matrix(self):
A = np.zeros((1,1))
rA = squareform(np.array(A, dtype='double'))
assert_equal(rA.shape, (0,))
def test_squareform_one_vector(self):
v = np.ones((1,)) * 8.3
rv = squareform(np.array(v, dtype='double'))
assert_equal(rv.shape, (2,2))
assert_equal(rv[0,1], 8.3)
assert_equal(rv[1,0], 8.3)
def test_squareform_one_binary_vector(self):
# Tests squareform on a 1x1 binary matrix; conversion to double was
# causing problems (see pull request 73).
v = np.ones((1,), dtype=bool)
rv = squareform(v)
assert_equal(rv.shape, (2,2))
assert_(rv[0,1])
def test_squareform_2by2_matrix(self):
A = np.zeros((2,2))
A[0,1] = 0.8
A[1,0] = 0.8
rA = squareform(np.array(A, dtype='double'))
assert_equal(rA.shape, (1,))
assert_equal(rA[0], 0.8)
def test_squareform_multi_matrix(self):
for n in xrange(2, 5):
yield self.check_squareform_multi_matrix(n)
def check_squareform_multi_matrix(self, n):
X = np.random.rand(n, 4)
Y = pdist(X)
assert_equal(len(Y.shape), 1)
A = squareform(Y)
Yr = squareform(A)
s = A.shape
k = 0
if verbose >= 3:
print(A.shape, Y.shape, Yr.shape)
assert_equal(len(s), 2)
assert_equal(len(Yr.shape), 1)
assert_equal(s[0], s[1])
for i in xrange(0, s[0]):
for j in xrange(i+1, s[1]):
if i != j:
assert_equal(A[i, j], Y[k])
k += 1
else:
assert_equal(A[i, j], 0)
class TestNumObsY(TestCase):
def test_num_obs_y_multi_matrix(self):
for n in xrange(2, 10):
X = np.random.rand(n, 4)
Y = pdist(X)
assert_equal(num_obs_y(Y), n)
def test_num_obs_y_1(self):
# Tests num_obs_y(y) on a condensed distance matrix over 1
# observation. Expecting exception.
assert_raises(ValueError, self.check_y, 1)
def test_num_obs_y_2(self):
# Tests num_obs_y(y) on a condensed distance matrix over 2
# observations.
assert_(self.check_y(2))
def test_num_obs_y_3(self):
assert_(self.check_y(3))
def test_num_obs_y_4(self):
assert_(self.check_y(4))
def test_num_obs_y_5_10(self):
for i in xrange(5, 16):
self.minit(i)
def test_num_obs_y_2_100(self):
# Tests num_obs_y(y) on 100 improper condensed distance matrices.
# Expecting exception.
a = set([])
for n in xrange(2, 16):
a.add(n*(n-1)/2)
for i in xrange(5, 105):
if i not in a:
assert_raises(ValueError, self.bad_y, i)
def minit(self, n):
assert_(self.check_y(n))
def bad_y(self, n):
y = np.random.rand(n)
return num_obs_y(y)
def check_y(self, n):
return num_obs_y(self.make_y(n)) == n
def make_y(self, n):
return np.random.rand((n * (n - 1)) // 2)
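def _num_obs_y_length_example():
# Editor's sketch (not part of the original suite): a condensed distance vector
# for n observations has length n*(n-1)/2, which is the relation num_obs_y inverts.
X = np.random.rand(6, 3)
y = pdist(X)
assert len(y) == 6 * 5 // 2 == 15
assert num_obs_y(y) == 6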
class TestNumObsDM(TestCase):
def test_num_obs_dm_multi_matrix(self):
for n in xrange(1, 10):
X = np.random.rand(n, 4)
Y = pdist(X)
A = squareform(Y)
if verbose >= 3:
print(A.shape, Y.shape)
assert_equal(num_obs_dm(A), n)
def test_num_obs_dm_0(self):
# Tests num_obs_dm(D) on a 0x0 distance matrix. Expecting exception.
assert_(self.check_D(0))
def test_num_obs_dm_1(self):
# Tests num_obs_dm(D) on a 1x1 distance matrix.
assert_(self.check_D(1))
def test_num_obs_dm_2(self):
assert_(self.check_D(2))
def test_num_obs_dm_3(self):
assert_(self.check_D(3))
def test_num_obs_dm_4(self):
assert_(self.check_D(4))
def check_D(self, n):
return num_obs_dm(self.make_D(n)) == n
def make_D(self, n):
return np.random.rand(n, n)
def is_valid_dm_throw(D):
return is_valid_dm(D, throw=True)
class TestIsValidDM(TestCase):
def test_is_valid_dm_int16_array_E(self):
# Tests is_valid_dm(*) on an int16 array. Exception expected.
D = np.zeros((5, 5), dtype='i')
assert_raises(TypeError, is_valid_dm_throw, (D))
def test_is_valid_dm_int16_array_F(self):
D = np.zeros((5, 5), dtype='i')
assert_equal(is_valid_dm(D), False)
def test_is_valid_dm_improper_shape_1D_E(self):
D = np.zeros((5,), dtype=np.double)
assert_raises(ValueError, is_valid_dm_throw, (D))
def test_is_valid_dm_improper_shape_1D_F(self):
D = np.zeros((5,), dtype=np.double)
assert_equal(is_valid_dm(D), False)
def test_is_valid_dm_improper_shape_3D_E(self):
D = np.zeros((3,3,3), dtype=np.double)
assert_raises(ValueError, is_valid_dm_throw, (D))
def test_is_valid_dm_improper_shape_3D_F(self):
D = np.zeros((3,3,3), dtype=np.double)
assert_equal(is_valid_dm(D), False)
def test_is_valid_dm_nonzero_diagonal_E(self):
y = np.random.rand(10)
D = squareform(y)
for i in xrange(0, 5):
D[i, i] = 2.0
assert_raises(ValueError, is_valid_dm_throw, (D))
def test_is_valid_dm_nonzero_diagonal_F(self):
y = np.random.rand(10)
D = squareform(y)
for i in xrange(0, 5):
D[i, i] = 2.0
assert_equal(is_valid_dm(D), False)
def test_is_valid_dm_asymmetric_E(self):
y = np.random.rand(10)
D = squareform(y)
D[1,3] = D[3,1] + 1
assert_raises(ValueError, is_valid_dm_throw, (D))
def test_is_valid_dm_asymmetric_F(self):
y = np.random.rand(10)
D = squareform(y)
D[1,3] = D[3,1] + 1
assert_equal(is_valid_dm(D), False)
def test_is_valid_dm_correct_1_by_1(self):
D = np.zeros((1,1), dtype=np.double)
assert_equal(is_valid_dm(D), True)
def test_is_valid_dm_correct_2_by_2(self):
y = np.random.rand(1)
D = squareform(y)
assert_equal(is_valid_dm(D), True)
def test_is_valid_dm_correct_3_by_3(self):
y = np.random.rand(3)
D = squareform(y)
assert_equal(is_valid_dm(D), True)
def test_is_valid_dm_correct_4_by_4(self):
y = np.random.rand(6)
D = squareform(y)
assert_equal(is_valid_dm(D), True)
def test_is_valid_dm_correct_5_by_5(self):
y = np.random.rand(10)
D = squareform(y)
assert_equal(is_valid_dm(D), True)
def is_valid_y_throw(y):
return is_valid_y(y, throw=True)
class TestIsValidY(TestCase):
# If test case name ends on "_E" then an exception is expected for the
# given input, if it ends in "_F" then False is expected for the is_valid_y
# check. Otherwise the input is expected to be valid.
def test_is_valid_y_int16_array_E(self):
y = np.zeros((10,), dtype='i')
assert_raises(TypeError, is_valid_y_throw, (y))
def test_is_valid_y_int16_array_F(self):
y = np.zeros((10,), dtype='i')
assert_equal(is_valid_y(y), False)
def test_is_valid_y_improper_shape_2D_E(self):
y = np.zeros((3,3,), dtype=np.double)
assert_raises(ValueError, is_valid_y_throw, (y))
def test_is_valid_y_improper_shape_2D_F(self):
y = np.zeros((3,3,), dtype=np.double)
assert_equal(is_valid_y(y), False)
def test_is_valid_y_improper_shape_3D_E(self):
y = np.zeros((3,3,3), dtype=np.double)
assert_raises(ValueError, is_valid_y_throw, (y))
def test_is_valid_y_improper_shape_3D_F(self):
y = np.zeros((3,3,3), dtype=np.double)
assert_equal(is_valid_y(y), False)
def test_is_valid_y_correct_2_by_2(self):
y = self.correct_n_by_n(2)
assert_equal(is_valid_y(y), True)
def test_is_valid_y_correct_3_by_3(self):
y = self.correct_n_by_n(3)
assert_equal(is_valid_y(y), True)
def test_is_valid_y_correct_4_by_4(self):
y = self.correct_n_by_n(4)
assert_equal(is_valid_y(y), True)
def test_is_valid_y_correct_5_by_5(self):
y = self.correct_n_by_n(5)
assert_equal(is_valid_y(y), True)
def test_is_valid_y_2_100(self):
a = set([])
for n in xrange(2, 16):
a.add(n*(n-1)/2)
for i in xrange(5, 105):
if i not in a:
assert_raises(ValueError, self.bad_y, i)
def bad_y(self, n):
y = np.random.rand(n)
return is_valid_y(y, throw=True)
def correct_n_by_n(self, n):
y = np.random.rand((n * (n - 1)) // 2)
return y
def test_bad_p():
# Raise ValueError if p < 1.
p = 0.5
assert_raises(ValueError, minkowski, [1, 2], [3, 4], p)
assert_raises(ValueError, wminkowski, [1, 2], [3, 4], p, [1, 1])
def test_sokalsneath_all_false():
# Regression test for ticket #876
assert_raises(ValueError, sokalsneath, [False, False, False], [False, False, False])
def test_canberra():
# Regression test for ticket #1430.
assert_equal(canberra([1,2,3], [2,4,6]), 1)
assert_equal(canberra([1,1,0,0], [1,0,1,0]), 2)
def test_braycurtis():
# Regression test for ticket #1430.
assert_almost_equal(braycurtis([1,2,3], [2,4,6]), 1./3, decimal=15)
assert_almost_equal(braycurtis([1,1,0,0], [1,0,1,0]), 0.5, decimal=15)
def test_euclideans():
# Regression test for ticket #1328.
x1 = np.array([1, 1, 1])
x2 = np.array([0, 0, 0])
# Basic test of the calculation.
assert_almost_equal(sqeuclidean(x1, x2), 3.0, decimal=14)
assert_almost_equal(euclidean(x1, x2), np.sqrt(3), decimal=14)
# Check flattening for (1, N) or (N, 1) inputs
assert_almost_equal(euclidean(x1[np.newaxis, :], x2[np.newaxis, :]),
np.sqrt(3), decimal=14)
assert_almost_equal(sqeuclidean(x1[np.newaxis, :], x2[np.newaxis, :]),
3.0, decimal=14)
assert_almost_equal(sqeuclidean(x1[:, np.newaxis], x2[:, np.newaxis]),
3.0, decimal=14)
# Distance metrics only defined for vectors (= 1-D)
x = np.arange(4).reshape(2, 2)
assert_raises(ValueError, euclidean, x, x)
assert_raises(ValueError, sqeuclidean, x, x)
# Another check, with random data.
rs = np.random.RandomState(1234567890)
x = rs.rand(10)
y = rs.rand(10)
d1 = euclidean(x, y)
d2 = sqeuclidean(x, y)
assert_almost_equal(d1**2, d2, decimal=14)
def test_hamming_unequal_length():
# Regression test for gh-4290.
x = [0, 0, 1]
y = [1, 0, 1, 0]
# Used to give an AttributeError from ndarray.mean called on bool
assert_raises(ValueError, hamming, x, y)
def test_hamming_string_array():
# https://github.com/scikit-learn/scikit-learn/issues/4014
a = np.array(['eggs', 'spam', 'spam', 'eggs', 'spam', 'spam', 'spam',
'spam', 'spam', 'spam', 'spam', 'eggs', 'eggs', 'spam',
'eggs', 'eggs', 'eggs', 'eggs', 'eggs', 'spam'],
dtype='|S4')
b = np.array(['eggs', 'spam', 'spam', 'eggs', 'eggs', 'spam', 'spam',
'spam', 'spam', 'eggs', 'spam', 'eggs', 'spam', 'eggs',
'spam', 'spam', 'eggs', 'spam', 'spam', 'eggs'],
dtype='|S4')
desired = 0.45
assert_allclose(hamming(a, b), desired)
def test_sqeuclidean_dtypes():
# Assert that sqeuclidean returns the right types of values.
# Integer types should be converted to floating for stability.
# Floating point types should be the same as the input.
x = [1, 2, 3]
y = [4, 5, 6]
for dtype in [np.int8, np.int16, np.int32, np.int64]:
d = sqeuclidean(np.asarray(x, dtype=dtype), np.asarray(y, dtype=dtype))
assert_(np.issubdtype(d.dtype, np.floating))
for dtype in [np.uint8, np.uint16, np.uint32, np.uint64]:
d1 = sqeuclidean([0], np.asarray([-1], dtype=dtype))
d2 = sqeuclidean(np.asarray([-1], dtype=dtype), [0])
assert_equal(d1, d2)
assert_equal(d1, np.float64(np.iinfo(dtype).max) ** 2)
dtypes = [np.float32, np.float64, np.complex64, np.complex128]
for dtype in ['float16', 'float128']:
# These aren't present in older numpy versions; float128 may also not
# be present on all platforms.
if hasattr(np, dtype):
dtypes.append(getattr(np, dtype))
for dtype in dtypes:
d = sqeuclidean(np.asarray(x, dtype=dtype), np.asarray(y, dtype=dtype))
assert_equal(d.dtype, dtype)
def test_sokalmichener():
# Test that sokalmichener has the same result for bool and int inputs.
p = [True, True, False]
q = [True, False, True]
x = [int(b) for b in p]
y = [int(b) for b in q]
dist1 = sokalmichener(p, q)
dist2 = sokalmichener(x, y)
# These should be exactly the same.
assert_equal(dist1, dist2)
def test__validate_vector():
x = [1, 2, 3]
y = _validate_vector(x)
assert_array_equal(y, x)
y = _validate_vector(x, dtype=np.float64)
assert_array_equal(y, x)
assert_equal(y.dtype, np.float64)
x = [1]
y = _validate_vector(x)
assert_equal(y.ndim, 1)
assert_equal(y, x)
x = 1
y = _validate_vector(x)
assert_equal(y.ndim, 1)
assert_equal(y, [x])
INT32 optics_type; /* Optics type (code)*/
INT32 optics_dx; /* Optics param. - (size microns) */
INT32 optics_dy; /* Optics param. - (size microns) */
INT32 optics_wavelength; /* Optics param. - (size microns) */
INT32 optics_dispersion; /* Optics param. - (*10E6) */
INT32 optics_crossfire_x; /* Optics param. - (microRadians) */
INT32 optics_crossfire_y; /* Optics param. - (microRadians) */
INT32 optics_angle; /* Optics param. - (monoch. 2theta - microradians) */
INT32 optics_polarization_x; /* () */
INT32 optics_polarization_y; /* () */
char reserve_optics[4*sizeof(INT32)];
char reserve5[((32-28)*sizeof(INT32))]; /* Pad X-ray parameters to 128 bytes */
/* File parameters (1024 bytes) */
char filetitle[128]; /* Title */
char filepath[128]; /* path name for data file */
char filename[64]; /* name of data file */
char acquire_timestamp[32]; /* date and time of acquisition */
char header_timestamp[32]; /* date and time of header update */
char save_timestamp[32]; /* date and time file saved */
char file_comment[512]; /* comments - can be used as desired */
char reserve6[1024-(128+128+64+(3*32)+512)]; /* Pad File parameters to 1024 bytes */
/* Dataset parameters (512 bytes) */
char dataset_comment[512]; /* comments - can be used as desired */
/* Reserved for user definable data - will not be used by Mar! */
char user_data[512];
/* char pad[----] USED UP! */ /* pad out to 3072 bytes */
} frame_header;
"""
import struct as st
import array as ar
MAXIMAGES=9
class marFrame():
'''A class to extract the MarCCD header and image info from a MarCCD file
:param File: open binary file object [from open(name, 'rb')]
:param byteOrd: byte order, '<' (little endian, default) or '>'
:param dict IFD: TIFF image file directory, mapping each tag number to an
entry whose third element ([2]) holds the tag's values (as used below)
'''
def __init__(self,File,byteOrd='<',IFD={}):
# simple TIFF header info
self.TIFFsizeX = IFD[256][2][0]
self.TIFFsizeY = IFD[257][2][0]
self.TIFFbitDepth = IFD[258][2][0]
self.TIFFcompression = IFD[259][2][0] # 1 = no compression
self.TIFFphotometricInterpretation = IFD[262][2][0] # 1 = bilevel or grayscale where 0 is imaged as black
self.TIFFstripOffsets = IFD[273][2][0] # seems to be 4096 for marCCD
self.TIFForientation = IFD[274][2][0] # 1 = 0th row it top, 0th column is left
self.TIFFrowsPerStrip = IFD[278][2][0] # varies based on image size
self.TIFFstripByteCounts = IFD[279][2][0] # number of bytes in a strip also varies based on size
self.TIFFxResolution = IFD[282][2][0] # pixels per resolutionUnit in X direction (ImageWidth direction)
self.TIFFyResolution = IFD[283][2][0] # pixels per resolutionUnit in Y direction (ImageLength direction
self.TIFFresolutionUnit = IFD[296][2][0] # 3 = centimeter
self.byteDepth = self.TIFFbitDepth//8
self.arrayTypeCode = ['','B','H','I','I'][self.byteDepth]
# MarCCD specific header info
File.seek(IFD[34710][2][0])
self.headerType = st.unpack(byteOrd+'I',File.read(4))[0] #/* flag for header type (can be used as magic number) */
self.headerName = b''.join(st.unpack(byteOrd+16*'s',File.read(16))).replace(b'\x00',b'')
self.headerMajorVersion = st.unpack(byteOrd+'I',File.read(4))[0] #/* header_major_version (n.) */
self.headerMinorVersion = st.unpack(byteOrd+'I',File.read(4))[0] #/* header_minor_version (.n) */
self.headerByteOrder = st.unpack(byteOrd+'I',File.read(4))[0] #/* BIG_ENDIAN (Motorola,MIPS); LITTLE_ENDIAN (DEC, Intel) */
self.dataByteOrder = st.unpack(byteOrd+'I',File.read(4))[0] #/* BIG_ENDIAN (Motorola,MIPS); LITTLE_ENDIAN (DEC, Intel) */
self.headerSize = st.unpack(byteOrd+'I',File.read(4))[0] #/* in bytes */
self.frameType = st.unpack(byteOrd+'I',File.read(4))[0] #/* flag for frame type */
self.magicNumber = st.unpack(byteOrd+'I',File.read(4))[0] #/* to be used as a flag - usually to indicate new file */
self.compressionType = st.unpack(byteOrd+'I',File.read(4))[0] #/* type of image compression */
self.compression1 = st.unpack(byteOrd+'I',File.read(4))[0] #/* compression parameter 1 */
self.compression2 = st.unpack(byteOrd+'I',File.read(4))[0] #/* compression parameter 2 */
self.compression3 = st.unpack(byteOrd+'I',File.read(4))[0] #/* compression parameter 3 */
self.compression4 = st.unpack(byteOrd+'I',File.read(4))[0] #/* compression parameter 4 */
self.compression5 = st.unpack(byteOrd+'I',File.read(4))[0] #/* compression parameter 5 */
self.compression6 = st.unpack(byteOrd+'I',File.read(4))[0] #/* compression parameter 6 */
self.nheaders = st.unpack(byteOrd+'I',File.read(4))[0] #/* total number of headers */
self.nfast = st.unpack(byteOrd+'I',File.read(4))[0] #/* number of pixels in one line */
self.nslow = st.unpack(byteOrd+'I',File.read(4))[0] #/* number of lines in image */
self.depth = st.unpack(byteOrd+'I',File.read(4))[0] #/* number of bytes per pixel */
self.recordLength = st.unpack(byteOrd+'I',File.read(4))[0] #/* number of pixels between succesive rows */
self.signifBits = st.unpack(byteOrd+'I',File.read(4))[0] #/* true depth of data, in bits */
self.dataType = st.unpack(byteOrd+'I',File.read(4))[0] #/* (signed,unsigned,float...) */
self.saturatedValue = st.unpack(byteOrd+'I',File.read(4))[0] #/* value marks pixel as saturated */
self.sequence = st.unpack(byteOrd+'I',File.read(4))[0] #/* TRUE or FALSE */
self.nimages = st.unpack(byteOrd+'I',File.read(4))[0] #/* total number of images - size of each is nfast*(nslow/nimages) */
self.origin = st.unpack(byteOrd+'I',File.read(4))[0] #/* corner of origin */
self.orientation = st.unpack(byteOrd+'I',File.read(4))[0] #/* direction of fast axis */
self.viewDirection = st.unpack(byteOrd+'I',File.read(4))[0] #/* direction to view frame */
self.overflowLocation = st.unpack(byteOrd+'I',File.read(4))[0] #/* FOLLOWING_HEADER, FOLLOWING_DATA */
self.over8Bits = st.unpack(byteOrd+'I',File.read(4))[0] #/* # of pixels with counts > 255 */
self.over16Bits = st.unpack(byteOrd+'I',File.read(4))[0] #/* # of pixels with count > 65535 */
self.multiplexed = st.unpack(byteOrd+'I',File.read(4))[0] #/* multiplex flag */
self.nfastimages = st.unpack(byteOrd+'I',File.read(4))[0] #/* # of images in fast direction */
self.nslowimages = st.unpack(byteOrd+'I',File.read(4))[0] #/* # of images in slow direction */
self.darkcurrentApplied = st.unpack(byteOrd+'I',File.read(4))[0] #/* flags correction has been applied - hold magic number ? */
self.biasApplied = st.unpack(byteOrd+'I',File.read(4))[0] #/* flags correction has been applied - hold magic number ? */
self.flatfieldApplied = st.unpack(byteOrd+'I',File.read(4))[0] #/* flags correction has been applied - hold magic number ? */
self.distortionApplied = st.unpack(byteOrd+'I',File.read(4))[0] #/* flags correction has been applied - hold magic number ? */
self.originalHeaderType = st.unpack(byteOrd+'I',File.read(4))[0] #/* Header/frame type from file that frame is read from */
self.fileSaved = st.unpack(byteOrd+'I',File.read(4))[0] #/* Flag that file has been saved, should be zeroed if modified */
self.nValidPixels = st.unpack(byteOrd+'I',File.read(4))[0] #/* Number of pixels holding valid data - first N pixels */
self.defectmapApplied = st.unpack(byteOrd+'I',File.read(4))[0] #/* flags correction has been applied - hold magic number ? */
self.subimageNfast = st.unpack(byteOrd+'I',File.read(4))[0] #/* when divided into subimages (eg. frameshifted) */
self.subimageNslow = st.unpack(byteOrd+'I',File.read(4))[0] #/* when divided into subimages (eg. frameshifted) */
self.subimageOriginFast = st.unpack(byteOrd+'I',File.read(4))[0] #/* when divided into subimages (eg. frameshifted) */
self.subimageOriginSlow = st.unpack(byteOrd+'I',File.read(4))[0] #/* when divided into subimages (eg. frameshifted) */
self.readoutPattern = st.unpack(byteOrd+'I',File.read(4))[0] #/* BIT Code - 1 = A, 2 = B, 4 = C, 8 = D */
self.saturationLevel = st.unpack(byteOrd+'I',File.read(4))[0] #/* at this value and above, data are not reliable */
self.orientationCode = st.unpack(byteOrd+'I',File.read(4))[0] #/* Describes how this frame needs to be rotated to make it "right" */
self.frameshiftMultiplexed = st.unpack(byteOrd+'I',File.read(4))[0] #/* frameshift multiplex flag */
self.prescanNfast = st.unpack(byteOrd+'I',File.read(4))[0] #/* Number of non-image pixels preceeding imaging pixels - fast direction */
self.prescanNslow = st.unpack(byteOrd+'I',File.read(4))[0] #/* Number of non-image pixels preceeding imaging pixels - slow direction */
self.postscanNfast = st.unpack(byteOrd+'I',File.read(4))[0] #/* Number of non-image pixels followng imaging pixels - fast direction */
self.postscanNslow = st.unpack(byteOrd+'I',File.read(4))[0] #/* Number of non-image pixels followng imaging pixels - slow direction */
self.prepostTrimmed = st.unpack(byteOrd+'I',File.read(4))[0] #/* trimmed==1 means pre and post scan pixels have been removed */
File.seek(IFD[34710][2][0]+256)
#self.totalCounts = st.unpack(byteOrd+'Q',File.read(8))[0] # /* 64 bit integer range = 1.85E19*/
#self.specialCounts1 = st.unpack(byteOrd+'Q',File.read(8))[0]
#self.specialCounts2 = st.unpack(byteOrd+'Q',File.read(8))[0]
self.totalCounts = st.unpack(byteOrd+'II',File.read(8))
self.specialCounts1 = st.unpack(byteOrd+'II',File.read(8))
self.specialCounts2 = st.unpack(byteOrd+'II',File.read(8))
self.min = st.unpack(byteOrd+'I',File.read(4))[0]
self.max = st.unpack(byteOrd+'I',File.read(4))[0]
self.mean = st.unpack(byteOrd+'i',File.read(4))[0] # /* mean * 1000 */
self.rms = st.unpack(byteOrd+'I',File.read(4))[0] #/* rms * 1000 */
self.nZeros = st.unpack(byteOrd+'I',File.read(4))[0] #/* number of pixels with 0 value - not included in stats in unsigned data */
self.nSaturated = st.unpack(byteOrd+'I',File.read(4))[0] #/* number of pixels with saturated value - not included in stats */
self.statsUptodate = st.unpack(byteOrd+'I',File.read(4))[0] #/* Flag that stats OK - ie data not changed since last calculation */
self.pixelNoise = st.unpack(byteOrd+'I'*MAXIMAGES,File.read(4*MAXIMAGES)) # /* 1000*base noise value (ADUs) */
File.seek(IFD[34710][2][0]+256+128)
self.barcode = b''.join(st.unpack(byteOrd+16*'s',File.read(16))).replace(b'\x00',b'')
self.barcodeAngle = st.unpack(byteOrd+'I',File.read(4))[0]
self.barcodeStatus = st.unpack(byteOrd+'I',File.read(4))[0]
File.seek(IFD[34710][2][0]+256+128+256)
self.xtalToDetector = st.unpack(byteOrd+'i',File.read(4))[0] #/* 1000*distance in millimeters */
self.beamX = st.unpack(byteOrd+'i',File.read(4))[0] #/* 1000*x beam position (pixels) */
self.beamY = st.unpack(byteOrd+'i',File.read(4))[0] #/* 1000*y beam position (pixels) */
self.integrationTime = st.unpack(byteOrd+'i',File.read(4))[0] #/* integration time in milliseconds */
self.exposureTime = st.unpack(byteOrd+'i',File.read(4))[0] #/* exposure time in milliseconds */
self.readoutTime = st.unpack(byteOrd+'i',File.read(4))[0] #/* readout time in milliseconds */
self.nreads = st.unpack(byteOrd+'i',File.read(4))[0] #/* number of readouts to get this image */
self.startTwotheta = st.unpack(byteOrd+'i',File.read(4))[0] #/* 1000*two_theta angle */
self.startOmega = st.unpack(byteOrd+'i',File.read(4))[0] #/* 1000*omega angle */
self.startChi = st.unpack(byteOrd+'i',File.read(4))[0] #/* 1000*chi angle */
self.startKappa = st.unpack(byteOrd+'i',File.read(4))[0] #/* 1000*kappa angle */
self.startPhi = st.unpack(byteOrd+'i',File.read(4))[0] #/* 1000*phi angle */
self.startDelta = st.unpack(byteOrd+'i',File.read(4))[0] #/* 1000*delta angle */
self.startGamma = st.unpack(byteOrd+'i',File.read(4))[0] #/* 1000*gamma angle */
self.startXtalToDetector = st.unpack(byteOrd+'i',File.read(4))[0] #/* 1000*distance in mm (dist in um)*/
self.endTwotheta = st.unpack(byteOrd+'i',File.read(4))[0] #/* 1000*two_theta angle */
self.endOmega = st.unpack(byteOrd+'i',File.read(4))[0] #/* 1000*omega angle */
self.endChi = st.unpack(byteOrd+'i',File.read(4))[0] #/* 1000*chi angle */
self.endKappa = st.unpack(byteOrd+'i',File.read(4))[0] #/* 1000*kappa angle */
self.endPhi = st.unpack(byteOrd+'i',File.read(4))[0] #/* 1000*phi angle */
self.endDelta = st.unpack(byteOrd+'i',File.read(4))[0] #/* 1000*delta angle */
self.endGamma = st.unpack(byteOrd+'i',File.read(4))[0] #/* 1000*gamma angle */
self.endXtalToDetector = st.unpack(byteOrd+'i',File.read(4))[0] #/* 1000*distance in mm (dist in um)*/
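# Usage sketch (editor's addition): marFrame expects an already-open binary file
# object plus an IFD dict parsed from the file's TIFF directory by the caller;
# the file name and the `ifd` variable below are illustrative only.
#
#   with open('frame_0001.mccd', 'rb') as fp:
#       head = marFrame(fp, byteOrd='<', IFD=ifd)   # `ifd` built by the TIFF reader
#       print(head.nfast, head.nslow, head.exposureTime, head.beamX / 1000.0)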
0],
help="Monkhorst-Pack kpoint grid, in format like --kpoints-mp 1 1 1 0 0 0")
gp.add_argument("--kpoints-mp-nscf", type=int, nargs=6,
default=[3, 3, 3, 0, 0, 0],
help="Monkhorst-Pack kpoint grid, in format like --kpoints-mp 3 3 3 0 0 0")
gp.add_argument("--kpath-manual", type=str, nargs="+", default=None,
help="manual input kpath in crystal_b, like --kpath-manual '0.000000 0.000000 0.000000 GAMMA 5' '0.500000 0.000000 0.000000 X 5' '0.0000 0.000 0.50000 A |' '0.5 0.5 0.5 R '")
gp.add_argument("--kpath-file", type=str,
help="manual input kpath in crystal_b read from the file")
# ATOMIC_FORCES
gp = subparser.add_argument_group(title="ATOMIC_FORCES")
gp.add_argument("--pressure", type=float, default=None,
help="specify pressure acting on system in unit of Pa")
gp.add_argument("--pressuredir", type=str, default=None,
choices=["x", "y", "z"],
help="specify direction of pressure acting on system.")
# projwfc
gp = subparser.add_argument_group(title="projwfc")
gp.add_argument("--projwfc-filpdos", type=str, default="projwfc",
help="output projected dos file name")
gp.add_argument("--projwfc-ngauss", type=str, default="default",
help="gaussian broadening type")
gp.add_argument("--projwfc-degauss", type=str, default='default',
help="gaussian broadening")
gp.add_argument("--projwfc-emin", type=str, default='default',
help="min energy for DOS")
gp.add_argument("--projwfc-emax", type=str, default='default',
help="max energy for DOS")
gp.add_argument("--projwfc-deltae", type=str, default='default',
help="DeltaE: energy grid step (eV)")
# bands.x related parameters
# -----------------------------------------
gp = subparser.add_argument_group(title="bands.x")
gp.add_argument("--lsym", type=str, default=".true.",
choices=[".true.", ".false."],
help="set lsym variable in bands.x input.")
# na nc stepa stepc
# -----------------------------------------
gp = subparser.add_argument_group(title="cell optimization",
description="setting parameters needed by matflow cubic, hexagonal, tetragonal cell optimization")
gp.add_argument("--na", type=int, default=10,
help="number of a used")
gp.add_argument("--nc", type=int, default=10,
help="number of c used")
gp.add_argument("--stepa", type=float, default=0.05,
help="a step")
gp.add_argument("--stepc", type=float, default=0.05,
help="c step")
# neb
gp = subparser.add_argument_group(title="neb")
gp.add_argument("--string-method", type=str, default="neb",
help="string_method")
gp.add_argument("--nstep-path", type=int, default=100,
help="nstep_path")
gp.add_argument("--opt-scheme", type=str, default="broyden",
help="Specify the type of optimization scheme(sd, broyden, broyden2, quick-min, langevin)")
gp.add_argument("--num-of-images", type=int, default=5,
help="number of total images(including the initial and final image). about how to set proper number of images: usually the inter-image distance between 1~2Bohr is OK")
gp.add_argument("--k-max", type=float, default=0.3e0,
help="Set them to use a Variable Elastic Constants scheme elastic constants are in the range [ k_min, k_max ], this is useful to rise the resolution around the saddle point")
gp.add_argument("--k-min", type=float, default=0.2e0,
help="Set them to use a Variable Elastic Constants scheme elastic constants are in the range [ k_min, k_max ], this is useful to rise the resolution around the saddle point")
gp.add_argument("--ci-scheme", type=str, default="auto",
help="Specify the type of Climbing Image scheme(no-CI, auto, manual)")
gp.add_argument("--path-thr", type=float, default=0.05,
help="path_thr")
gp.add_argument("--ds", type=float, default=1.e0, help="Optimisation step length ( Hartree atomic units )")
gp.add_argument("--first-last-opt", type=str, default=None,
choices=[".true.", ".false."],
help="whether to optimize the first and last image")
# for phx
# --------------------------------------------------------------
gp = subparser.add_argument_group(title="ph.x")
gp.add_argument("--tr2-ph", type=float, default=1.0e-14,
help="threshold for self-consistency.")
gp.add_argument("--nq", type=int, nargs="+",
default=[0, 0, 0],
help="set value of nq1 nq2 nq3.")
gp.add_argument("--epsil", type=str, default=None,
choices=[".true.", ".false."],
help="set epsil in inputph")
gp.add_argument("--lraman", type=str, default=None,
choices=[".true.", ".false."],
help="set lraman, can be 'true' or 'false' only. default is None which means 'false' in real world.")
gp.add_argument("--search-sym", type=str, default=None,
choices=[".true.", ".false."],
help="set it to .false. if you want to disable the mode symmetry analysis.")
# Phonopy
# ---------------------------------------------------------
gp = subparser.add_argument_group(title="phonopy")
gp.add_argument("--supercell-n", type=int, nargs="+",
default=[1, 1, 1],
help="supercell build for Phonopy.")
gp = subparser.add_argument_group(title="pp.x")
# pp.x
gp.add_argument("--plot-num", type=int, nargs="+", default=[0],
choices=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 17, 18, 19, 20, 21],
help="""
type of analysis stored in the filplot file for later plot, 0: electron-pseudo-charge-density,
1: total-potential,
2: local-ionic-potential,
3: ldos,
4: local-density-of-electronic-entropy,
5: stm,
6: spin-polar,
7: molecular-orbitals,
8: electron-local-function,
9: charge-density-minus-superposition-of-atomic-densities,
10: ILDOS,
11: v_bare+v_H-potential,
12: sawtooth-electric-field-potential,
13: nocollinear-magnetization,
17: all-electron-charge-density-paw-only,
18: exchange-correlation-magnetic-field-noncollinear-case,
19: reduced-density-gradient,
20: product-of-charge-density-with-hessian,
21: all-electron-density-paw-only,""")
gp.add_argument("--iflag", type=int,
default=3,
choices=[0, 1, 2, 3, 4],
help="dimension of the plot. 0: 1D plot of the spherical average, 1: 1D plot, 2: 2D plot, 3: 3D plot, 4: 2D polar plot on a sphere")
gp.add_argument("--output-format", type=int, default=5,
choices=[0, 1, 2, 3, 4, 5, 6, 7],
help="output file format for visualization. 0: gnuplot(1D), 1: no longer supported, 2: plotrho(2D), 3: XCRYSDEN(2d), 4: no longer supported, 5: XCRYSDEN(3D), 6: gaussian cube(3D), 7: gnuplot(2D)")
# --------------------------------------------------------------------------
# SIESTA
# --------------------------------------------------------------------------
subparser = subparsers.add_parser("siesta", help="using siesta as calculator")
gp = subparser.add_argument_group(title="overall running control:")
gp.add_argument("-r", "--runtype", type=int, default=0,
choices=[0, 1, 2, 3, 4, 5, 6],
help="choices of runtype. 0->static_run; 1->optimization; 2->cubic-cell; 3->hexagonal-cell; 4->tetragonal-cell; 5->phonopy; 6->molecular dynamics")
gp.add_argument("-d", "--directory", type=str, default="matflow-running",
help="Directory for the running.")
gp.add_argument("--runopt", type=str, default="gen",
choices=["gen", "run", "genrun"],
help="Generate or run or both at the same time.")
gp.add_argument("--auto", type=int, default=3,
choices=[0, 1, 2, 3],
help="auto:0 nothing, 1: copying files to server, 2: copying and executing, 3: pymatflow run inserver with direct submit, in order use auto=1, 2, you must make sure there is a working ~/.pymatflow/server_[pbs|llhpc].conf")
# calypso input.dat template
gp = subparser.add_argument_group(title="template",
description="read in Calypso input.dat template")
gp.add_argument("--input-dat", type=str, default=None,
help="specify Calypso input.dat template to set parameters")
# -----------------------------------------------------------------
# run params
# -----------------------------------------------------------------
gp.add_argument("--mpi", type=str, default="",
help="MPI command: like 'mpirun -np 4'")
gp.add_argument("--server", type=str, default="pbs",
choices=["pbs", "llhpc"],
help="type of remote server, can be pbs or llhpc")
gp.add_argument("--jobname", type=str, default="matflow-job",
help="jobname on the pbs server")
gp.add_argument("--nodes", type=int, default=1,
help="Nodes used in server")
gp.add_argument("--ppn", type=int, default=32,
help="ppn of the server")
gp.add_argument("--queue", type=str, default=None,
help="the queue to submit to job, default is not set")
# llhpc
gp.add_argument("--partition", type=str, default="free",
help="choose partition to submit job")
gp.add_argument("--ntask", type=int, default=24,
help="choose task number")
gp.add_argument("--stdout", type=str, default="slurm.out",
help="set standard out")
gp.add_argument("--stderr", type=str, default="slurm.err",
help="set standard err")
# structure file: either xyz or cif; they are mutually exclusive
# actually this could be put in the main subparser, but that would make the command feel less like a git sub-command,
# so we put them in every subsubparser
structfile = subparser.add_mutually_exclusive_group(required=True) # at least one of the structure files must be provided
# argparse will make sure only one of the arguments in structfile (xyz, cif, xsd, xsf) appears on the command line
structfile.add_argument("--xyz", type=str, default=None,
help="The xyz structure file with the second line specifying the cell parameter")
structfile.add_argument("--cif", type=str, default=None,
help="The cif structure file")
structfile.add_argument("--xsd", type=str, default=None,
help="The xsd structure file")
structfile.add_argument("--xsf", type=str, default=None,
help="The xsf structure file")
# potential file
gp = subparser.add_argument_group(title="pseudopotential")
gp.add_argument("--pot", type=str, default="./",
help="specify the path to dir containing all the needed pseudopotential, default behavior is find them in the current directory automatically. if you pass 'auto' to it, matflow will get the pots automatically(need simple configuration, see manual)")
# --------------------------------------------------------------------------
gp = subparser.add_argument_group(title="electronic")
gp.add_argument("--meshcutoff", type=int, default=200,
help="MeshCutoff (Ry)")
gp.add_argument("--solution-method", type=str, default="diagon",
choices=["diagon", "OMM", "OrderN", "PEXSI"],
help="SolutionMethod(diagon, OMM, OrderN, PEXSI)")
gp.add_argument("--functional", type=str, default="GGA",
help="XC.functional")
gp.add_argument("--authors", type=str, default="PBE",
help="XC.authors")
gp.add_argument("--tolerance", type=float, default=1.0e-6,
help="DM.Tolerance")
gp.add_argument("--numberpulay", type=int, default=8,
help="DM.NumberPulay")
gp.add_argument("--mixing", type=float, default=0.1,
help="DM.MixingWeight")
gp.add_argument("--kpoints-mp", type=int, nargs="+",
default=[3, 3, 3],
help="set kpoints like '3 3 3'")
gp.add_argument("--kpath-manual", type=str, nargs="+", default=None,
help="manual input kpath for band structure calculation")
gp.add_argument("--kpath-file", type=str,
help="file to read the kpath for band structure calculation")
gp.add_argument("--occupation", type=str, default="FD",
choices=["FD", "MP"],
help="OccupationFunction(FD or MP)")
gp.add_argument("--electronic-temperature", type=int, default=300,
help="Electronic Temperature")
gp.add_argument("--pao-fix-split-table", type=str, default=None,
choices=["T", "F"],
help="can fix problem with small split_norm WARNING")
# properties related parameter
# ------------------------------
gp = subparser.add_argument_group(title="properties")
gp.add_argument("-p", "--properties" ,nargs="+", type=int, default=[],
help="Option for properties calculation. 1->PDOS; 2->LDOS; 3->Bands; 4->Charge Density; 5->Chemical analysis; 6->Macro Polarization; 7->Net Charge Dipole Electric Field; 8->Optical; 9->Wannier90 ")
gp.add_argument("--pdos-block", type=float, nargs="+",
default=[-20, 10, 0.2, 500])
gp.add_argument("--polarization-grids", nargs="+", type=str,
default=["10 3 3 no", "2 20 2 no", "4 4 15 no"],
help="PolarizationGrids")
gp.add_argument("--external-electric-field", nargs="+", type=float,
default=[0.0, 0.0, 0.5],
help="External Electric field")
gp.add_argument("--optical-energy-minimum", type=float,
default=0.0,
help="Optical.Energy.Minimum")
gp.add_argument("--optical-energy-maximum", type=float,
default=10.0,
help="Optical.Energy.Maximum")
gp.add_argument("--optical-broaden", type=float,
default=0.0,
help="Optical.Broaden")
gp.add_argument("--optical-scissor", type=float,
default=0.0,
help="Optical.Scissor")
gp.add_argument("--optical-mesh", nargs="+", type=int,
default=[5, 5, 5],
help="Optical.Mesh")
gp.add_argument("--optical-polarization-type", type=str,
default="unpolarized",
help="Optical.PolarizationType")
gp.add_argument("--optical-vector", nargs="+", type=float,
default=[1.0, 0.0, 0.5],
help="Optical.Vector")
gp.add_argument("--wannier90-unkgrid", nargs="+", type=int,
default=[10, 10, 10],
help="Siesta2Wannier90.UnkGrid[1-3]")
# ions relaed parameter
# ==================================================
gp = subparser.add_argument_group(title="ions")
gp.add_argument("--variablecell", type=str, default="false",
choices=["true", "false"],
help="MD.VariableCell")
gp.add_argument("--forcetol", type=float, default=0.04,
help="Force tolerance in coordinate optimization. default=0.04 eV/Ang")
gp.add_argument("--stresstol", type=float, default=1,
help="Stress tolerance in variable-cell CG optimization. default=1 GPa")
gp.add_argument("--targetpressure", type=float, default=0,
help="Target pressure for Parrinello-Rahman method, variable cell optimizations, and annealing options.")
# na nc stepa stepc
# --------------------------------------------------------------------------
gp = subparser.add_argument_group(title="cell optimization",
description="setting of parameters needed in matflow cubic, hexagonal, tetragonal cell parameters optimization")
gp.add_argument("--na", type=int, default=10,
help="number of a used")
gp.add_argument("--nc", type=int, default=10,
help="number of c | |
Parameters
----------
transforms: list of itk.TransformBaseTemplate[itk.D]
Python list of the transforms to write.
filename:
Path to the transform file (typically a .h5 file).
compression:
Use compression, if the file format supports it.
"""
import itk
writer = itk.TransformFileWriterTemplate[itk.D].New()
writer.SetFileName(f"{filename}")
writer.SetUseCompression(compression)
for transform in transforms:
writer.AddTransform(transform)
writer.Update()
def search(s: str, case_sensitive: bool = False) -> List[str]: # , fuzzy=True):
"""Search for a class name in the itk module."""
s = s.replace(" ", "")
if not case_sensitive:
s = s.lower()
import itk
names = sorted(dir(itk))
# exact match first
if case_sensitive:
res = [n for n in names if s == n]
else:
res = [n for n in names if s == n.lower()]
# then exact match inside the name
if case_sensitive:
res += [n for n in names if s in n and s != n]
else:
res += [n for n in names if s in n.lower() and s != n.lower()]
# if fuzzy:
# try:
# everything now requires editdist
# import editdist
# if case_sensitive:
# res.sort(key=lambda x: editdist.distance(x, s))
# else:
# res.sort(key=lambda x: (editdist.distance(x.lower(), s), x))
# except:
# pass
return res
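# Example (editor's sketch): exact results depend on the locally wrapped ITK
# modules, but a query such as the following would typically list the median
# filters and related wrappers:
#
#   import itk
#   print(itk.search("MedianImage")[:5])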
def _snake_to_camel(keyword: str):
# Helpers for set_inputs snake case to CamelCase keyword argument conversion
_snake_underscore_re = re.compile("(_)([a-z0-9A-Z])")
def _underscore_upper(match_obj):
return match_obj.group(2).upper()
camel = keyword[0].upper()
if _snake_underscore_re.search(keyword[1:]):
return camel + _snake_underscore_re.sub(_underscore_upper, keyword[1:])
return camel + keyword[1:]
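# Quick examples (editor's addition), matching the regex behaviour above:
#   _snake_to_camel("number_of_threads") -> "NumberOfThreads"
#   _snake_to_camel("size")              -> "Size"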
def set_inputs(
new_itk_object,
inargs: Optional[Sequence[Any]] = None,
inkargs: Optional[Dict[str, Any]] = None,
):
"""Set the inputs of the given objects, according to the non named or the
named parameters in args and kargs
This function tries to assign all the non named parameters in the input of
the new_itk_object
- the first non named parameter in the first input, etc.
The named parameters are used by calling the method with the same name
prefixed by 'Set'.
set_inputs( obj, kargs={'Threshold': 10} ) calls obj.SetThreshold(10)
This is the function use in the enhanced New() method to manage the inputs.
It can be used to produce a similar behavior:
def SetInputs(self, *args, **kargs):
import itk
itk.set_inputs(self, *args, **kargs)
"""
# Fix bug with Mutable Default Arguments
# https://docs.python-guide.org/writing/gotchas/
args: List[Any] = inargs if inargs else []
kargs: Dict[str, Any] = inkargs if inkargs else {}
# try to get the images from the filters in args
args = [output(arg) for arg in args]
# args without name are filter used to set input image
#
# count SetInput calls to call SetInput, SetInput2, SetInput3, ...
# useful with filter which take 2 input (or more) like SubtractImageFiler
# Ex: subtract image2.png to image1.png and save the result in result.png
# r1 = itk.ImageFileReader.US2.New(FileName='image1.png')
# r2 = itk.ImageFileReader.US2.New(FileName='image2.png')
# s = itk.SubtractImageFilter.US2US2US2.New(r1, r2)
# itk.ImageFileWriter.US2.New(s, FileName='result.png').Update()
setInputNb: int = -1
try:
for setInputNb, arg in enumerate(args):
methodName = "SetInput%i" % (setInputNb + 1)
if methodName in dir(new_itk_object):
# first try to use methods called SetInput1, SetInput2, ...
# those method should have more chances to work in case of
# multiple input types
getattr(new_itk_object, methodName)(arg)
else:
# no method called SetInput?
# try with the standard SetInput(nb, input)
new_itk_object.SetInput(setInputNb, arg)
except TypeError as e:
# the exception has (at least) two possible reasons:
# + the filter doesn't take the input number as first argument
# + arg is an object of wrong type
#
# if it's not the first input, re-raise the exception
if setInputNb != 0:
raise e
# it's the first input, try to use the SetInput() method without input
# number
new_itk_object.SetInput(args[0])
# but raise an exception if there is more than 1 argument
if len(args) > 1:
raise TypeError("Object accepts only 1 input.")
except AttributeError:
# There is no SetInput() method, try SetImage
# but before, check the number of inputs
if len(args) > 1:
raise TypeError("Object accepts only 1 input.")
methodList = ["SetImage", "SetInputImage"]
methodName = None
for m in methodList:
if m in dir(new_itk_object):
methodName = m
if methodName:
getattr(new_itk_object, methodName)(args[0])
else:
raise AttributeError("No method found to set the input.")
# named args : name is the function name, value is argument(s)
for attribName, value in kargs.items():
# use Set as prefix. It allow to use a shorter and more intuitive
# call (Ex: itk.ImageFileReader.UC2.New(FileName='image.png')) than
# with the full name
# (Ex: itk.ImageFileReader.UC2.New(SetFileName='image.png'))
if attribName not in ["auto_progress", "template_parameters"]:
if attribName.islower():
attribName = _snake_to_camel(attribName)
attrib = getattr(new_itk_object, "Set" + attribName)
# Do not use try-except mechanism as this leads to
# segfaults. Instead limit the number of types that are
# tested. The list of tested type could maybe be replaced by
# a test that would check for iterables.
import itk
if type(value) in [list, tuple]:
try:
output_value = [itk.output(x) for x in value]
attrib(*output_value)
except Exception:
attrib(itk.output(value))
else:
attrib(itk.output(value))
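# Example (editor's sketch, illustrative object names): both calls below end up
# invoking obj.SetThreshold(10); lowercase snake_case keywords are converted by
# _snake_to_camel before the 'Set' prefix is applied.
#
#   set_inputs(obj, [reader], {'Threshold': 10})
#   set_inputs(obj, [reader], {'threshold': 10})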
class templated_class:
"""This class is used to mimic the behavior of the templated C++ classes.
It is used this way:
class CustomClass:
# class definition here
CustomClass = templated_class(CustomClass)
customObject = CustomClass[template, parameters].New()
The template parameters are passed to the custom class constructor as a
named parameter 'template_parameters' in a tuple.
The custom class may implement a static method
check_template_parameters(parameters) which should raise an exception if
the template parameters provided are not suitable to instantiate the custom
class.
"""
def __init__(self, cls) -> None:
"""cls is the custom class"""
self.__cls__ = cls
self.__templates__ = {}
def New(self, *args, **kargs):
"""Use the parameters to infer the types of the template parameters."""
# extract the types from the arguments to instantiate the class
import itk
types = tuple(class_(o) for o in args)
return self[types].New(*args, **kargs)
def __getitem__(self, template_parameters):
"""Return a pair class-template parameters ready to be instantiated.
The template parameters may be validated if the custom class provide
the static method check_template_parameters(parameters).
"""
if not isinstance(template_parameters, tuple):
template_parameters = (template_parameters,)
return templated_class.__templated_class_and_parameters__(
self, template_parameters
)
def check_template_parameters(self, template_parameters) -> None:
"""Check the template parameters passed in parameter."""
# this method is there mainly to make it possible to reuse it in the
# custom class constructor after having used templated_class().
# Without that, the following example doesn't work:
#
# class CustomClass:
# def __init__(self, *args, **kargs):
# template_parameters = kargs["template_parameters"]
# CustomClass.check_template_parameters(template_parameters)
# other init stuff
# def check_template_parameters(template_parameters):
# check, really
# pass
# CustomClass = templated_class(CustomClass)
#
self.__cls__.check_template_parameters(template_parameters)
def add_template(self, name: str, params):
if not isinstance(params, list) and not isinstance(params, tuple):
params = (params,)
params = tuple(params)
val = self[params]
self.__templates__[params] = val
setattr(self, name, val)
def add_image_templates(self, *args) -> None:
import itk
if not args:
return
combinations = [[t] for t in args[0]]
for types in args[1:]:
temp = []
for t in types:
for c in combinations:
temp.append(c + [t])
combinations = temp
for d in itk.DIMS:
for c in combinations:
parameters = []
name = ""
for t in c:
parameters.append(itk.Image[t, d])
name += "I" + t.short_name + str(d)
self.add_template(name, tuple(parameters))
class __templated_class_and_parameters__:
"""Inner class used to store the pair class-template parameters ready
to instantiate.
"""
def __init__(self, l_templated_class, l_template_parameters) -> None:
self.__templated_class__ = l_templated_class
self.__template_parameters__ = l_template_parameters
if "check_template_parameters" in dir(l_templated_class.__cls__):
l_templated_class.__cls__.check_template_parameters(
l_template_parameters
)
def New(self, *args, **kargs):
"""A New() method to mimic the ITK default behavior, even if the
class doesn't provide any New() method.
"""
kargs["template_parameters"] = self.__template_parameters__
if "New" in dir(self.__templated_class__.__cls__):
obj = self.__templated_class__.__cls__.New(*args, **kargs)
else:
obj = self.__templated_class__.__cls__(*args, **kargs)
setattr(obj, "__template_parameters__", self.__template_parameters__)
setattr(obj, "__templated_class__", self.__templated_class__)
return obj
def __call__(self, *args, **kargs):
return self.New(*args, **kargs)
def keys(self):
return self.__templates__.keys()
def values(self):
return list(self.__templates__.values())
def items(self):
return list(self.__templates__.items())
# everything after this comment is for dict interface
# and is a copy/paste from DictMixin
# only methods to edit dictionary are not there
def __iter__(self):
yield from self.keys()
def has_key(self, key: str):
return key in self.__templates__
def __contains__(self, key: str):
return key in self.__templates__
def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
return self.__templates__.get(key, default)
def __len__(self):
return len(self.__templates__)
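# Usage sketch (editor's addition), following the class docstring; CustomClass and
# the template parameter are illustrative:
#
#   class CustomClass:
#       def __init__(self, *args, **kargs):
#           self.template_parameters = kargs["template_parameters"]
#   CustomClass = templated_class(CustomClass)
#
#   import itk
#   obj = CustomClass[itk.F].New()   # passes (itk.F,) to CustomClass as template_parameters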