try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except Exception:
    pass

import os
import sys
import codecs

import docutils.core

from .RevealTranslator import RST2RevealTranslator, RST2RevealWriter

# Import custom directives
from .TwoColumnsDirective import *
from .PygmentsDirective import *
from .VideoDirective import *
from .PlotDirective import *
from .SmallRole import *
from .VspaceRole import *
from .ClassDirective import *
from .ClearDirective import *
from .TemplateDirective import *


class Parser:
    """Class converting a stand-alone reST file into a Reveal.js-powered HTML5 file,
    using the provided options."""

    def __init__(self, input_file, output_file='', theme='default', transition='default',
                 stylesheet='', mathjax_path='', pygments_style='', vertical_center=False,
                 horizontal_center=False, title_center=False, footer=False, page_number=False,
                 controls=False, firstslide_template='', footer_template='', init_html=False,
                 reveal_root='reveal'):
        """
        Constructor of the Parser class.

        ``create_slides()`` must then be called to actually produce the presentation.

        Arguments:

        * input_file: name of the reST file to be processed (mandatory).
        * output_file: name of the HTML file to be generated (default: same as
          input_file, but with a .html extension).
        * theme: the name of the theme to be used ({**default**, beige, night}).
        * transition: the transition between slides ({**default**, cube, page, concave,
          zoom, linear, fade, none}).
        * stylesheet: a custom CSS file which extends or replaces the chosen theme.
        * mathjax_path: URL or path to the MathJax library
          (default: http://cdn.mathjax.org/mathjax/latest/MathJax.js).
        * pygments_style: the style to be used for syntax highlighting with Pygments.
          The available styles depend on your Pygments version; list them with::

              from pygments.styles import STYLE_MAP
              print(STYLE_MAP.keys())

        * vertical_center: whether the slide content should be vertically centered
          (default: False).
        * horizontal_center: whether the slide content should be horizontally centered
          (default: False).
        * title_center: whether the title of each slide should be horizontally centered
          (default: False).
        * footer: whether the footer line should be displayed (default: False).
        * page_number: whether the slide number should be displayed (default: False).
        * controls: whether the control arrows should be displayed (default: False).
        * firstslide_template: template string defining how the first slide will be
          rendered in HTML.
        * footer_template: template string defining how the footer will be rendered
          in HTML.

        The ``firstslide_template`` and ``footer_template`` can use the following
        substitution variables:

        * %(title)s: the title of the presentation.
        * %(subtitle)s: the subtitle of the presentation (either a level-2 header or
          the :subtitle: field, if any).
        * %(author)s: the :author: field (if any).
        * %(institution)s: the :institution: field (if any).
        * %(email)s: the :email: field (if any).
        * %(date)s: the :date: field (if any).
        * %(is_author)s: the '.' character if the :author: field is defined, '' otherwise.
        * %(is_subtitle)s: the '-' character if the subtitle is defined, '' otherwise.
        * %(is_institution)s: the '-' character if the :institution: field is defined,
          '' otherwise.

        You can also use your own fields in the templates.
        """
        # Input/Output files
        self.input_file = input_file
        self.output_file = output_file
        # Style
        self.theme = theme
        self.stylesheet = stylesheet
        self.transition = transition
        self.vertical_center = vertical_center
        self.horizontal_center = horizontal_center
        self.title_center = title_center
        self.write_footer = footer
        self.page_number = page_number
        self.controls = controls
        # MathJax
        if mathjax_path == '':
            self.mathjax_path = 'http://cdn.mathjax.org/mathjax/latest/MathJax.js'
        else:
            self.mathjax_path = mathjax_path
        # Pygments
        self.pygments_style = pygments_style
        # Template for the first slide
        self.firstslide_template = firstslide_template
        # Template for the footer
        self.footer_template = footer_template
        # Initialization HTML for reveal.js
        self.init_html = init_html
        # Root path to reveal.js
        self.reveal_root = reveal_root

    def create_slides(self):
        """Creates the HTML5 presentation based on the arguments given to the constructor."""
        # Copy the reveal library into the current directory
        self._copy_reveal()
        # Create the writer and retrieve the parts
        self.html_writer = RST2RevealWriter()
        self.html_writer.translator_class = RST2RevealTranslator
        with codecs.open(self.input_file, 'r', 'utf8') as infile:
            self.parts = docutils.core.publish_parts(source=infile.read(),
                                                     writer=self.html_writer)
        # Produce the html file
        self._produce_output()

    def _copy_reveal(self):
        curr_dir = os.path.dirname(os.path.realpath(self.output_file))
        cwd = os.getcwd()
        # Copy the reveal subfolder
        #if not os.path.isdir(curr_dir+'/reveal'):
        #    sources_dir = os.path.abspath(os.path.dirname(__file__)+'/reveal')
        #    import shutil
        #    shutil.copytree(sources_dir, curr_dir+'/reveal')
        # Copy the rst2reveal.css
        if not os.path.exists(curr_dir+'/rst2reveal.css'):
            source_file = os.path.abspath(os.path.dirname(__file__)+'/reveal/css/rst2reveal.css')
            import shutil
            shutil.copyfile(source_file, curr_dir+'/rst2reveal.css')
        # Generate the Pygments CSS file
        self.is_pygments = False
        if not self.pygments_style == '':
            # Check if Pygments is installed
            try:
                import pygments
                self.is_pygments = True
            except ImportError:
                print('Warning: Pygments is not installed, the code will not be highlighted.')
                print('You should install it with `pip install pygments`')
                return
            os.chdir(curr_dir)
            import subprocess, shutil
            os.system("pygmentize -S "+self.pygments_style+" -f html -O bg=light > pygments.css")
            # Fix the bug where the literal color goes to math blocks...
            with codecs.open('pygments.css', 'r', 'utf8') as infile:
                with codecs.open('pygments.css.tmp', 'w', 'utf8') as outfile:
                    for aline in infile:
                        outfile.write('.highlight '+aline)
            shutil.move('pygments.css.tmp', 'pygments.css')
            os.chdir(cwd)

    def _produce_output(self):
        self.title = self.parts['title']
        self._analyse_metainfo()
        header = self._generate_header()
        body = self._generate_body()
        footer = self._generate_footer()
        document_content = header + body + footer
        with codecs.open(self.output_file, 'w', 'utf8') as wfile:
            wfile.write(document_content)

    def _generate_body(self):
        body = """
    <body>
        <div class="static-content"></div>
        <div class="reveal">
            <div class="slides">
%(titleslide)s
%(body)s
            </div>
        </div>
""" % {'body': self.parts['body'],
       'titleslide': self.titleslide}
        return body

    def _analyse_metainfo(self):
        def clean(text):
            import re
            if len(re.findall(r'<paragraph>', text)) > 0:
                text = re.findall(r'<paragraph>(.+)</paragraph>', text)[0]
            if len(re.findall(r'<author>', text)) > 0:
                text = re.findall(r'<author>(.+)</author>', text)[0]
            if len(re.findall(r'<date>', text)) > 0:
                text = re.findall(r'<date>(.+)</date>', text)[0]
            if len(re.findall(r'<reference', text)) > 0:
                text = re.findall(r'<reference refuri="mailto:(.+)">', text)[0]
            return text

        self.meta_info = {'author': ''}
        texts = self.parts['metadata'].split('\n')
        for t in texts:
            if not t == '':
                name = t.split('=')[0]
                content = t.replace(name+'=', '')
                content = clean(content)
                self.meta_info[name] = content
        self._generate_titleslide()

    def _generate_titleslide(self):
        if self.parts['title'] != '':  # A title has been given
            self.meta_info['title'] = self.parts['title']
        elif 'title' not in self.meta_info.keys():
            self.meta_info['title'] = ''
        if self.parts['subtitle'] != '':  # defined with an underlined text instead of :subtitle:
            self.meta_info['subtitle'] = self.parts['subtitle']
        elif 'subtitle' not in self.meta_info.keys():
            self.meta_info['subtitle'] = ''
        if 'email' not in self.meta_info.keys():
            self.meta_info['email'] = ''
        if 'institution' not in self.meta_info.keys():
            self.meta_info['institution'] = ''
        if 'date' not in self.meta_info.keys():
            self.meta_info['date'] = ''
        # Separators
        self.meta_info['is_institution'] = '-' if self.meta_info['institution'] != '' else ''
        self.meta_info['is_author'] = '.' if self.meta_info['author'] != '' else ''
        self.meta_info['is_subtitle'] = '.' if self.meta_info['subtitle'] != '' else ''

        if self.firstslide_template == "":
            self.firstslide_template = """
    <section class="titleslide">
        <h1>%(title)s</h1>
        <h3>%(subtitle)s</h3>
        <br>
        <p><a href="mailto:%(email)s">%(author)s</a> %(is_institution)s %(institution)s</p>
        <p><small>%(email)s</small></p>
        <p>%(date)s</p>
    </section>
"""
        self.titleslide = self.firstslide_template % self.meta_info

        if self.footer_template == "":
            self.footer_template = """<b>%(title)s %(is_subtitle)s %(subtitle)s.</b> %(author)s%(is_institution)s %(institution)s. %(date)s"""

        if self.write_footer:
            self.footer_html = """<footer id="footer">""" + self.footer_template % self.meta_info + """<b id="slide_number" style="padding: 1em;"></b></footer>"""
        elif self.page_number:
            self.footer_html = """<footer><b id="slide_number"></b></footer>"""
        else:
            self.footer_html = ""

    def _generate_header(self):
        header = """<!doctype html>
<html lang="en">
    <head>
        <meta charset="utf-8">
        <title>%(title)s</title>
        <meta name="description" content="%(title)s">
        %(meta)s
        <meta name="apple-mobile-web-app-capable" content="yes" />
        <meta name="apple-mobile-web-app-status-bar-style" content="black-translucent" />
        <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=5.0, user-scalable=no">
        <link rel="stylesheet" href="%(reveal_root)s/css/reveal.css">
        %(pygments)s
        <link rel="stylesheet" href="rst2reveal.css">
        <!--link rel="stylesheet" href="%(reveal_root)s/css/theme/default.css" id="theme"-->
        <link rel="stylesheet" href="%(reveal_root)s/css/theme/%(theme)s.css" id="theme">
        <link rel="stylesheet" href="%(reveal_root)s/css/print/pdf.css" type="text/css" media="print">
        <script type="text/javascript" src="%(mathjax_path)s?config=TeX-AMS-MML_HTMLorMML"></script>
        <!-- Extra styles -->
        <style>
            .reveal section {
                text-align: %(horizontal_center)s;
            }
            .reveal h2{
                text-align: %(title_center)s;
            }
        </style>
        %(custom_stylesheet)s
        <!--[if lt IE 9]>
        <script src="%(reveal_root)s/lib/js/html5shiv.js"></script>
        <![endif]-->
    </head>
""" % {'title': self.title,
       'meta': self.parts['meta'],
       'theme': self.theme,
       'reveal_root': self.reveal_root,
       'pygments': '<link rel="stylesheet" href="pygments.css">' if self.is_pygments else '',
       'mathjax_path': self.mathjax_path,
       'horizontal_center': 'center' if self.horizontal_center else 'left',
       'title_center': 'center' if self.title_center else 'left',
       'custom_stylesheet': '<link rel="stylesheet" href="%s">' % self.stylesheet if self.stylesheet != '' else ''}
        return header

    def _generate_footer(self):
        if self.page_number:
            script_page_number = """
        <script>
            // Fires each time a new slide is activated
            Reveal.addEventListener( 'slidechanged', function( event ) {
                if(event.indexh > 0) {
                    if(event.indexv > 0) {
                        val = event.indexh + ' - ' + event.indexv
                        document.getElementById('slide_number').innerHTML = val;
                    }
                    else{
                        document.getElementById('slide_number').innerHTML = event.indexh;
                    }
                }
                else {
                    document.getElementById('slide_number').innerHTML = '';
                }
            } );
        </script>"""
        else:
            script_page_number = ""

        if self.init_html:
            footer = self.init_html
        else:
            footer = """
        <script src="%(reveal_root)s/lib/js/head.min.js"></script>
        <script src="%(reveal_root)s/js/reveal.min.js"></script>
        <script>
            // Full list of configuration options available here:
            // https://github.com/hakimel/reveal.js#configuration
            Reveal.initialize({
                controls: %(controls)s,
                progress: false,
                history: true,
                overview: true,
                keyboard: true,
                loop:
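# --- Usage sketch (not part of the original module; the source above is truncated) ---
# A minimal way to drive the Parser documented above. The file names, theme and
# style values below are hypothetical; only the constructor arguments and
# create_slides() come from this module.
if __name__ == '__main__':
    parser = Parser(
        input_file='talk.rst',       # hypothetical input reST file
        output_file='talk.html',     # hypothetical output HTML file
        theme='night',
        transition='fade',
        pygments_style='monokai',    # any style listed in pygments.styles.STYLE_MAP
        page_number=True,
        controls=True,
    )
    parser.create_slides()           # parses talk.rst and writes talk.html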
totalsum): b += 1 best = min(best, getsum(a, b, total, totalsum)) best = total - best('Case #%d' % T) return s def func_97d13fa555774b9e84f227df329b52f2(T, infile): N, p, q, r, s = line(infile) A = [((i * p + q) % r + s) for i in xrange(N)] total = sum(A) totalsum = [a for a in A] for i in xrange(1, N): totalsum[i] += totalsum[i - 1] best = total b = 0 for a in xrange(N): if b < a: b += 1 while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b + 1, total, totalsum): b += 1 best = min(best, getsum(a, b, total, totalsum)) best = total - best('Case #%d' % T) return total def func_bb6de3759b69441ea78b54431633b006(T, infile): N, p, q, r, s = line(infile) A = [((i * p + q) % r + s) for i in xrange(N)] total = sum(A) totalsum = [a for a in A] for i in xrange(1, N): totalsum[i] += totalsum[i - 1] best = total b = 0 for a in xrange(N): if b < a: b += 1 while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b + 1, total, totalsum): b += 1 best = min(best, getsum(a, b, total, totalsum)) best = total - best('Case #%d' % T) return A def func_05499c2e780348f98504bf8f7b9c7d9d(T, infile): N, p, q, r, s = line(infile) A = [((i * p + q) % r + s) for i in xrange(N)] total = sum(A) totalsum = [a for a in A] for i in xrange(1, N): totalsum[i] += totalsum[i - 1] best = total b = 0 for a in xrange(N): if b < a: b += 1 while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b + 1, total, totalsum): b += 1 best = min(best, getsum(a, b, total, totalsum)) best = total - best('Case #%d' % T) return p def func_00d72e88b8404654b956708f231daadc(T, infile): N, p, q, r, s = line(infile) A = [((i * p + q) % r + s) for i in xrange(N)] total = sum(A) totalsum = [a for a in A] for i in xrange(1, N): totalsum[i] += totalsum[i - 1] best = total b = 0 for a in xrange(N): if b < a: b += 1 while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b + 1, total, totalsum): b += 1 best = min(best, getsum(a, b, total, totalsum)) best = total - best('Case #%d' % T) return q def func_af9e65eec45c451b8eb830b4b95345aa(T, infile): N, p, q, r, s = line(infile) A = [((i * p + q) % r + s) for i in xrange(N)] total = sum(A) totalsum = [a for a in A] for i in xrange(1, N): totalsum[i] += totalsum[i - 1] best = total b = 0 for a in xrange(N): if b < a: b += 1 while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b + 1, total, totalsum): b += 1 best = min(best, getsum(a, b, total, totalsum)) best = total - best('Case #%d' % T) return totalsum def func_b0ff1c498786447195d14db4860ca57f(T, infile): N, p, q, r, s = line(infile) A = [((i * p + q) % r + s) for i in xrange(N)] total = sum(A) totalsum = [a for a in A] for i in xrange(1, N): totalsum[i] += totalsum[i - 1] best = total b = 0 for a in xrange(N): if b < a: b += 1 while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b + 1, total, totalsum): b += 1 best = min(best, getsum(a, b, total, totalsum)) best = total - best('Case #%d' % T) return r def func_411779ddb1ec4ade957f1d8871308397(T, infile): N, p, q, r, s = line(infile) A = [((i * p + q) % r + s) for i in xrange(N)] total = sum(A) totalsum = [a for a in A] for i in xrange(1, N): totalsum[i] += totalsum[i - 1] best = total b = 0 for a in xrange(N): if b < a: b += 1 while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b + 1, total, totalsum): b += 1 best = min(best, getsum(a, b, total, totalsum)) best = total - best('Case #%d' % T) return a def func_81c3b38dac0c48fca84e80de2cad5358(T, infile): N, p, q, r, s = line(infile) A = [((i * p + q) % r + s) for i in xrange(N)] total = sum(A) 
totalsum = [a for a in A] for i in xrange(1, N): totalsum[i] += totalsum[i - 1] best = total b = 0 for a in xrange(N): if b < a: b += 1 while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b + 1, total, totalsum): b += 1 best = min(best, getsum(a, b, total, totalsum)) best = total - best('Case #%d' % T) return best def func_72dc98d4f7504e5387cd75838acb038e(T, infile): N, p, q, r, s = line(infile) A = [((i * p + q) % r + s) for i in xrange(N)] total = sum(A) totalsum = [a for a in A] for i in xrange(1, N): totalsum[i] += totalsum[i - 1] best = total b = 0 for a in xrange(N): if b < a: b += 1 while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b + 1, total, totalsum): b += 1 best = min(best, getsum(a, b, total, totalsum)) best = total - best('Case #%d' % T) return b def func_8ce62155b9734233b417516d60100590(p, T, N, q, s, r): A = [((i * p + q) % r + s) for i in xrange(N)] total = sum(A) totalsum = [a for a in A] for i in xrange(1, N): totalsum[i] += totalsum[i - 1] best = total b = 0 for a in xrange(N): if b < a: b += 1 while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b + 1, total, totalsum): b += 1 best = min(best, getsum(a, b, total, totalsum)) best = total - best('Case #%d' % T)('Case #%d: %.10f' % (T, 1.0 * best / total)) return totalsum def func_70be2c23b409422c9fdf414d68939ccb(p, T, N, q, s, r): A = [((i * p + q) % r + s) for i in xrange(N)] total = sum(A) totalsum = [a for a in A] for i in xrange(1, N): totalsum[i] += totalsum[i - 1] best = total b = 0 for a in xrange(N): if b < a: b += 1 while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b + 1, total, totalsum): b += 1 best = min(best, getsum(a, b, total, totalsum)) best = total - best('Case #%d' % T)('Case #%d: %.10f' % (T, 1.0 * best / total)) return b def func_e3a7d5c635e54faa89dea71f048d02ab(p, T, N, q, s, r): A = [((i * p + q) % r + s) for i in xrange(N)] total = sum(A) totalsum = [a for a in A] for i in xrange(1, N): totalsum[i] += totalsum[i - 1] best = total b = 0 for a in xrange(N): if b < a: b += 1 while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b + 1, total, totalsum): b += 1 best = min(best, getsum(a, b, total, totalsum)) best = total - best('Case #%d' % T)('Case #%d: %.10f' % (T, 1.0 * best / total)) return i def func_0f4fb040495b4139925341acd807c78c(p, T, N, q, s, r): A = [((i * p + q) % r + s) for i in xrange(N)] total = sum(A) totalsum = [a for a in A] for i in xrange(1, N): totalsum[i] += totalsum[i - 1]
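# --- Cleaned-up sketch of the routine shared by the func_* variants above -----
# Every func_* above repeats one body and differs only in what it returns.
# `line(infile)` and `getsum(...)` are not defined in this excerpt, so the
# helpers below are assumptions: line() reads five integers from one input line,
# and getsum(a, b, total, totalsum) returns the total minus the sum of A[a..b]
# taken from the prefix sums. The garbled `best = total - best('Case #%d' % T)`
# is read here as an assignment followed by a separate print; xrange() becomes
# range() for Python 3.

def line(infile):
    # Assumed helper: read one whitespace-separated line of integers.
    return [int(x) for x in infile.readline().split()]


def getsum(a, b, total, totalsum):
    # Assumed helper: sum of A outside the window [a, b], via prefix sums.
    inside = totalsum[b] - (totalsum[a - 1] if a > 0 else 0)
    return total - inside


def solve_case(T, infile):
    N, p, q, r, s = line(infile)
    A = [((i * p + q) % r + s) for i in range(N)]
    total = sum(A)
    totalsum = list(A)
    for i in range(1, N):
        totalsum[i] += totalsum[i - 1]          # prefix sums of A
    best = total
    b = 0
    for a in range(N):                          # two-pointer sweep over windows [a, b]
        if b < a:
            b += 1
        while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b + 1, total, totalsum):
            b += 1
        best = min(best, getsum(a, b, total, totalsum))
    best = total - best                         # largest contiguous window sum
    print('Case #%d: %.10f' % (T, 1.0 * best / total))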
import pandas as pd from Util import Util from datapreprocessor import DataPreProcessor from test_classifier import Test_Classifier from train_classifier import Train_Classifier class Texture_Classification_Deep: @staticmethod def DTD_Train_test(): device = Util.get_device() # device = 'cuda' if torch.cuda.is_available() else 'cpu' print(device) TEXTURE_LABELS = ["banded", "blotchy", "braided", "bubbly", "bumpy", "chequered", "cobwebbed", "cracked", "crosshatched", "crystalline", "dotted", "fibrous", "flecked", "freckled", "frilly", "gauzy", "grid", "grooved", "honeycombed", "interlaced", "knitted", "lacelike", "lined", "marbled", "matted", "meshed", "paisley", "perforated", "pitted", "pleated", "polka-dotted", "porous", "potholed", "scaly", "smeared", "spiralled", "sprinkled", "stained", "stratified", "striped", "studded", "swirly", "veined", "waffled", "woven", "wrinkled", "zigzagged"] IMAGE_NET_LABELS = \ ["alp", "artichoke", "Band Aid", "bathing cap", "bookshop", "bull mastiff", "butcher shop", "carbonara", "chain", "chain saw", "chainlink fence", "cheetah", "cliff dwelling", "common iguana", "confectionery", "container ship", "corn", "crossword puzzle", "dishrag", "dock", "flat-coated retriever", "gibbon", "grocery store", "head cabbage", "honeycomb", "hook", "hourglass", "jigsaw puzzle", "jinrikisha", "lakeside", "lawn mower", "maillot", "microwave", "miniature poodle", "muzzle", "notebook", "ocarina", "orangutan", "organ", "paper towel", "partridge", "rapeseed", "sandbar", "sarong", "sea urchin", "shoe shop", "shower curtain", "stone wall", "theater curtain", "tile roof", "turnstile", "vault", "velvet", "window screen", "wool", "yellow lady's slipper"] IMAGE_NET_LABELS_S2 = \ ["common iguana", "partridge", "flat-coated retriever", "bull mastiff", "miniature poodle", "cheetah", "sea urchin", "orangutan", "gibbon", "Band Aid", "bathing cap", "chain saw", "container ship", "hook", "hourglass", "jinrikisha", "lawn mower", "maillot", "microwave", "muzzle", "notebook", "ocarina", "organ", "paper towel", "sarong", "turnstile", "crossword puzzle", "yellow lady's slipper" ] IMAGE_NET_LABELS_T = \ ["alp", "artichoke", "bookshop", "butcher shop", "carbonara", "chain", "chainlink fence", "cliff dwelling", "confectionery", "corn", "dishrag", "dock", "grocery store", "head cabbage", "honeycomb", "jigsaw puzzle", "lakeside", "rapeseed", "sandbar", "shoe shop", "shower curtain", "stone wall", "theater curtain", "tile roof", "vault", "velvet", "window screen", "wool", ] train_parameters = { "epochs": 400, "learning_rate": 0.0001, "texture_batch_size": 32, "image_net_batch_size": 32, "weight_decay": 0.0005 } image_net_data_set_path = "./Dataset/ImageNet/ImageNet_X.pickle" image_net_label_set_path = "./Dataset/ImageNet/ImageNet_Y.pickle" image_net_test_path = "./Dataset/ImageNet/ImageNet_Test.pickle" image_net_S2_data_set_path = "./Dataset/ImageNet/ImageNet_S2X.pickle" image_net_S2_label_set_path = "./Dataset/ImageNet/ImageNet_S2Y.pickle" image_net_S2_test_path = "./Dataset/ImageNet/ImageNet_S2_Test.pickle" image_net_T_data_set_path = "./Dataset/ImageNet/ImageNet_TX.pickle" image_net_T_label_set_path = "./Dataset/ImageNet/ImageNet_TY.pickle" image_net_T_test_path = "./Dataset/ImageNet/ImageNet_T_Test.pickle" texture_train_data_set_path = "./Dataset/Texture/DTD/Texture_DTD_train{0}_X.pickle" texture_train_label_set_path = "./Dataset/Texture/DTD/Texture_DTD_train{0}_Y.pickle" texture_val_data_set_path = "./Dataset/Texture/DTD/Texture_DTD_val{0}_X.pickle" texture_val_label_set_path = 
"./Dataset/Texture/DTD/Texture_DTD_val{0}_Y.pickle" auto_encoder_model_path = "./Models/Auto_encoder_Model_epoch_300_lr_0.001_noise_factor_0.5.pt" saved_model_name = "./Models/MTL/DTD/Multitask_Classifier_Model_epoch_" + str( train_parameters["epochs"]) + "_lr_" + str( train_parameters["learning_rate"]) + "_split{0}.pth" split_size = 0.05 # training starts texture_data_loader_list = DataPreProcessor.preprocess_DTD_train_val_10_splits(texture_train_data_set_path, texture_train_label_set_path, texture_val_data_set_path, texture_val_label_set_path, train_parameters[ "texture_batch_size"], num_workers=0, device=device) # image_net_S2_data_loader_dict = DataPreProcessor.preprocess_image_net(image_net_S2_data_set_path, # image_net_S2_label_set_path, # train_parameters[ # "image_net_batch_size"], # image_net_S2_test_path, # num_workers=0, # split_size=split_size, # device=device, # type="ImageNet_S2" # ) # image_net_T_data_loader_dict = DataPreProcessor.preprocess_image_net(image_net_T_data_set_path, # image_net_T_label_set_path, # train_parameters[ # "image_net_batch_size"], # image_net_T_test_path, # num_workers=0, # split_size=split_size, # device=device, # type="ImageNet_T" # ) image_net_data_loader_dict = DataPreProcessor.preprocess_image_net(image_net_data_set_path, image_net_label_set_path, train_parameters[ "image_net_batch_size"], image_net_test_path, num_workers=0, split_size=split_size, device=device, type="ImageNet") train_arguments = { "IMAGE_NET_LABELS": IMAGE_NET_LABELS, "IMAGE_NET_LABELS_S2": IMAGE_NET_LABELS_S2, "IMAGE_NET_LABELS_T": IMAGE_NET_LABELS_T, "TEXTURE_LABELS": TEXTURE_LABELS, # "image_net_S2_data_loader_dict": image_net_S2_data_loader_dict, # "image_net_T_data_loader_dict": image_net_T_data_loader_dict, "image_net_data_loader_dict": image_net_data_loader_dict, "texture_data_loader_list": texture_data_loader_list, "train_parameters": train_parameters, "saved_model_name": saved_model_name } train = Train_Classifier() network = train.train_classifier(train_arguments, device, dataset_name="DTD") # training ends # testing starts texture_data_set_path = "./Dataset/Texture/DTD/Texture_DTD_test{0}_X.pickle" texture_label_set_path = "./Dataset/Texture/DTD/Texture_DTD_test{0}_Y.pickle" data_loader_test_list = DataPreProcessor.prepare_data_loader_test_10_splits(texture_data_set_path, texture_label_set_path, device) model_path_bn = "./Models/MTL/DTD/Multitask_Classifier_Model_epoch_400_lr_0.0001_split{0}.pth" test_arguments = { "data_loader_test_list": data_loader_test_list, "model_path_bn": model_path_bn, "TEXTURE_LABELS": TEXTURE_LABELS } # test(test_arguments, IMAGE_NET_LABELS, device) test = Test_Classifier() test.test_classifier(test_arguments, IMAGE_NET_LABELS, device, dataset_name="DTD") @staticmethod def surface_Train_test(): # 0.05 test train val split device = Util.get_device() # device = 'cuda' if torch.cuda.is_available() else 'cpu' print(device) TEXTURE_LABELS = ['Kyberge_blanket1', 'Kyberge_canvas1', 'Kyberge_seat2', 'UIUC07_water', 'UIUC02_bark2', 'KTH_brown_bread', 'UIUC17_glass2', 'Kyberge_scarf1', 'KTH_corduroy', 'UIUC16_glass1', 'Kyberge_stoneslab1', 'Kyberge_rice2', 'UIUC06_wood3', 'KTH_aluminium_foil', 'Kyberge_ceiling1', 'Kyberge_sesameseeds1', 'Kyberge_floor2', 'Kyberge_lentils1', 'KTH_linen', 'UIUC08_granite', 'Kyberge_screen1', 'UIUC24_corduroy', 'Kyberge_oatmeal1', 'Kyberge_stone1', 'UIUC03_bark3', 'Kyberge_pearlsugar1', 'UIUC05_wood2', 'UIUC14_brick1', 'UIUC19_carpet2', 'UIUC23_knit', 'UIUC22_fur', 'UIUC15_brick2', 'KTH_wool', 'KTH_orange_peel', 
'Kyberge_blanket2', 'Kyberge_sand1', 'KTH_sponge', 'Kyberge_seat1', 'Kyberge_scarf2', 'KTH_cracker', 'Kyberge_grass1', 'Kyberge_rice1', 'KTH_cork', 'UIUC04_wood1', 'Kyberge_cushion1', 'Kyberge_stone3', 'UIUC18_carpet1', 'Kyberge_ceiling2', 'UIUC10_floor1', 'Kyberge_floor1', 'Kyberge_stone2', 'KTH_cotton', 'UIUC09_marble', 'Kyberge_wall1', 'Kyberge_linseeds1', 'UIUC12_pebbles', 'UIUC11_floor2', 'UIUC01_bark1', 'Kyberge_rug1', 'KTH_styrofoam', 'UIUC25_plaid', 'UIUC21_wallpaper', 'UIUC13_wall', 'UIUC20_upholstery'] IMAGE_NET_LABELS = \ ["alp", "artichoke", "Band Aid", "bathing cap", "bookshop", "bull mastiff", "butcher shop", "carbonara", "chain", "chain saw", "chainlink fence", "cheetah", "cliff dwelling", "common iguana", "confectionery", "container ship", "corn", "crossword puzzle", "dishrag", "dock", "flat-coated retriever", "gibbon", "grocery store", "head cabbage", "honeycomb", "hook", "hourglass", "jigsaw puzzle", "jinrikisha", "lakeside", "lawn mower", "maillot", "microwave", "miniature poodle", "muzzle", "notebook", "ocarina", "orangutan", "organ", "paper towel", "partridge", "rapeseed", "sandbar", "sarong", "sea urchin", "shoe shop", "shower curtain", "stone wall", "theater curtain", "tile roof", "turnstile", "vault", "velvet", "window screen", "wool", "yellow lady's slipper"] train_parameters = { "epochs": 1000, "learning_rate": 0.0001, "texture_batch_size": 32, "image_net_batch_size": 32, "weight_decay": 0.0005 } image_net_data_set_path = "./Dataset/ImageNet/ImageNet_X.pickle" image_net_label_set_path = "./Dataset/ImageNet/ImageNet_Y.pickle" image_net_test_path = "./Dataset/ImageNet/ImageNet_Test.pickle" texture_train_data_set_path = "./Dataset/Texture/Surface/Surface_X_train.pickle" texture_train_label_set_path = "./Dataset/Texture/Surface/Surface_Y_train.pickle" texture_test_data_set_path = "./Dataset/Texture/Surface/Surface_X_vaild.pickle" texture_test_label_set_path = "./Dataset/Texture/Surface/Surface_Y_vaild.pickle" saved_model_name = "./Models/MTL/Surface/Multitask_Classifier_Model_epoch_" + str( train_parameters["epochs"]) + "_lr_" + str( train_parameters["learning_rate"]) + "_split{0}.pth" split_size = 0.20 # training starts texture_train_val_data_loader_list, texture_test_data_loader_list = \ DataPreProcessor.preprocess_texture_surface(texture_train_data_set_path, texture_train_label_set_path, texture_test_data_set_path, texture_test_label_set_path, train_parameters["texture_batch_size"], num_workers=0, device=device, split_size=split_size, type="Surface", folds=1) image_net_data_loader_dict = DataPreProcessor.preprocess_image_net(image_net_data_set_path, image_net_label_set_path, train_parameters[ "image_net_batch_size"], image_net_test_path, num_workers=0, split_size=split_size, device=device, type="ImageNet") train_arguments = { "IMAGE_NET_LABELS": IMAGE_NET_LABELS, "TEXTURE_LABELS": TEXTURE_LABELS, "image_net_data_loader_dict": image_net_data_loader_dict, "texture_data_loader_list": texture_train_val_data_loader_list, "train_parameters": train_parameters, "saved_model_name": saved_model_name } train = Train_Classifier() network = train.train_classifier(train_arguments, device, dataset_name="kth") # training ends # testing starts model_path_bn = "./Models/MTL/Surface/Multitask_Classifier_Model_epoch_400_lr_0.0001_split{0}.pth" test_arguments = { "data_loader_test_list": texture_test_data_loader_list, "model_path_bn": model_path_bn, "TEXTURE_LABELS": TEXTURE_LABELS } test = Test_Classifier() accuracy_list, mean_accuracy = test.test_classifier(test_arguments, 
IMAGE_NET_LABELS, device) file1 = open("Surface_Details.txt", "a") file1.write(str(train_parameters)) file1.write("Surface Mean accuracy: {0}\n".format(mean_accuracy)) file1.write(str(accuracy_list)) pd.DataFrame.from_dict( accuracy_list, orient='columns' ).to_csv("./Accuracy_Surface.csv") @staticmethod def kth_Train_test(): # 0.05 test train val split device = Util.get_device() # device = 'cuda' if torch.cuda.is_available() else 'cpu' print(device) TEXTURE_LABELS = ["KTH_aluminium_foil", "KTH_brown_bread", "KTH_corduroy", "KTH_cork", "KTH_cotton", "KTH_cracker", "KTH_linen", "KTH_orange_peel", "KTH_sponge", "KTH_styrofoam", "KTH_wool"] IMAGE_NET_LABELS = \ ["alp", "artichoke", "Band Aid", "bathing cap", "bookshop", "bull mastiff", "butcher shop", "carbonara", "chain", "chain saw", "chainlink fence", "cheetah", "cliff dwelling", "common iguana", "confectionery", "container ship", "corn", "crossword puzzle", "dishrag", "dock", "flat-coated retriever", "gibbon", "grocery store", "head cabbage", "honeycomb", "hook", "hourglass", "jigsaw puzzle", "jinrikisha", "lakeside", "lawn mower", "maillot", "microwave", "miniature poodle", "muzzle", "notebook", "ocarina", "orangutan", "organ", "paper towel", "partridge", "rapeseed", "sandbar", "sarong", "sea urchin", "shoe shop", "shower curtain", "stone wall", "theater curtain", "tile roof", "turnstile", "vault", "velvet", "window screen", "wool", "yellow lady's slipper"] train_parameters = { "epochs": 400, "learning_rate": 0.0001, "texture_batch_size": 32, "image_net_batch_size": 32, "weight_decay": 0.0005 } image_net_data_set_path = "./Dataset/ImageNet/ImageNet_X.pickle" image_net_label_set_path = "./Dataset/ImageNet/ImageNet_Y.pickle" image_net_test_path = "./Dataset/ImageNet/ImageNet_Test.pickle" texture_train_data_set_path = "./Dataset/Texture/kth/kth_X.pickle" texture_train_label_set_path = "./Dataset/Texture/kth/kth_Y.pickle" saved_model_name = "./Models/MTL/kth/Multitask_Classifier_Model_epoch_" + str( train_parameters["epochs"]) + "_lr_" + str( train_parameters["learning_rate"]) + "_split{0}.pth" split_size = 0.20 # training starts texture_train_val_data_loader_list, texture_test_data_loader_list = \ DataPreProcessor.preprocess_texture_except_DTD(texture_train_data_set_path, texture_train_label_set_path, train_parameters[ "texture_batch_size"], num_workers=0, device=device, split_size=split_size, type="Kth", folds=1) image_net_data_loader_dict = DataPreProcessor.preprocess_image_net(image_net_data_set_path, image_net_label_set_path, train_parameters[ "image_net_batch_size"], image_net_test_path, num_workers=0, split_size=split_size, device=device, type="ImageNet") train_arguments = { "IMAGE_NET_LABELS": IMAGE_NET_LABELS, "TEXTURE_LABELS": TEXTURE_LABELS, "image_net_data_loader_dict": image_net_data_loader_dict, "texture_data_loader_list": texture_train_val_data_loader_list, "train_parameters": train_parameters, "saved_model_name": saved_model_name } train = Train_Classifier() train.train_classifier(train_arguments, device, dataset_name="kth") # training ends # testing starts model_path_bn = "./Models/MTL/kth/Multitask_Classifier_Model_epoch_400_lr_0.0001_split{0}.pth" test_arguments = { "data_loader_test_list": texture_test_data_loader_list, "model_path_bn": model_path_bn, "TEXTURE_LABELS": TEXTURE_LABELS } test = Test_Classifier() accuracy_list, mean_accuracy = test.test_classifier(test_arguments, IMAGE_NET_LABELS, device) file1 = open("kth_Details.txt", "a") file1.write(str(train_parameters)) file1.write("kth Mean accuracy: 
{0}\n".format(mean_accuracy)) file1.write(str(accuracy_list)) pd.DataFrame.from_dict( accuracy_list, orient='columns' ).to_csv("./Accuracy_Kth.csv") @staticmethod def kth_Train_test_single(): # 0.05 test train val split device = Util.get_device() # device = 'cuda' if torch.cuda.is_available() else 'cpu' print(device) TEXTURE_LABELS = ["KTH_aluminium_foil", "KTH_brown_bread", "KTH_corduroy", "KTH_cork", "KTH_cotton", "KTH_cracker", "KTH_linen", "KTH_orange_peel", "KTH_sponge", "KTH_styrofoam", "KTH_wool"] train_parameters = { "epochs": 25, "learning_rate": 0.0001, "texture_batch_size": 32, "image_net_batch_size": 32, "weight_decay": 0.0005 } texture_train_data_set_path = "./Dataset/Texture/kth/kth_X.pickle" texture_train_label_set_path = "./Dataset/Texture/kth/kth_Y.pickle" saved_model_name = "./Models/MTL/kth/Multitask_Classifier_Model_epoch_" + str( train_parameters["epochs"]) + "_lr_" + str( train_parameters["learning_rate"]) + "_split{0}.pth" split_size = 0.20 # training starts texture_train_val_data_loader_list, texture_test_data_loader_list = \ DataPreProcessor.preprocess_texture_except_DTD(texture_train_data_set_path, texture_train_label_set_path, train_parameters[ "texture_batch_size"], num_workers=0, device=device, split_size=split_size, type="Kth", folds=1) train_arguments = { "TEXTURE_LABELS": TEXTURE_LABELS, "texture_data_loader_list": texture_train_val_data_loader_list, "train_parameters": train_parameters, "saved_model_name": saved_model_name } train = Train_Classifier() train.train_classifier_single(train_arguments, device) # training ends # testing starts model_path_bn = "./Models/MTL/kth/Multitask_Classifier_Model_epoch_25_lr_0.0001_split{0}.pth" test_arguments = { "data_loader_test_list": texture_test_data_loader_list, "model_path_bn": model_path_bn, "TEXTURE_LABELS": TEXTURE_LABELS } test = Test_Classifier() accuracy_list, mean_accuracy = test.test_classifier_single(test_arguments, device) file1 = open("kth_Details.txt", "a") file1.write(str(train_parameters)) file1.write("kth Mean accuracy: {0}\n".format(mean_accuracy)) file1.write(str(accuracy_list)) pd.DataFrame.from_dict( accuracy_list, orient='columns' ).to_csv("./Accuracy_Kth_single.csv") @staticmethod def fmd_Train_test(): # 0.05 test train val split device = Util.get_device() # device = 'cuda' if torch.cuda.is_available() else 'cpu' print(device) TEXTURE_LABELS = ["fabric", "foliage", "glass", "leather", "metal", "paper", "plastic", "stone", "water", "wood"] IMAGE_NET_LABELS = \ ["alp", "artichoke", "Band Aid", "bathing cap", "bookshop", "bull mastiff", "butcher shop", "carbonara", "chain", "chain saw", "chainlink fence", "cheetah", "cliff dwelling", "common iguana", "confectionery", "container ship", "corn", "crossword puzzle", "dishrag", "dock", "flat-coated retriever", "gibbon", "grocery store", "head cabbage", "honeycomb", "hook", "hourglass", "jigsaw puzzle", "jinrikisha", "lakeside", "lawn mower", "maillot", "microwave", "miniature poodle", "muzzle", "notebook", "ocarina", "orangutan", "organ", "paper towel",
<filename>caret_analyze/infra/lttng/ros2_tracing/processor.py # Copyright 2019 <NAME> GmbH # Copyright 2020 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Module for trace events processor and ROS 2 model creation.""" from typing import Dict, List, Set, Tuple from tracetools_analysis.processor import (EventHandler, EventMetadata, HandlerMap) from tracetools_read import get_field from .data_model import Ros2DataModel class Ros2Handler(EventHandler): """ ROS 2-aware event handling class implementation. Handles a trace's events and builds a model with the data. """ def __init__( self, **kwargs, ) -> None: """Create a Ros2Handler.""" # Link a ROS trace event to its corresponding handling method handler_map: HandlerMap = {} handler_map['ros2:rcl_init'] = self._handle_rcl_init handler_map['ros2:rcl_node_init'] = self._handle_rcl_node_init handler_map['ros2:rcl_publisher_init'] = self._handle_rcl_publisher_init handler_map['ros2:rcl_subscription_init'] = self._handle_rcl_subscription_init handler_map['ros2:rclcpp_subscription_init'] = self._handle_rclcpp_subscription_init handler_map[ 'ros2:rclcpp_subscription_callback_added' ] = self._handle_rclcpp_subscription_callback_added handler_map['ros2:rcl_service_init'] = self._handle_rcl_service_init handler_map[ 'ros2:rclcpp_service_callback_added' ] = self._handle_rclcpp_service_callback_added handler_map['ros2:rcl_client_init'] = self._handle_rcl_client_init handler_map['ros2:rcl_timer_init'] = self._handle_rcl_timer_init handler_map['ros2:rclcpp_timer_callback_added'] = self._handle_rclcpp_timer_callback_added handler_map['ros2:rclcpp_timer_link_node'] = self._handle_rclcpp_timer_link_node handler_map['ros2:rclcpp_callback_register'] = self._handle_rclcpp_callback_register handler_map['ros2:callback_start'] = self._handle_callback_start handler_map['ros2:callback_end'] = self._handle_callback_end handler_map[ 'ros2:rcl_lifecycle_state_machine_init' ] = self._handle_rcl_lifecycle_state_machine_init handler_map['ros2:rcl_lifecycle_transition'] = self._handle_rcl_lifecycle_transition handler_map['ros2:rclcpp_publish'] = self._handle_rclcpp_publish handler_map['ros2:message_construct'] = self._handle_message_construct handler_map['ros2:rclcpp_intra_publish'] = self._handle_rclcpp_intra_publish handler_map[ 'ros2:dispatch_subscription_callback' ] = self._handle_dispatch_subscription_callback handler_map[ 'ros2:dispatch_intra_process_subscription_callback' ] = self._handle_dispatch_intra_process_subscription_callback handler_map['ros2_caret:on_data_available'] = self._handle_on_data_available handler_map['ros2:rcl_publish'] = self._handle_rcl_publish handler_map['ros2_caret:dds_write'] = self._handle_dds_write handler_map['ros2_caret:dds_bind_addr_to_stamp'] = self._handle_dds_bind_addr_to_stamp handler_map['ros2_caret:dds_bind_addr_to_addr'] = self._handle_dds_bind_addr_to_addr handler_map['ros2_caret:rmw_implementation'] = self._handle_rmw_implementation handler_map['ros2_caret:add_callback_group'] = self._handle_add_callback_group 
handler_map['ros2_caret:add_callback_group_static_executor'] = \ self._handle_add_callback_group_static_executor handler_map['ros2_caret:construct_executor'] = self._handle_construct_executor handler_map['ros2_caret:construct_static_executor'] = \ self._handle_construct_static_executor handler_map['ros2_caret:callback_group_add_timer'] = \ self._handle_callback_group_add_timer handler_map['ros2_caret:callback_group_add_subscription'] = \ self._handle_callback_group_add_subscription handler_map['ros2_caret:callback_group_add_service'] = \ self._handle_callback_group_add_service handler_map['ros2_caret:callback_group_add_client'] = \ self._handle_callback_group_add_client handler_map['ros2_caret:tilde_subscription_init'] = \ self._handle_tilde_subscription_init handler_map['ros2_caret:tilde_publisher_init'] = \ self._handle_tilde_publisher_init handler_map['ros2_caret:tilde_subscribe'] = \ self._handle_tilde_subscribe handler_map['ros2_caret:tilde_publish'] = \ self._handle_tilde_publish handler_map['ros2_caret:tilde_subscribe_added'] = \ self._handle_tilde_subscribe_added handler_map['ros2_caret:sim_time'] = \ self._handle_sim_time super().__init__( handler_map=handler_map, data_model=Ros2DataModel(), **kwargs, ) # Temporary buffers self._callback_instances: Dict[int, Tuple[Dict, EventMetadata]] = {} @staticmethod def get_trace_points() -> List[str]: return [ 'ros2:rcl_init', 'ros2:rcl_node_init', 'ros2:rcl_publisher_init', 'ros2:rcl_subscription_init', 'ros2:rclcpp_subscription_init', 'ros2:rclcpp_subscription_callback_added', 'ros2:rcl_service_init', 'ros2:rclcpp_service_callback_added', 'ros2:rcl_client_init', 'ros2:rcl_timer_init', 'ros2:rclcpp_timer_callback_added', 'ros2:rclcpp_timer_link_node', 'ros2:rclcpp_callback_register', 'ros2:callback_start', 'ros2:callback_end', 'ros2:rcl_lifecycle_state_machine_init', 'ros2:rcl_lifecycle_transition', 'ros2:rclcpp_publish', 'ros2:message_construct', 'ros2:rclcpp_intra_publish', 'ros2:dispatch_subscription_callback', 'ros2:dispatch_intra_process_subscription_callback', 'ros2_caret:on_data_available', 'ros2:rcl_publish', 'ros2_caret:dds_write', 'ros2_caret:dds_bind_addr_to_stamp', 'ros2_caret:dds_bind_addr_to_addr', 'ros2_caret:rmw_implementation', 'ros2_caret:add_callback_group', 'ros2_caret:add_callback_group_static_executor', 'ros2_caret:construct_executor', 'ros2_caret:construct_static_executor', 'ros2_caret:callback_group_add_timer', 'ros2_caret:callback_group_add_subscription', 'ros2_caret:callback_group_add_service', 'ros2_caret:callback_group_add_client', 'ros2_caret:tilde_subscription_init', 'ros2_caret:tilde_publisher_init', 'ros2_caret:tilde_subscribe', 'ros2_caret:tilde_publish', 'ros2_caret:tilde_subscribe_added', 'ros2_caret:sim_time', ] @staticmethod def required_events() -> Set[str]: return { 'ros2:rcl_init', } @property def data(self) -> Ros2DataModel: return super().data # type: ignore def _handle_rcl_init( self, event: Dict, metadata: EventMetadata, ) -> None: context_handle = get_field(event, 'context_handle') timestamp = metadata.timestamp pid = metadata.pid version = get_field(event, 'version') self.data.add_context(context_handle, timestamp, pid, version) def _handle_rcl_node_init( self, event: Dict, metadata: EventMetadata, ) -> None: handle = get_field(event, 'node_handle') timestamp = metadata.timestamp tid = metadata.tid rmw_handle = get_field(event, 'rmw_handle') name = get_field(event, 'node_name') namespace = get_field(event, 'namespace') self.data.add_node(handle, timestamp, tid, rmw_handle, name, namespace) def 
_handle_rcl_publisher_init( self, event: Dict, metadata: EventMetadata, ) -> None: handle = get_field(event, 'publisher_handle') timestamp = metadata.timestamp node_handle = get_field(event, 'node_handle') rmw_handle = get_field(event, 'rmw_publisher_handle') topic_name = get_field(event, 'topic_name') depth = get_field(event, 'queue_depth') self.data.add_publisher( handle, timestamp, node_handle, rmw_handle, topic_name, depth) def _handle_rcl_subscription_init( self, event: Dict, metadata: EventMetadata, ) -> None: handle = get_field(event, 'subscription_handle') timestamp = metadata.timestamp node_handle = get_field(event, 'node_handle') rmw_handle = get_field(event, 'rmw_subscription_handle') topic_name = get_field(event, 'topic_name') depth = get_field(event, 'queue_depth') self.data.add_rcl_subscription( handle, timestamp, node_handle, rmw_handle, topic_name, depth, ) def _handle_rclcpp_subscription_init( self, event: Dict, metadata: EventMetadata, ) -> None: subscription_pointer = get_field(event, 'subscription') timestamp = metadata.timestamp handle = get_field(event, 'subscription_handle') self.data.add_rclcpp_subscription( subscription_pointer, timestamp, handle) def _handle_rclcpp_subscription_callback_added( self, event: Dict, metadata: EventMetadata, ) -> None: subscription_pointer = get_field(event, 'subscription') timestamp = metadata.timestamp callback_object = get_field(event, 'callback') self.data.add_callback_object( subscription_pointer, timestamp, callback_object) def _handle_rcl_service_init( self, event: Dict, metadata: EventMetadata, ) -> None: handle = get_field(event, 'service_handle') timestamp = metadata.timestamp node_handle = get_field(event, 'node_handle') rmw_handle = get_field(event, 'rmw_service_handle') service_name = get_field(event, 'service_name') self.data.add_service( handle, timestamp, node_handle, rmw_handle, service_name) def _handle_rclcpp_service_callback_added( self, event: Dict, metadata: EventMetadata, ) -> None: handle = get_field(event, 'service_handle') timestamp = metadata.timestamp callback_object = get_field(event, 'callback') self.data.add_callback_object(handle, timestamp, callback_object) def _handle_rcl_client_init( self, event: Dict, metadata: EventMetadata, ) -> None: handle = get_field(event, 'client_handle') timestamp = metadata.timestamp node_handle = get_field(event, 'node_handle') rmw_handle = get_field(event, 'rmw_client_handle') service_name = get_field(event, 'service_name') self.data.add_client(handle, timestamp, node_handle, rmw_handle, service_name) def _handle_rcl_timer_init( self, event: Dict, metadata: EventMetadata, ) -> None: handle = get_field(event, 'timer_handle') timestamp = metadata.timestamp period = get_field(event, 'period') tid = metadata.tid self.data.add_timer(handle, timestamp, period, tid) def _handle_rclcpp_timer_callback_added( self, event: Dict, metadata: EventMetadata, ) -> None: handle = get_field(event, 'timer_handle') timestamp = metadata.timestamp callback_object = get_field(event, 'callback') self.data.add_callback_object(handle, timestamp, callback_object) def _handle_rclcpp_timer_link_node( self, event: Dict, metadata: EventMetadata, ) -> None: handle = get_field(event, 'timer_handle') timestamp = metadata.timestamp node_handle = get_field(event, 'node_handle') self.data.add_timer_node_link(handle, timestamp, node_handle) def _handle_rclcpp_callback_register( self, event: Dict, metadata: EventMetadata, ) -> None: callback_object = get_field(event, 'callback') timestamp = metadata.timestamp 
symbol = get_field(event, 'symbol') self.data.add_callback_symbol(callback_object, timestamp, symbol) def _handle_callback_start( self, event: Dict, metadata: EventMetadata, ) -> None: # Add to dict callback = get_field(event, 'callback') timestamp = metadata.timestamp is_intra_process = get_field( event, 'is_intra_process', raise_if_not_found=False) self.data.add_callback_start_instance( timestamp, callback, is_intra_process) def _handle_callback_end( self, event: Dict, metadata: EventMetadata, ) -> None: # Fetch from dict callback = get_field(event, 'callback') timestamp = metadata.timestamp self.data.add_callback_end_instance(timestamp, callback) def _handle_rcl_lifecycle_state_machine_init( self, event: Dict, metadata: EventMetadata, ) -> None: node_handle = get_field(event, 'node_handle') state_machine = get_field(event, 'state_machine') self.data.add_lifecycle_state_machine(state_machine, node_handle) def _handle_rcl_lifecycle_transition( self, event: Dict, metadata: EventMetadata, ) -> None: timestamp = metadata.timestamp state_machine = get_field(event, 'state_machine') start_label = get_field(event, 'start_label') goal_label = get_field(event, 'goal_label') self.data.add_lifecycle_state_transition( state_machine, start_label, goal_label, timestamp) def _handle_rclcpp_publish( self, event: Dict, metadata: EventMetadata, ) -> None: publisher_handle = get_field(event, 'publisher_handle') timestamp = metadata.timestamp message = get_field(event, 'message') message_timestamp = get_field(event, 'message_timestamp') self.data.add_rclcpp_publish_instance( timestamp, publisher_handle, message, message_timestamp) def _handle_rcl_publish( self, event: Dict, metadata: EventMetadata, ) -> None: publisher_handle = get_field(event, 'publisher_handle') timestamp = metadata.timestamp message = get_field(event, 'message') self.data.add_rcl_publish_instance( timestamp, publisher_handle, message) def _handle_message_construct( self, event: Dict, metadata: EventMetadata, ) -> None: original_message = get_field(event, 'original_message') constructed_message = get_field(event, 'constructed_message') timestamp = metadata.timestamp self.data.add_message_construct_instance( timestamp, original_message, constructed_message) def _handle_rclcpp_intra_publish( self, event: Dict, metadata: EventMetadata, ) -> None: message = get_field(event, 'message') publisher_handle = get_field(event, 'publisher_handle') timestamp = metadata.timestamp message_timestamp = get_field(event, 'message_timestamp') self.data.add_rclcpp_intra_publish_instance( timestamp, publisher_handle, message, message_timestamp) def _handle_dispatch_subscription_callback( self, event: Dict, metadata: EventMetadata, ) -> None: callback_object = get_field(event, 'callback') message = get_field(event, 'message') timestamp = metadata.timestamp source_stamp = get_field(event, 'source_stamp') message_timestamp = get_field(event, 'message_timestamp') self.data.add_dispatch_subscription_callback_instance( timestamp, callback_object, message, source_stamp, message_timestamp ) def _handle_dispatch_intra_process_subscription_callback( self, event: Dict, metadata: EventMetadata, ) -> None: callback_object = get_field(event, 'callback') message = get_field(event, 'message') timestamp = metadata.timestamp message_timestamp = get_field(event, 'message_timestamp') self.data.add_dispatch_intra_process_subscription_callback_instance( timestamp, callback_object, message, message_timestamp ) def _handle_on_data_available( self, event: Dict, metadata: EventMetadata, 
) -> None: timestamp = metadata.timestamp source_stamp = get_field(event, 'source_stamp') self.data.add_on_data_available_instance(timestamp, source_stamp) def _handle_dds_write( self, event: Dict, metadata: EventMetadata, ) -> None: timestamp = metadata.timestamp message = get_field(event, 'message') self.data.add_dds_write_instance(timestamp, message) def _handle_dds_bind_addr_to_stamp( self, event: Dict, metadata: EventMetadata, ) -> None: timestamp = metadata.timestamp addr = get_field(event, 'addr') source_stamp = get_field(event, 'source_stamp') self.data.add_dds_bind_addr_to_stamp(timestamp, addr, source_stamp) def _handle_dds_bind_addr_to_addr( self, event: Dict, metadata: EventMetadata, ) -> None: timestamp = metadata.timestamp addr_from = get_field(event, 'addr_from') addr_to = get_field(event, 'addr_to') self.data.add_dds_bind_addr_to_addr(timestamp, addr_from, addr_to) def _handle_rmw_implementation( self, event: Dict, metadata: EventMetadata ) -> None: metadata rmw_impl = get_field(event, 'rmw_impl') self.data.add_rmw_implementation(rmw_impl) def _handle_construct_executor( self, event: Dict, metadata: EventMetadata ) -> None: stamp = metadata.timestamp executor_addr = get_field(event, 'executor_addr') executor_type_name = get_field(event, 'executor_type_name') self.data.add_executor(executor_addr, stamp, executor_type_name) def _handle_construct_static_executor( self, event: Dict, metadata: EventMetadata ) -> None: stamp = metadata.timestamp executor_addr = get_field(event, 'executor_addr') collector_addr = get_field(event, 'entities_collector_addr') executor_type_name = get_field(event, 'executor_type_name') self.data.add_executor_static(executor_addr, collector_addr, stamp, executor_type_name) def _handle_add_callback_group( self, event: Dict, metadata: EventMetadata ) -> None: stamp = metadata.timestamp executor_addr = get_field(event, 'executor_addr') callback_group_addr = get_field(event, 'callback_group_addr') group_type_name = get_field(event, 'group_type_name') self.data.add_callback_group(executor_addr, stamp, callback_group_addr, group_type_name) def _handle_add_callback_group_static_executor( self, event: Dict, metadata: EventMetadata ) -> None: stamp = metadata.timestamp collector_addr = get_field(event, 'entities_collector_addr') callback_group_addr = get_field(event, 'callback_group_addr') group_type_name = get_field(event, 'group_type_name') self.data.add_callback_group_static_executor( collector_addr, stamp, callback_group_addr, group_type_name) def _handle_callback_group_add_timer( self, event: Dict, metadata: EventMetadata
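# --- Pattern sketch (illustrative, not from caret_analyze; the source above is truncated) ---
# Ros2Handler above registers one bound method per trace event name in a dict and
# lets the tracetools_analysis EventHandler dispatch on the event name. The class
# below is a hypothetical, self-contained illustration of that dispatch-table
# pattern only; it does not reproduce the real EventHandler API.
from typing import Callable, Dict


class MiniDispatcher:

    def __init__(self) -> None:
        # Map a trace event name to the method that handles it.
        self._handlers: Dict[str, Callable[[Dict], None]] = {
            'ros2:rcl_init': self._on_rcl_init,
            'ros2:callback_start': self._on_callback_start,
        }

    def process(self, event_name: str, event: Dict) -> None:
        # Look up the handler for this event name and invoke it, if registered.
        handler = self._handlers.get(event_name)
        if handler is not None:
            handler(event)

    def _on_rcl_init(self, event: Dict) -> None:
        print('context:', event.get('context_handle'))

    def _on_callback_start(self, event: Dict) -> None:
        print('callback:', event.get('callback'))


# Example: MiniDispatcher().process('ros2:rcl_init', {'context_handle': 1})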
#usr/bin/python import os import time import itertools import string import hashlib import sys import signal import getopt import random import threading import multiprocessing from multiprocessing import Process from ctypes import c_char_p __version__ = '1.0.0' info = """ Name : find-extreme-hashes.py Created By : <NAME> Blog : http://thomas-messmer.com Documentation : https://github.com/Zumili/find-extreme-hashes License : The MIT License Version : %s """ % (__version__) #TODO: Use "argparse" instead of "getopt" with ugly parameter test #https://docs.python.org/3/howto/argparse.html # Used to break all loops in processes and treads done = False class AttackConfig(object): id = 0 user_name = "" hashlib_type_str = "" charset_combined = "" output_file = "" random_length = 0 digits_only = 0 use_postfix = False no_info = False find_small_hash = True find_big_hash = False bf_steps=0 # The class "constructor" - It's actually an initializer def __init__(self, id, user_name, hashlib_type_str,charset_combined, output_file,random_length,digits_only,use_postfix, no_info,find_small_hash,find_big_hash,bf_steps): self.id = id self.user_name = user_name self.hashlib_type_str = hashlib_type_str self.charset_combined = charset_combined self.output_file = output_file self.random_length = random_length self.digits_only = digits_only self.use_postfix = use_postfix self.no_info = no_info self.find_small_hash = find_small_hash self.find_big_hash = find_big_hash self.bf_steps = bf_steps def signal_handler(signal, frame): global done done=True sys.exit(0) def animate(mpa_hash_per_sec): for c in itertools.cycle(['|', '/', '-', '\\']): if done==True: break hash_per_sec_string = "H/s: " for i in range(len(mpa_hash_per_sec)): hash_per_sec_string = (hash_per_sec_string + "(P" +str(i)+" " + str(mpa_hash_per_sec[i])+") ") sys.stdout.write('\r' + c + " " + hash_per_sec_string ) sys.stdout.flush() time.sleep(0.2) def _attack(attack_config, mpv_smallest_hexdigest, mpv_biggest_hexdigest, mpa_hash_per_sec_array, lock): smallest_hex = ("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF" "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF" "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF" "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF") biggest_hex = ("00000000000000000000000000000000" "00000000000000000000000000000000" "00000000000000000000000000000000" "00000000000000000000000000000000") smallest_candidate = "" biggest_candidate = "" hash_per_sec = 0 worker_passes = 0 smaller_candidates_found = 0 bigger_candidates_found = 0 # Create local variables for performance increase! # Do not use attack_config.<element> in loops! 
charset_combined = attack_config.charset_combined output_file = attack_config.output_file random_length = attack_config.random_length digits_only = attack_config.digits_only use_postfix = attack_config.use_postfix user_name = attack_config.user_name id = attack_config.id hashlib_type_str = attack_config.hashlib_type_str find_small_hash = attack_config.find_small_hash find_big_hash = attack_config.find_big_hash no_info = attack_config.no_info bf_steps = attack_config.bf_steps start_time = time.time() # If random_length != 0 we use a randomized approach if random_length != 0: #rsw = int(random_length,10) L = len(attack_config.charset_combined) tmp_passes = 0 while 1: #112000 H/s if not use_postfix: random_string = user_name+''.join ( charset_combined[int(L * random.random())] for _ in range(random_length) ) else: random_string = ''.join ( charset_combined[int(L * random.random())] for _ in range(random_length) ) + user_name #worker_passes += 1 #if worker_passes % 100000 == 0: # The following 4 lines should be slightly faster than # the previous 2 tmp_passes += 1 if tmp_passes > 100000: tmp_passes = 0 worker_passes += 100000 elapsed_time_fl = (time.time() - start_time) start_time = time.time() hash_per_sec = int(100000/elapsed_time_fl) lock.acquire() mpa_hash_per_sec_array[id] = hash_per_sec lock.release() if done==True: break # Even a bit more faster when leaving encoding string #(python 3.x only) act_hash_hex = hashlib.new(hashlib_type_str,random_string .encode()).hexdigest() # Faster than using bytes #act_hash_hex = hashlib.new(hashlib_type_str,random_string #.encode('utf-8')).hexdigest() # 10% slower #act_hash_hex = hashlib.new(hashlib_type_str,bytes( #random_string, #encoding='utf-8') #).hexdigest() if (find_small_hash and act_hash_hex < smallest_hex and (not digits_only or act_hash_hex.isdecimal())): smallest_hex = act_hash_hex smaller_candidates_found += 1 lock.acquire() if smallest_hex < mpv_smallest_hexdigest.get(): mpv_smallest_hexdigest.set(smallest_hex) smallest_candidate = random_string print_found_info(id,"Smaller",smallest_hex, random_string,worker_passes, smaller_candidates_found,no_info) write_output(output_file,smallest_hex, smallest_candidate, biggest_hex, biggest_candidate) else: smallest_hex = mpv_smallest_hexdigest.get() lock.release() elif (find_big_hash and act_hash_hex > biggest_hex and (not digits_only or act_hash_hex.isdecimal())): biggest_hex = act_hash_hex bigger_candidates_found += 1 lock.acquire() if biggest_hex > mpv_biggest_hexdigest.get(): mpv_biggest_hexdigest.set(biggest_hex) biggest_candidate = random_string print_found_info(id,"Bigger",biggest_hex, random_string,worker_passes, bigger_candidates_found,no_info) write_output(output_file,smallest_hex, smallest_candidate, biggest_hex, biggest_candidate) else: biggest_hex = mpv_biggest_hexdigest.get() lock.release() # If random_length not set we use a brute force approach else: brute_force_string = "" break_loop = False tmp_passes = 0 for n in range(bf_steps, 47+1): if not no_info: print("\n[!] 
I'm at character %i"%n) for xs in itertools.product(charset_combined, repeat=n): saved = ''.join(xs) brute_force_string = user_name+saved #worker_passes += 1 #if worker_passes % 100000 == 0: # The following 4 lines should be slightly faster than # the previous 2 tmp_passes += 1 if tmp_passes > 100000: tmp_passes = 0 worker_passes += 100000 elapsed_time_fl = (time.time() - start_time) start_time = time.time() hash_per_sec = int(100000/elapsed_time_fl) lock.acquire() mpa_hash_per_sec_array[id] = hash_per_sec lock.release() if done==True: break_loop = True break act_hash_hex = hashlib.new(hashlib_type_str, brute_force_string .encode()).hexdigest() if (find_small_hash and act_hash_hex < smallest_hex and (not digits_only or act_hash_hex.isdecimal())): smallest_hex = act_hash_hex smaller_candidates_found += 1 lock.acquire() if smallest_hex < mpv_smallest_hexdigest.get(): mpv_smallest_hexdigest.set(smallest_hex) smallest_candidate = brute_force_string print_found_info(id,"Smaller",smallest_hex, brute_force_string,worker_passes, smaller_candidates_found,no_info) write_output(output_file,smallest_hex, smallest_candidate, biggest_hex, biggest_candidate) else: smallest_hex = mpv_smallest_hexdigest.get() lock.release() elif (find_big_hash and act_hash_hex > biggest_hex and (not digits_only or act_hash_hex.isdecimal())): biggest_hex = act_hash_hex bigger_candidates_found += 1 lock.acquire() if biggest_hex > mpv_biggest_hexdigest.get(): mpv_biggest_hexdigest.set(biggest_hex) biggest_candidate = brute_force_string print_found_info(id,"Bigger",biggest_hex, brute_force_string,worker_passes, bigger_candidates_found,no_info) write_output(output_file,smallest_hex, smallest_candidate, biggest_hex, biggest_candidate) else: biggest_hex = mpv_biggest_hexdigest.get() lock.release() if break_loop: break def write_output(output_file,smallest_hex,smallest_candidate, biggest_hex,biggest_candidate): if output_file != '': f = open(output_file, "w") if smallest_candidate != "": f.write(smallest_hex+':'+smallest_candidate+'\n') if biggest_candidate != "": f.write(biggest_hex+':'+biggest_candidate) f.close() return def print_found_info(id,hash_type_str,hexdigest,candidate,worker_passes, candidate_found,no_info): if no_info == False: print('\n\n\n'+hash_type_str+' Hash in P%i\n' % id) print(hexdigest+':'+candidate) print("\n[|] Time: ", time.strftime('%H:%M:%S')) print("[|] Keywords attempted: ", worker_passes,'') print("[|] Candidates found: ", candidate_found,'\n') else: print(hexdigest+':'+candidate) return def print_help(msg): print_options(msg) def print_options(msg): print(msg) print("""Usage: python %s [options] Options Short/Long | Type | Description =====================+======+========================================== -m, --hash-type | Num | [-m ?] hash mode e.g. 0=MD5, 1400=SHA256 -c, --charset | Num | [-c ?] charset [0,1,2,...,custom] -r, --random-length | Num | [-r ?] length of rand str or brute force -f, --find-mode | Num | [-f ?] find 0=small 1=big 2=small and big -d, --digits-only | | hash must only contain digits (0-9) -u, --user-name | Str | user-name works as pre- or postfix -p, --post-fix | | selects if user-name should be postfix -o, --output-file | Str | output file for found extreme hashes -n, --no-info | | only hash:candidate pair, good for pipe -w, --worker | Num | [-w ?] worker count, minimum 1 worker -e, --exclude-chars | Str | string of characters removed from charset -b, --bf-steps | Num | [-b ?] 
brute force step size if worker >1 -s, --shuffle | | shuffle final charset """%sys.argv[0]) return def print_bf_steps_info(): print("""Brute Force Steps - option [-b <steps>] If 1 worker used, brute force starts at pos <steps>+1. If more workers used e.g. [-w 3] brute force starts at: 1*<steps>+1 for 1. worker. 2*<steps>+1 for 2. worker. 3*<steps>+1 for 3. worker. ...and so on... """) return def print_random_length_info(): print("""Random Length and Brute Force Selector - option [-r <length>] If used e.g. [-r 8] it defines the length of random string used to create the hash. If [-r 0] or [not used] it sets internal mode to brute force. Possible length of string is [1-31]. """) return def print_find_mode_info(): print("""Find Mode - option [-f <mode>] # | Find Mode ============+================================= [not used]| Find only smaller hashes. [-f 0] | Find only smaller hashes. [-f 1] | Find only bigger hashes. [-f 2] | Find both, bigger and smaller hashes. """) return def print_worker_count_info(): print("""Worker Count - option [-w <worker>] [-w 0], [-w 1] or [not used] always use 1 worker Maximum worker count depends on maximum cpu count! Max Cpu Count: %i """%int(multiprocessing.cpu_count())) return def print_user_name_info(): print("""User Name - option [-w <user-name>] Must contain at least 3 characters! """) return def print_output_file_info(): print("""Output File - option [-w <out-file>] Must contain at least 3 charaters! """) return def print_hashtypes(): tmp_list = list(hashlib.algorithms_available) tmp_list.sort() # sorts normally by alphabetical order tmp_list.sort(key=len) # sorts by length tmp_str ='\n '.join(tmp_list) print("Hash Modes - option [-m <mode>]") print("\nAvailable hashing algortihms:\n") print(" ",tmp_str) return # def print_hashtypes(): # print("""Hash Modes - option [-m <mode>] # Mode # | Name | Category # =======+==============+===================== # 0 | MD5 | Raw Hash # 100 | SHA1 | Raw Hash # 600 | BLAKE2b-512 | Raw Hash # 1300 | SHA2-224 | Raw Hash # 1400 | SHA2-256 | Raw Hash # 1700 | SHA2-512 | Raw
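# ---------------------------------------------------------------------------
# Editor's note: a minimal, single-process sketch of the randomized search
# that _attack() implements above -- hash random candidate strings and keep
# the candidate whose hexdigest is lexicographically smallest.  The charset,
# candidate length and iteration count here are illustrative assumptions, not
# values taken from the script, and the multiprocessing, locking and progress
# reporting of the real worker are deliberately left out.  hashlib, random
# and string are already imported at the top of this script.
# ---------------------------------------------------------------------------
def find_small_hash_sketch(prefix="user", length=8, iterations=100000,
                           algorithm="md5"):
    charset = string.ascii_lowercase + string.digits
    smallest_hex = "f" * 128          # sorts after any realistic hexdigest
    smallest_candidate = ""
    for _ in range(iterations):
        candidate = prefix + "".join(random.choice(charset)
                                     for _ in range(length))
        hex_digest = hashlib.new(algorithm, candidate.encode()).hexdigest()
        if hex_digest < smallest_hex:
            smallest_hex, smallest_candidate = hex_digest, candidate
    return smallest_hex, smallest_candidate

# Example: print the best hash:candidate pair found in one short run.
# print(':'.join(find_small_hash_sketch(iterations=10000)))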
a registration with `registrationId` exists in the system. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_registration_with_http_info(registration_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str registration_id: id for this registration (required) :return: None If the method is called asynchronously, returns the request thread. """ all_params = ['registration_id'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_registration" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'registration_id' is set if ('registration_id' not in params) or (params['registration_id'] is None): raise ValueError("Missing the required parameter `registration_id` when calling `get_registration`") collection_formats = {} resource_path = '/registrations/{registrationId}'.replace('{format}', 'json') path_params = {} if 'registration_id' in params: path_params['registrationId'] = params['registration_id'] query_params = {} header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json']) # Authentication setting auth_settings = ['APP_NORMAL', 'OAUTH'] return self.api_client.call_api(resource_path, 'HEAD', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_registration_configuration(self, registration_id, **kwargs): """ Get registration configuration. Returns all configuration settings for this registration. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_registration_configuration(registration_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str registration_id: id for this registration (required) :param bool include_metadata: :return: SettingListSchema If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_registration_configuration_with_http_info(registration_id, **kwargs) else: (data) = self.get_registration_configuration_with_http_info(registration_id, **kwargs) return data def get_registration_configuration_with_http_info(self, registration_id, **kwargs): """ Get registration configuration. Returns all configuration settings for this registration. 
This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_registration_configuration_with_http_info(registration_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str registration_id: id for this registration (required) :param bool include_metadata: :return: SettingListSchema If the method is called asynchronously, returns the request thread. """ all_params = ['registration_id', 'include_metadata'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_registration_configuration" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'registration_id' is set if ('registration_id' not in params) or (params['registration_id'] is None): raise ValueError("Missing the required parameter `registration_id` when calling `get_registration_configuration`") collection_formats = {} resource_path = '/registrations/{registrationId}/configuration'.replace('{format}', 'json') path_params = {} if 'registration_id' in params: path_params['registrationId'] = params['registration_id'] query_params = {} if 'include_metadata' in params: query_params['includeMetadata'] = params['include_metadata'] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json']) # Authentication setting auth_settings = ['APP_NORMAL', 'OAUTH'] return self.api_client.call_api(resource_path, 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='SettingListSchema', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_registration_instance_configuration(self, registration_id, instance_id, **kwargs): """ Get configuration for instance of registration. Returns all configuration settings for this registration instance. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_registration_instance_configuration(registration_id, instance_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str registration_id: id for this registration (required) :param int instance_id: The instance of this registration (required) :param bool include_metadata: :return: SettingListSchema If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_registration_instance_configuration_with_http_info(registration_id, instance_id, **kwargs) else: (data) = self.get_registration_instance_configuration_with_http_info(registration_id, instance_id, **kwargs) return data def get_registration_instance_configuration_with_http_info(self, registration_id, instance_id, **kwargs): """ Get configuration for instance of registration. Returns all configuration settings for this registration instance. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_registration_instance_configuration_with_http_info(registration_id, instance_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str registration_id: id for this registration (required) :param int instance_id: The instance of this registration (required) :param bool include_metadata: :return: SettingListSchema If the method is called asynchronously, returns the request thread. """ all_params = ['registration_id', 'instance_id', 'include_metadata'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_registration_instance_configuration" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'registration_id' is set if ('registration_id' not in params) or (params['registration_id'] is None): raise ValueError("Missing the required parameter `registration_id` when calling `get_registration_instance_configuration`") # verify the required parameter 'instance_id' is set if ('instance_id' not in params) or (params['instance_id'] is None): raise ValueError("Missing the required parameter `instance_id` when calling `get_registration_instance_configuration`") if 'instance_id' in params and params['instance_id'] < 0: raise ValueError("Invalid value for parameter `instance_id` when calling `get_registration_instance_configuration`, must be a value greater than or equal to `0`") collection_formats = {} resource_path = '/registrations/{registrationId}/instances/{instanceId}/configuration'.replace('{format}', 'json') path_params = {} if 'registration_id' in params: path_params['registrationId'] = params['registration_id'] if 'instance_id' in params: path_params['instanceId'] = params['instance_id'] query_params = {} if 'include_metadata' in params: query_params['includeMetadata'] = params['include_metadata'] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['application/json']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json']) # Authentication setting auth_settings = ['APP_NORMAL', 'OAUTH'] return self.api_client.call_api(resource_path, 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='SettingListSchema', auth_settings=auth_settings, callback=params.get('callback'), 
_return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_registration_instance_launch_history(self, registration_id, instance_id, **kwargs): """ Get launch history for an instance of a registration. Returns history of the launches of the specified instance of this registration. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_registration_instance_launch_history(registration_id, instance_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str registration_id: id for this registration (required) :param int instance_id: The instance of this registration (required) :param bool include_history_log: Whether to include the history log in the launch history :return: LaunchHistoryListSchema If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_registration_instance_launch_history_with_http_info(registration_id, instance_id, **kwargs) else: (data) = self.get_registration_instance_launch_history_with_http_info(registration_id, instance_id, **kwargs) return data def get_registration_instance_launch_history_with_http_info(self, registration_id, instance_id, **kwargs): """ Get launch history for an instance of a registration. Returns history of the launches of the specified instance of this registration. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_registration_instance_launch_history_with_http_info(registration_id, instance_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str registration_id: id for this registration (required) :param int instance_id: The instance of this registration (required) :param bool include_history_log: Whether to include the history log in the launch history :return: LaunchHistoryListSchema If the method is called asynchronously, returns the request thread. """ all_params = ['registration_id', 'instance_id', 'include_history_log'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_registration_instance_launch_history"
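# ---------------------------------------------------------------------------
# Editor's note: a hedged usage sketch of the synchronous/asynchronous calling
# convention documented in the method docstrings above.  How `api` (an
# instance of this generated API class) is constructed and authenticated is
# assumed and not shown; only the callback mechanics follow the docstrings.
# ---------------------------------------------------------------------------
from pprint import pprint

def fetch_registration_configuration(api, registration_id):
    # Synchronous: the call blocks and returns the SettingListSchema.
    settings = api.get_registration_configuration(registration_id,
                                                  include_metadata=True)

    # Asynchronous: passing `callback` makes the call return the request
    # thread; the callback receives the deserialized response when it arrives.
    def on_response(response):
        pprint(response)

    thread = api.get_registration_configuration(registration_id,
                                                callback=on_response)
    return settings, thread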
<reponame>chiragmatkar/testplan<gh_stars>0 """ This file is base on the difflib from python standard library (version: 2.7.9) it provides diff (context/unified) functions with more options like GNU diff, including: --ignore-space-change, --ignore-whitespace, --ignore-blank-lines Due to the different algorithm, its output might be a slightly difference compared with that of gnu diff or windiff, but it won't lead to confusing. Module difflib -- helpers for computing deltas between objects. Function get_close_matches(word, possibilities, n=3, cutoff=0.6): Use SequenceMatcher to return list of the best "good enough" matches. Function context_diff(a, b): For two lists of strings, return a delta in context diff format. Function diff(a, b): Return a delta: the difference between `a` and `b` (lists of strings). Function unified_diff(a, b): For two lists of strings, return a delta in unified diff format. Class SequenceMatcher: A flexible class for comparing pairs of sequences of any type. Class Differ: For producing human-readable deltas from sequences of lines of text. """ import os import re import heapq import six from collections import namedtuple as _namedtuple from functools import reduce from datetime import datetime __all__ = [ "Match", "SequenceMatcher", "get_close_matches", "Differ", "IS_CHARACTER_JUNK", "IS_LINE_JUNK", "diff", "context_diff", "unified_diff", ] Match = _namedtuple("Match", "a b size") def _calculate_ratio(matches, length): if length: return 2.0 * matches / length return 1.0 class SequenceMatcher(object): """ SequenceMatcher is a flexible class for comparing pairs of sequences of any type, so long as the sequence elements are hashable. The basic algorithm predates, and is a little fancier than, an algorithm published in the late 1980's by <NAME> Obershelp under the hyperbolic name "gestalt pattern matching". The basic idea is to find the longest contiguous matching subsequence that contains no "junk" elements (R-O doesn't address junk). The same idea is then applied recursively to the pieces of the sequences to the left and to the right of the matching subsequence. This does not yield minimal edit sequences, but does tend to yield matches that "look right" to people. SequenceMatcher tries to compute a "human-friendly diff" between two sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the longest *contiguous* & junk-free matching subsequence. That's what catches peoples' eyes. The Windows(tm) windiff has another interesting notion, pairing up elements that appear uniquely in each sequence. That, and the method here, appear to yield more intuitive difference reports than does diff. This method appears to be the least vulnerable to synching up on blocks of "junk lines", though (like blank lines in ordinary text files, or maybe "<P>" lines in HTML files). That may be because this is the only method of the 3 that has a *concept* of "junk" <wink>. Example, comparing two strings, and considering blanks to be "junk": >>> s = SequenceMatcher(lambda x: x == " ", ... "private Thread currentThread;", ... "private volatile Thread currentThread;") >>> .ratio() returns a float in [0, 1], measuring the "similarity" of the sequences. As a rule of thumb, a .ratio() value over 0.6 means the sequences are close matches: >>> print(round(s.ratio(), 3)) 0.866 >>> If you're only interested in where the sequences match, .get_matching_blocks() is handy: >>> for block in s.get_matching_blocks(): ... 
print("a[%d] and b[%d] match for %d elements" % block) a[0] and b[0] match for 8 elements a[8] and b[17] match for 21 elements a[29] and b[38] match for 0 elements Note that the last tuple returned by .get_matching_blocks() is always a dummy, (len(a), len(b), 0), and this is the only case in which the last tuple element (number of elements matched) is 0. If you want to know how to change the first sequence into the second, use .get_opcodes(): >>> for opcode in s.get_opcodes(): ... print("%6s a[%d:%d] b[%d:%d]" % opcode) equal a[0:8] b[0:8] insert a[8:8] b[8:17] equal a[8:29] b[17:38] See the Differ class for a fancy human-friendly file differencer, which uses SequenceMatcher both to compare sequences of lines, and to compare sequences of characters within similar (near-matching) lines. See also function get_close_matches() in this module, which shows how simple code building on SequenceMatcher can be used to do useful work. Timing: Basic R-O is cubic time worst case and quadratic time expected case. SequenceMatcher is quadratic time for the worst case and has expected-case behavior dependent in a complicated way on how many elements the sequences have in common; best case time is linear. Methods: __init__(isjunk=None, a='', b='') Construct a SequenceMatcher. set_seqs(a, b) Set the two sequences to be compared. set_seq1(a) Set the first sequence to be compared. set_seq2(b) Set the second sequence to be compared. find_longest_match(alo, ahi, blo, bhi) Find longest matching block in a[alo:ahi] and b[blo:bhi]. get_matching_blocks() Return list of triples describing matching subsequences. get_opcodes() Return list of 5-tuples describing how to turn a into b. ratio() Return a measure of the sequences' similarity (float in [0,1]). quick_ratio() Return an upper bound on .ratio() relatively quickly. real_quick_ratio() Return an upper bound on ratio() very quickly. """ def __init__(self, isjunk=None, a="", b="", autojunk=False): """Construct a SequenceMatcher. Optional arg isjunk is None (the default), or a one-argument function that takes a sequence element and returns true iff the element is junk. None is equivalent to passing "lambda x: 0", i.e. no elements are considered to be junk. For example, pass lambda x: x in " \\t" if you're comparing lines as sequences of characters, and don't want to synch up on blanks or hard tabs. Optional arg a is the first of two sequences to be compared. By default, an empty string. The elements of a must be hashable. See also .set_seqs() and .set_seq1(). Optional arg b is the second of two sequences to be compared. By default, an empty string. The elements of b must be hashable. See also .set_seqs() and .set_seq2(). Optional arg autojunk should be set to False to disable the "automatic junk heuristic" that treats popular elements as junk (see module documentation for more information). """ # Members: # a # first sequence # b # second sequence; differences are computed as "what do # we need to do to 'a' to change it into 'b'?" 
# b2j # for x in b, b2j[x] is a list of the indices (into b) # at which x appears; junk elements do not appear # fullbcount # for x in b, fullbcount[x] == the number of times x # appears in b; only materialized if really needed (used # only for computing quick_ratio()) # matching_blocks # a list of (i, j, k) triples, where a[i:i+k] == b[j:j+k]; # ascending & non-overlapping in i and in j; terminated by # a dummy (len(a), len(b), 0) sentinel # opcodes # a list of (tag, i1, i2, j1, j2) tuples, where tag is # one of # 'replace' a[i1:i2] should be replaced by b[j1:j2] # 'delete' a[i1:i2] should be deleted # 'insert' b[j1:j2] should be inserted # 'equal' a[i1:i2] == b[j1:j2] # isjunk # a user-supplied function taking a sequence element and # returning true iff the element is "junk" -- this has # subtle but helpful effects on the algorithm, which I'll # get around to writing up someday <0.9 wink>. # DON'T USE! Only __chain_b uses this. Use isbjunk. # isbjunk # for x in b, isbjunk(x) == isjunk(x) but much faster; # it's really the __contains__ method of a hidden dict. # DOES NOT WORK for x in a! # isbpopular # for x in b, isbpopular(x) is true iff b is reasonably long # (at least 200 elements) and x accounts for more than 1 + 1% of # its elements (when autojunk is enabled). # DOES NOT WORK for x in a! self.isjunk = isjunk self.a = self.b = None self.autojunk = autojunk self.set_seqs(a, b) def set_seqs(self, a, b): """Set the two sequences to be compared. >>> s = SequenceMatcher() >>> s.set_seqs("abcd", "bcde") >>> s.ratio() 0.75 """ self.set_seq1(a) self.set_seq2(b) def set_seq1(self, a): """Set the first sequence to be compared. The second sequence to be compared is not changed. >>> s =
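# ---------------------------------------------------------------------------
# Editor's note: a small demonstration of the SequenceMatcher API documented
# in the class docstring above, using the same example strings.  It exercises
# ratio(), get_matching_blocks() and get_opcodes(); the expected values noted
# in the comments are the ones quoted in the docstring.
# ---------------------------------------------------------------------------
def _sequence_matcher_demo():
    s = SequenceMatcher(lambda x: x == " ",
                        "private Thread currentThread;",
                        "private volatile Thread currentThread;")
    print(round(s.ratio(), 3))                       # 0.866
    for i, j, size in s.get_matching_blocks():
        # a[0]/b[0] for 8, a[8]/b[17] for 21, then the (29, 38, 0) sentinel
        print("a[%d] and b[%d] match for %d elements" % (i, j, size))
    for tag, i1, i2, j1, j2 in s.get_opcodes():
        print("%6s a[%d:%d] b[%d:%d]" % (tag, i1, i2, j1, j2))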
new contact name and any optional arguments. Returns new PingdomContact instance Optional Parameters: * email -- Contact email address Type: String * cellphone -- Cellphone number, without the country code part. In some countries you are supposed to exclude leading zeroes. (Requires countrycode and countryiso) Type: String * countrycode -- Cellphone country code (Requires cellphone and countryiso) Type: String * countryiso -- Cellphone country ISO code. For example: US (USA), GB (Britain) or SE (Sweden) (Requires cellphone and countrycode) Type: String * defaultsmsprovider -- Default SMS provider Type: String ['clickatell', 'bulksms', 'esendex', 'cellsynt'] * directtwitter -- Send tweets as direct messages Type: Boolean Default: True * twitteruser -- Twitter user Type: String """ # Warn user about unhandled parameters for key in kwargs: if key not in ['email', 'cellphone', 'countrycode', 'countryiso', 'defaultsmsprovider', 'directtwitter', 'twitteruser']: sys.stderr.write("'%s'" % key + ' is not a valid argument ' + 'of newContact()\n') kwargs['name'] = name contactinfo = self.request("POST", "notification_contacts", kwargs).json()['contact'] return PingdomContact(self, contactinfo) def modifyContacts(self, contactids, paused): """Modifies a list of contacts. Provide comma separated list of contact ids and desired paused state Returns status message """ response = self.request("PUT", "notification_contacts", {'contactids': contactids, 'paused': paused}) return response.json()['message'] def deleteContacts(self, contactids): """Deletes a list of contacts. CANNOT BE REVERSED! Provide a comma-separated list of contactid's to delete Returns status message """ return self.request("DELETE", "notification_contacts", {'delcheckids': contactids}).json()['message'] def singleTest(self, host, checktype, **kwargs): """Performs a single test using a specified Pingdom probe against a specified target. Please note that this method is meant to be used sparingly, not to set up your own monitoring solution. Provide hostname and check type, followed by any optional arguments. 
Types available: * http * httpcustom * tcp * ping * dns * udp * smtp * pop3 Optional arguments: * probeid -- Probe to use for check Type: Integer Default: A random probe See newCheck() docstring for type-specific arguments Returned structure: { 'status' : <String> Test result status ['up, 'down'] 'responsetime' : <Integer> Response time in milliseconds 'statusdesc' : <String> Short status description 'statusdesclong' : <String> Long status description 'probeid' : <Integer> Probe identifier 'probedesc' : <String> Probe description } """ if checktype == 'http': # Warn user about unhandled parameters for key in kwargs: if key not in ['probeid', 'url', 'encryption', 'port', 'auth', 'shouldcontain', 'shouldnotcontain', 'postdata']: if key.startswith('requestheader') is not True: sys.stderr.write("'%s'" % key + ' is not a valid ' + 'argument of singleTest() for type ' + "'http'\n") elif checktype == 'httpcustom': # Warn user about unhandled parameters for key in kwargs: if key not in ['probeid', 'url', 'encryption', 'port', 'auth', 'additionalurls']: sys.stderr.write("'%s'" % key + ' is not a valid ' + 'argument of singleTest() for type ' + "'httpcustom'\n") elif checktype == 'tcp': # Warn user about unhandled parameters for key in kwargs: if key not in ['probeid', 'port', 'stringtosend', 'stringtoexpect']: sys.stderr.write("'%s'" % key + ' is not a valid ' + 'argument of singleTest() for type ' + "'tcp'\n") elif checktype == 'ping': # Warn user about unhandled parameters for key in kwargs: if key not in ['probeid']: sys.stderr.write("'%s'" % key + ' is not a valid ' + 'argument of singleTest() for type ' + "'ping'\n") elif checktype == 'dns': # Warn user about unhandled parameters for key in kwargs: if key not in ['probeid', 'expectedip', 'nameserver']: sys.stderr.write("'%s'" % key + ' is not a valid ' + 'argument of singleTest() for type ' + "'dns'\n") elif checktype == 'udp': # Warn user about unhandled parameters for key in kwargs: if key not in ['probeid', 'port', 'stringtosend', 'stringtoexpect']: sys.stderr.write("'%s'" % key + ' is not a valid ' + 'argument of singleTest() for type ' + "'udp'\n") elif checktype == 'smtp': # Warn user about unhandled parameters for key in kwargs: if key not in ['probeid', 'port', 'auth', 'stringtoexpect', 'encryption']: sys.stderr.write("'%s'" % key + ' is not a valid ' + 'argument of singleTest() for type ' + "'smtp'\n") elif checktype == 'pop3': # Warn user about unhandled parameters for key in kwargs: if key not in ['probeid', 'port', 'stringtoexpect', 'encryption']: sys.stderr.write("'%s'" % key + ' is not a valid ' + 'argument of singleTest() for type ' + "'pop3'\n") elif checktype == 'imap': # Warn user about unhandled parameters for key in kwargs: if key not in ['probeid', 'port', 'stringtoexpect', 'encryption']: sys.stderr.write("'%s'" % key + ' is not a valid ' + 'argument of singleTest() for type ' + "'imap'\n") else: raise Exception("Invalid checktype in singleTest()") parameters = {'host': host, 'type': checktype} for key, value in kwargs.iteritems(): parameters[key] = value checkinfo = self.request('GET', "single", parameters) return checkinfo.json()['result'] def getSettings(self): """Returns all account-specific settings. 
Returned structure: { 'firstname' : <String> First name 'lastname' : <String> Last name 'company' : <String> Company 'email' : <String> Email 'phone' : <String> Phone 'phonecountryiso' : <String> Phone country ISO code 'cellphone' : <String> Cellphone 'cellphonecountryiso' : <String> Cellphone country ISO code 'address' : <String> Address line 1 'address2' : <String> Address line 2 'zip' : <String> Zip, postal code or equivalent 'location' : <String> City / location 'state' : <String> State or equivalent 'autologout' : <Boolean> Enable auto-logout 'country' : { 'name' : <String> Country name 'iso' : <String> Country ISO-code 'countryid' : <Integer> Country identifier } 'vatcode' : <String> For certain EU countries, VAT-code 'region' : <String> Region 'regionid' : <Integer> Region identifier, see reference 'accountcreated' : <Integer> Account creation timestamp 'timezone' : { 'id' : <String> Timezone name 'description' : <String> Timezone description 'timezoneid' : <Integer> Timezone identifier } 'dateformat' : <String> Date format 'timeformat' : <String> Time format 'datetimeformatid' : <Integer> Date/time format identifier 'numberformat' : <String> Number format 'numberformatexample' : <String> Example of number presentation 'numberformatid' : <Integer> Number format identifier 'publicreportscode' : <String> URL code 'settingssaved' : <Boolean> True if user has saved initial settings in control panel } """ return self.request('GET', 'settings').json()['settings'] def modifySettings(self, **kwargs): """Modify account-specific settings. Returns status message for operation Optional parameters: * firstname -- First name Type: String * lastname -- Last name Type: String * company -- Company Type: String * email -- Email (Please note that your email is used for authentication purposes such as using this API or logging into the Pingdom Panel) Type: String * cellphone -- Cellphone (without country code) (Requires cellcountrycode and cellcountryiso) Type: String * cellcountrycode -- Cellphone country code, for example 1 (USA) or 46 (Sweden) Type: Integer * cellcountryiso -- Cellphone country ISO code, for example US(USA) or SE (Sweden) Type: String * phone -- Phone (without country code) (Requires phonecountrycode and phonecountryiso) Type: String * phonecountrycode -- Phone country code, for example 1 (USA) or 46 (Sweden) Type: Integer * phonecountryiso -- Phone country ISO code, for example US (USA) or SE (Sweden) Type: String * address -- Address line 1 Type: String * address2 -- Address line 2 Type: String * zip -- Zip, postal code or equivalent Type: String * location -- City / location Type: String * state -- State, province or equivalent Type: String * countryiso -- Country ISO code, for example US (USA) or SE (Sweden) Type: String * vatcode -- For certain EU countries, VAT-code. Example: SE123456789 Type: String * autologout -- Enable auto-logout Type: Boolean * regionid -- Region identifier, for localization purposes. 0 for "Custom"/none. See the API resource "Reference" for more information Type: Integer * timezoneid -- Time zone identifier. See the API resource "Reference" for more information Type: Integer * datetimeformatid -- Date/time format identifier. See the API resource "Reference" for more information Type: Integer * numberformatid -- Number format identifier. 
See the API resource "Reference" for more information Type: Integer * pubrcustomdesign -- Use custom design for public reports Type: Boolean * pubrtextcolor -- Public reports, custom text color (Example: FEFFFE or 99CC00) Type: String * pubrbackgroundcolor
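# ---------------------------------------------------------------------------
# Editor's note: the per-checktype keyword validation inside singleTest()
# above repeats the same "warn about unhandled parameters" loop for every
# check type.  The sketch below shows the same behaviour driven by a lookup
# table; the entries mirror the branches above, and the 'http'/'httpcustom'
# types (which also accept requestheader* keys) are omitted for brevity.
# This is an illustrative refactoring, not part of the published client.
# ---------------------------------------------------------------------------
_SINGLETEST_VALID_ARGS = {
    'ping': ['probeid'],
    'dns':  ['probeid', 'expectedip', 'nameserver'],
    'tcp':  ['probeid', 'port', 'stringtosend', 'stringtoexpect'],
    'udp':  ['probeid', 'port', 'stringtosend', 'stringtoexpect'],
    'smtp': ['probeid', 'port', 'auth', 'stringtoexpect', 'encryption'],
    'pop3': ['probeid', 'port', 'stringtoexpect', 'encryption'],
    'imap': ['probeid', 'port', 'stringtoexpect', 'encryption'],
}

def _warn_unhandled_singletest_args(checktype, kwargs):
    valid = _SINGLETEST_VALID_ARGS.get(checktype)
    if valid is None:
        raise Exception("Invalid checktype in singleTest()")
    for key in kwargs:
        if key not in valid:
            sys.stderr.write("'%s' is not a valid argument of singleTest() "
                             "for type '%s'\n" % (key, checktype))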
= self.to_hyperdb_value(hyperdb.Date) res = [] properties = self.getclass(classname).getprops() for nodeid, date_stamp, user, action, params in journal: params = eval_import(params) if isinstance(params, type({})): for param, value in params.items(): if not value: continue property = properties.get(param, None) if property is None: # deleted property continue cvt = self.to_hyperdb_value(property.__class__) if isinstance(property, Password): params[param] = password.JournalPassword(value) elif isinstance(property, Date): params[param] = cvt(value) elif isinstance(property, Interval): params[param] = cvt(value) elif isinstance(property, Boolean): params[param] = cvt(value) # XXX numeric ids res.append((str(nodeid), dc(date_stamp), user, action, params)) return res def save_journal(self, classname, cols, nodeid, journaldate, journaltag, action, params): """ Save the journal entry to the database """ entry = (nodeid, journaldate, journaltag, action, params) # do the insert a = self.arg sql = 'insert into %s__journal (%s) values (%s,%s,%s,%s,%s)' % ( classname, cols, a, a, a, a, a) self.sql(sql, entry) def load_journal(self, classname, cols, nodeid): """ Load the journal from the database """ # now get the journal entries sql = 'select %s from %s__journal where nodeid=%s order by date' % ( cols, classname, self.arg) self.sql(sql, (nodeid,)) return self.cursor.fetchall() def pack(self, pack_before): """ Delete all journal entries except "create" before 'pack_before'. """ date_stamp = self.to_sql_value(Date)(pack_before) # do the delete for classname in self.classes: sql = "delete from %s__journal where date<%s and "\ "action<>'create'" % (classname, self.arg) self.sql(sql, (date_stamp,)) def sql_commit(self): """ Actually commit to the database. """ logging.getLogger('roundup.hyperdb.backend').info('commit') self.conn.commit() # open a new cursor for subsequent work self.cursor = self.conn.cursor() def commit(self): """ Commit the current transactions. Save all data changed since the database was opened or since the last commit() or rollback(). """ # commit the database self.sql_commit() # session and otk are committed with the db but not the other # way round if self.Session: self.Session.commit() if self.Otk: self.Otk.commit() # now, do all the other transaction stuff for method, args in self.transactions: method(*args) # save the indexer self.indexer.save_index() # clear out the transactions self.transactions = [] # clear the cache: Don't carry over cached values from one # transaction to the next (there may be other changes from other # transactions) self.clearCache() def sql_rollback(self): self.conn.rollback() def rollback(self): """ Reverse all actions from the current transaction. Undo all the changes made since the database was opened or the last commit() or rollback() was performed. """ logging.getLogger('roundup.hyperdb.backend').info('rollback') self.sql_rollback() # roll back "other" transaction stuff for method, args in self.transactions: # delete temporary files if method == self.doStoreFile: self.rollbackStoreFile(*args) self.transactions = [] # clear the cache self.clearCache() def sql_close(self): logging.getLogger('roundup.hyperdb.backend').info('close') self.conn.close() def close(self): """ Close off the connection. 
""" self.indexer.close() self.sql_close() if self.Session: self.Session.close() self.Session = None if self.Otk: self.Otk.close() self.Otk = None # # The base Class class # class Class(hyperdb.Class): """ The handle to a particular class of nodes in a hyperdatabase. All methods except __repr__ and getnode must be implemented by a concrete backend Class. """ # For many databases the LIKE operator ignores case. # Postgres and Oracle have an ILIKE operator to support this. # We define the default here, can be changed in derivative class case_insensitive_like = 'LIKE' # For some databases (mysql) the = operator for strings ignores case. # We define the default here, can be changed in derivative class case_sensitive_equal = '=' def schema(self): """ A dumpable version of the schema that we can store in the database """ return (self.key, [(x, repr(y)) for x, y in self.properties.items() if not y.computed]) def enableJournalling(self): """Turn journalling on for this class """ self.do_journal = 1 def disableJournalling(self): """Turn journalling off for this class """ self.do_journal = 0 # Editing nodes: def create(self, **propvalues): """ Create a new node of this class and return its id. The keyword arguments in 'propvalues' map property names to values. The values of arguments must be acceptable for the types of their corresponding properties or a TypeError is raised. If this class has a key property, it must be present and its value must not collide with other key strings or a ValueError is raised. Any other properties on this class that are missing from the 'propvalues' dictionary are set to None. If an id in a link or multilink property does not refer to a valid node, an IndexError is raised. """ self.fireAuditors('create', None, propvalues) newid = self.create_inner(**propvalues) self.fireReactors('create', newid, None) return newid def create_inner(self, **propvalues): """ Called by create, in-between the audit and react calls. 
""" if 'id' in propvalues: raise KeyError('"id" is reserved') if self.db.journaltag is None: raise DatabaseError(_('Database open read-only')) if ('creator' in propvalues or 'actor' in propvalues or 'creation' in propvalues or 'activity' in propvalues): raise KeyError('"creator", "actor", "creation" and ' '"activity" are reserved') for p in propvalues: prop = self.properties[p] if prop.computed: raise KeyError('"%s" is a computed property'%p) # new node's id newid = self.db.newid(self.classname) # validate propvalues num_re = re.compile(r'^\d+$') for key, value in propvalues.items(): if key == self.key: try: self.lookup(value) except KeyError: pass else: raise ValueError('node with key "%s" exists' % value) # try to handle this property try: prop = self.properties[key] except KeyError: raise KeyError('"%s" has no property "%s"' % (self.classname, key)) if value is not None and isinstance(prop, Link): if not isinstance(value, type('')): raise ValueError('link value must be String') link_class = self.properties[key].classname # if it isn't a number, it's a key if not num_re.match(value): try: value = self.db.classes[link_class].lookup(value) except (TypeError, KeyError): raise IndexError('new property "%s": %s not a %s' % ( key, value, link_class)) elif not self.db.getclass(link_class).hasnode(value): raise IndexError('%s has no node %s' % (link_class, value)) # save off the value propvalues[key] = value # register the link with the newly linked node if self.do_journal and self.properties[key].do_journal: self.db.addjournal(link_class, value, 'link', (self.classname, newid, key)) elif isinstance(prop, Multilink): if value is None: value = [] if not hasattr(value, '__iter__') or \ isinstance(value, type('')): raise TypeError('new property "%s" not an iterable of ids' % key) # clean up and validate the list of links link_class = self.properties[key].classname l = [] for entry in value: if not isinstance(entry, type('')): raise ValueError('"%s" multilink value (%r) ' 'must contain Strings' % ( key, value)) # if it isn't a number, it's a key if not num_re.match(entry): try: entry = self.db.classes[link_class].lookup(entry) except (TypeError, KeyError): raise IndexError('new property "%s": %s not a %s' % ( key, entry, self.properties[key].classname)) l.append(entry) value = l propvalues[key] = value # handle additions for nodeid in value: if not self.db.getclass(link_class).hasnode(nodeid): raise IndexError('%s has no node %s' % (link_class, nodeid)) # register the link with the newly linked node if self.do_journal and self.properties[key].do_journal: self.db.addjournal(link_class, nodeid, 'link', (self.classname, newid, key)) elif isinstance(prop, String): if type(value) != type('') and type(value) != type(u''): raise TypeError('new property "%s" not a string'%key) if prop.indexme: self.db.indexer.add_text((self.classname, newid, key), value) elif isinstance(prop, Password): if not isinstance(value, password.Password): raise TypeError('new property "%s" not a Password'%key) elif isinstance(prop, Date): if value is not None and not isinstance(value, date.Date): raise TypeError('new property "%s" not a Date'%key) elif isinstance(prop, Interval): if value is not None and not isinstance(value, date.Interval): raise TypeError('new property "%s" not an Interval'%key) elif value is not None and isinstance(prop, Number): try: float(value) except ValueError: raise TypeError('new property "%s" not numeric'%key) elif value is not None and isinstance(prop, Integer): try: int(value) except ValueError: raise 
TypeError('new property "%s" not integer'%key) elif value is not None and isinstance(prop, Boolean): try: int(value) except ValueError: raise TypeError('new property "%s" not boolean'%key) # make sure there's data where there needs to be for key, prop in self.properties.items(): if key in propvalues: continue if key == self.key: raise ValueError('key property "%s" is required'%key) if isinstance(prop, Multilink): propvalues[key] = [] else: propvalues[key] = None # done self.db.addnode(self.classname, newid, propvalues) if self.do_journal: self.db.addjournal(self.classname, newid, ''"create", {}) # XXX numeric ids return str(newid) def get(self, nodeid, propname, default=_marker, cache=1): """Get the value of a property on an existing node of this class. 'nodeid' must be the id of an existing node of this class or an IndexError is raised. 'propname' must be
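# ---------------------------------------------------------------------------
# Editor's note: a hedged usage sketch of the create()/create_inner() contract
# implemented above.  The 'issue' class, its properties and the open db handle
# are hypothetical; the point is which validations fire (reserved names,
# unknown properties, type checks on String and Link values) before the node
# is written with db.addnode() and journalled.
# ---------------------------------------------------------------------------
def _create_usage_sketch(db):
    issue = db.getclass('issue')   # hypothetical class: String 'title',
                                   # Link 'assignedto' to the 'user' class
    newid = issue.create(title='crash on startup', assignedto='admin')

    # Each of these raises before anything is stored:
    #   issue.create(id='5')           -> KeyError: '"id" is reserved'
    #   issue.create(creator='admin')  -> KeyError: reserved property
    #   issue.create(title=42)         -> TypeError: not a string
    #   issue.create(assignedto='no-such-user') -> IndexError from the lookup
    return newid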
[1, np.nan], "B": [1.0, 2.0]} df = pandas.DataFrame(frame_data) modin_df = pd.DataFrame(frame_data) for v in ["", 1, np.nan, 1.0]: df_equals(modin_df.fillna(v), df.fillna(v)) @pytest.mark.parametrize("data", test_data_values, ids=test_data_keys) def test_fillna_skip_certain_blocks(self, data): modin_df = pd.DataFrame(data) pandas_df = pandas.DataFrame(data) # don't try to fill boolean, int blocks df_equals(modin_df.fillna(np.nan), pandas_df.fillna(np.nan)) def test_fillna_dict_series(self): frame_data = { "a": [np.nan, 1, 2, np.nan, np.nan], "b": [1, 2, 3, np.nan, np.nan], "c": [np.nan, 1, 2, 3, 4], } df = pandas.DataFrame(frame_data) modin_df = pd.DataFrame(frame_data) df_equals(modin_df.fillna({"a": 0, "b": 5}), df.fillna({"a": 0, "b": 5})) df_equals( modin_df.fillna({"a": 0, "b": 5, "d": 7}), df.fillna({"a": 0, "b": 5, "d": 7}), ) # Series treated same as dict df_equals(modin_df.fillna(modin_df.max()), df.fillna(df.max())) def test_fillna_dataframe(self): frame_data = { "a": [np.nan, 1, 2, np.nan, np.nan], "b": [1, 2, 3, np.nan, np.nan], "c": [np.nan, 1, 2, 3, 4], } df = pandas.DataFrame(frame_data, index=list("VWXYZ")) modin_df = pd.DataFrame(frame_data, index=list("VWXYZ")) # df2 may have different index and columns df2 = pandas.DataFrame( { "a": [np.nan, 10, 20, 30, 40], "b": [50, 60, 70, 80, 90], "foo": ["bar"] * 5, }, index=list("VWXuZ"), ) modin_df2 = pd.DataFrame(df2) # only those columns and indices which are shared get filled df_equals(modin_df.fillna(modin_df2), df.fillna(df2)) @pytest.mark.parametrize("data", test_data_values, ids=test_data_keys) def test_fillna_columns(self, data): modin_df = pd.DataFrame(data) pandas_df = pandas.DataFrame(data) df_equals( modin_df.fillna(method="ffill", axis=1), pandas_df.fillna(method="ffill", axis=1), ) df_equals( modin_df.fillna(method="ffill", axis=1), pandas_df.fillna(method="ffill", axis=1), ) @pytest.mark.parametrize("data", test_data_values, ids=test_data_keys) def test_fillna_invalid_method(self, data): modin_df = pd.DataFrame(data) pandas_df = pandas.DataFrame(data) # noqa F841 with tm.assert_raises_regex(ValueError, "ffil"): modin_df.fillna(method="ffil") def test_fillna_invalid_value(self): test_data = TestData() modin_df = pd.DataFrame(test_data.frame) # list pytest.raises(TypeError, modin_df.fillna, [1, 2]) # tuple pytest.raises(TypeError, modin_df.fillna, (1, 2)) # frame with series pytest.raises(TypeError, modin_df.iloc[:, 0].fillna, modin_df) @pytest.mark.parametrize("data", test_data_values, ids=test_data_keys) def test_fillna_col_reordering(self, data): modin_df = pd.DataFrame(data) pandas_df = pandas.DataFrame(data) df_equals(modin_df.fillna(method="ffill"), pandas_df.fillna(method="ffill")) """ TODO: Use this when Arrow issue resolves: (https://issues.apache.org/jira/browse/ARROW-2122) def test_fillna_datetime_columns(self): frame_data = {'A': [-1, -2, np.nan], 'B': date_range('20130101', periods=3), 'C': ['foo', 'bar', None], 'D': ['foo2', 'bar2', None]} df = pandas.DataFrame(frame_data, index=date_range('20130110', periods=3)) modin_df = pd.DataFrame(frame_data, index=date_range('20130110', periods=3)) df_equals(modin_df.fillna('?'), df.fillna('?')) frame_data = {'A': [-1, -2, np.nan], 'B': [pandas.Timestamp('2013-01-01'), pandas.Timestamp('2013-01-02'), pandas.NaT], 'C': ['foo', 'bar', None], 'D': ['foo2', 'bar2', None]} df = pandas.DataFrame(frame_data, index=date_range('20130110', periods=3)) modin_df = pd.DataFrame(frame_data, index=date_range('20130110', periods=3)) df_equals(modin_df.fillna('?'), df.fillna('?')) 
""" @pytest.mark.parametrize("data", test_data_values, ids=test_data_keys) def test_filter(self, data): modin_df = pd.DataFrame(data) pandas_df = pandas.DataFrame(data) by = {"items": ["col1", "col5"], "regex": "4$|3$", "like": "col"} df_equals( modin_df.filter(items=by["items"]), pandas_df.filter(items=by["items"]) ) df_equals( modin_df.filter(regex=by["regex"], axis=0), pandas_df.filter(regex=by["regex"], axis=0), ) df_equals( modin_df.filter(regex=by["regex"], axis=1), pandas_df.filter(regex=by["regex"], axis=1), ) df_equals(modin_df.filter(like=by["like"]), pandas_df.filter(like=by["like"])) with pytest.raises(TypeError): modin_df.filter(items=by["items"], regex=by["regex"]) with pytest.raises(TypeError): modin_df.filter() def test_first(self): i = pd.date_range("2018-04-09", periods=4, freq="2D") ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i) with pytest.warns(UserWarning): ts.first("3D") @pytest.mark.parametrize("data", test_data_values, ids=test_data_keys) def test_first_valid_index(self, data): modin_df = pd.DataFrame(data) pandas_df = pandas.DataFrame(data) assert modin_df.first_valid_index() == (pandas_df.first_valid_index()) @pytest.mark.skip(reason="Defaulting to Pandas") @pytest.mark.parametrize("data", test_data_values, ids=test_data_keys) def test_from_dict(self, data): modin_df = pd.DataFrame(data) # noqa F841 pandas_df = pandas.DataFrame(data) # noqa F841 with pytest.raises(NotImplementedError): pd.DataFrame.from_dict(None) @pytest.mark.skip(reason="Defaulting to Pandas") @pytest.mark.parametrize("data", test_data_values, ids=test_data_keys) def test_from_items(self, data): modin_df = pd.DataFrame(data) # noqa F841 pandas_df = pandas.DataFrame(data) # noqa F841 with pytest.raises(NotImplementedError): pd.DataFrame.from_items(None) @pytest.mark.skip(reason="Defaulting to Pandas") @pytest.mark.parametrize("data", test_data_values, ids=test_data_keys) def test_from_records(self, data): modin_df = pd.DataFrame(data) # noqa F841 pandas_df = pandas.DataFrame(data) # noqa F841 with pytest.raises(NotImplementedError): pd.DataFrame.from_records(None) def test_get_value(self): data = test_data_values[0] with pytest.warns(UserWarning): pd.DataFrame(data).get_value(0, "col1") def test_get_values(self): data = test_data_values[0] with pytest.warns(UserWarning): pd.DataFrame(data).get_values() @pytest.mark.parametrize("data", test_data_values, ids=test_data_keys) @pytest.mark.parametrize("n", int_arg_values, ids=arg_keys("n", int_arg_keys)) def test_head(self, data, n): # Test normal dataframe head modin_df = pd.DataFrame(data) pandas_df = pandas.DataFrame(data) df_equals(modin_df.head(n), pandas_df.head(n)) df_equals(modin_df.head(len(modin_df) + 1), pandas_df.head(len(pandas_df) + 1)) # Test head when we call it from a QueryCompilerView modin_result = modin_df.loc[:, ["col1", "col3", "col3"]].head(n) pandas_result = pandas_df.loc[:, ["col1", "col3", "col3"]].head(n) df_equals(modin_result, pandas_result) def test_hist(self): data = test_data_values[0] with pytest.warns(UserWarning): pd.DataFrame(data).hist(None) @pytest.mark.skip(reason="Defaulting to Pandas") @pytest.mark.parametrize("data", test_data_values, ids=test_data_keys) def test_iat(self, data): modin_df = pd.DataFrame(data) pandas_df = pandas.DataFrame(data) # noqa F841 with pytest.raises(NotImplementedError): modin_df.iat() @pytest.mark.parametrize("data", test_data_values, ids=test_data_keys) @pytest.mark.parametrize("axis", axis_values, ids=axis_keys) @pytest.mark.parametrize( "skipna", bool_arg_values, 
ids=arg_keys("skipna", bool_arg_keys) ) def test_idxmax(self, data, axis, skipna): modin_df = pd.DataFrame(data) pandas_df = pandas.DataFrame(data) pandas_result = pandas_df.idxmax(axis=axis, skipna=skipna) modin_result = modin_df.idxmax(axis=axis, skipna=skipna) df_equals(modin_result, pandas_result) pandas_result = pandas_df.T.idxmax(axis=axis, skipna=skipna) modin_result = modin_df.T.idxmax(axis=axis, skipna=skipna) df_equals(modin_result, pandas_result) @pytest.mark.parametrize("data", test_data_values, ids=test_data_keys) @pytest.mark.parametrize("axis", axis_values, ids=axis_keys) @pytest.mark.parametrize( "skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys) ) def test_idxmin(self, data, axis, skipna): modin_df = pd.DataFrame(data) pandas_df = pandas.DataFrame(data) modin_result = modin_df.idxmin(axis=axis, skipna=skipna) pandas_result = pandas_df.idxmin(axis=axis, skipna=skipna) df_equals(modin_result, pandas_result) modin_result = modin_df.T.idxmin(axis=axis, skipna=skipna) pandas_result = pandas_df.T.idxmin(axis=axis, skipna=skipna) df_equals(modin_result, pandas_result) def test_infer_objects(self): data = test_data_values[0] with pytest.warns(UserWarning): pd.DataFrame(data).infer_objects() @pytest.mark.parametrize("data", test_data_values, ids=test_data_keys) def test_iloc(self, request, data): modin_df = pd.DataFrame(data) pandas_df = pandas.DataFrame(data) if not name_contains(request.node.name, ["empty_data"]): # Scaler np.testing.assert_equal(modin_df.iloc[0, 1], pandas_df.iloc[0, 1]) # Series df_equals(modin_df.iloc[0], pandas_df.iloc[0]) df_equals(modin_df.iloc[1:, 0], pandas_df.iloc[1:, 0]) df_equals(modin_df.iloc[1:2, 0], pandas_df.iloc[1:2, 0]) # DataFrame df_equals(modin_df.iloc[[1, 2]], pandas_df.iloc[[1, 2]]) # See issue #80 # df_equals(modin_df.iloc[[1, 2], [1, 0]], pandas_df.iloc[[1, 2], [1, 0]]) df_equals(modin_df.iloc[1:2, 0:2], pandas_df.iloc[1:2, 0:2]) # Issue #43 modin_df.iloc[0:3, :] # Write Item modin_df.iloc[[1, 2]] = 42 pandas_df.iloc[[1, 2]] = 42 df_equals(modin_df, pandas_df) else: with pytest.raises(IndexError): modin_df.iloc[0, 1] @pytest.mark.parametrize("data", test_data_values, ids=test_data_keys) def test_index(self, data): modin_df = pd.DataFrame(data) pandas_df = pandas.DataFrame(data) df_equals(modin_df.index, pandas_df.index) modin_df_cp = modin_df.copy() pandas_df_cp = pandas_df.copy() modin_df_cp.index = [str(i) for i in modin_df_cp.index] pandas_df_cp.index = [str(i) for i in pandas_df_cp.index] df_equals(modin_df_cp.index, pandas_df_cp.index) @pytest.mark.parametrize("data", test_data_values, ids=test_data_keys) def test_indexing_duplicate_axis(self, data): modin_df = pd.DataFrame(data) pandas_df = pandas.DataFrame(data) modin_df.index = pandas_df.index = [i // 3 for i in range(len(modin_df))] assert any(modin_df.index.duplicated()) assert any(pandas_df.index.duplicated()) df_equals(modin_df.iloc[0], pandas_df.iloc[0]) df_equals(modin_df.loc[0], pandas_df.loc[0]) df_equals(modin_df.iloc[0, 0:4], pandas_df.iloc[0, 0:4]) df_equals( modin_df.loc[0, modin_df.columns[0:4]], pandas_df.loc[0, pandas_df.columns[0:4]], ) def test_info(self): data = test_data_values[0] with pytest.warns(UserWarning): pd.DataFrame(data).info(memory_usage="deep") @pytest.mark.parametrize("data", test_data_values, ids=test_data_keys) @pytest.mark.parametrize("loc", int_arg_values, ids=arg_keys("loc", int_arg_keys)) def test_insert(self, data, loc): modin_df = pd.DataFrame(data) pandas_df = pandas.DataFrame(data) modin_df = modin_df.copy() pandas_df = 
pandas_df.copy() column = "New Column" value = modin_df.iloc[:, 0] try: pandas_df.insert(loc, column, value) except Exception as e: with pytest.raises(type(e)): modin_df.insert(loc, column, value) else: modin_df.insert(loc, column, value) df_equals(modin_df, pandas_df) with pytest.raises(ValueError): modin_df.insert(0, "Bad Column", modin_df) modin_df = pd.DataFrame(data) pandas_df = pandas.DataFrame(data) modin_df.insert(0, "Duplicate", modin_df[modin_df.columns[0]]) pandas_df.insert(0, "Duplicate", pandas_df[pandas_df.columns[0]]) df_equals(modin_df, pandas_df) modin_df = pd.DataFrame(data) pandas_df = pandas.DataFrame(data) modin_df.insert(0, "Scalar", 100) pandas_df.insert(0, "Scalar", 100) df_equals(modin_df, pandas_df) with pytest.raises(ValueError): modin_df.insert(0, "Too Short", list(modin_df[modin_df.columns[0]])[:-1]) with pytest.raises(ValueError): modin_df.insert(0, modin_df.columns[0], modin_df[modin_df.columns[0]]) with pytest.raises(IndexError): modin_df.insert(len(modin_df.columns) + 100, "Bad Loc", 100) modin_df = pd.DataFrame(data) pandas_df = pandas.DataFrame(data) modin_result = pd.DataFrame(columns=list("ab")).insert( 0, modin_df.columns[0], modin_df[modin_df.columns[0]] ) pandas_result = pandas.DataFrame(columns=list("ab")).insert( 0, pandas_df.columns[0], pandas_df[pandas_df.columns[0]] ) df_equals(modin_result, pandas_result) modin_df = pd.DataFrame(data) pandas_df = pandas.DataFrame(data) modin_result = pd.DataFrame(index=modin_df.index).insert( 0, modin_df.columns[0], modin_df[modin_df.columns[0]] ) pandas_result = pandas.DataFrame(index=pandas_df.index).insert( 0, pandas_df.columns[0], pandas_df[pandas_df.columns[0]] ) df_equals(modin_result, pandas_result) modin_df = pd.DataFrame(data) pandas_df = pandas.DataFrame(data) modin_result = modin_df.insert( 0, "DataFrame insert", modin_df[[modin_df.columns[0]]] ) pandas_result = pandas_df.insert( 0, "DataFrame insert", pandas_df[[pandas_df.columns[0]]] ) df_equals(modin_result, pandas_result) def test_interpolate(self): data = test_data_values[0] with pytest.warns(UserWarning): pd.DataFrame(data).interpolate() def test_is_copy(self): data = test_data_values[0] with pytest.warns(FutureWarning): assert pd.DataFrame(data).is_copy == pandas.DataFrame(data).is_copy @pytest.mark.parametrize("data", test_data_values, ids=test_data_keys) def test_items(self, data): modin_df = pd.DataFrame(data) pandas_df = pandas.DataFrame(data) modin_items = modin_df.items() pandas_items = pandas_df.items() for modin_item, pandas_item in zip(modin_items, pandas_items): modin_index, modin_series = modin_item pandas_index, pandas_series = pandas_item df_equals(pandas_series, modin_series) assert pandas_index == modin_index @pytest.mark.parametrize("data", test_data_values, ids=test_data_keys) def test_iteritems(self, data): modin_df = pd.DataFrame(data) pandas_df = pandas.DataFrame(data) modin_items = modin_df.iteritems() pandas_items = pandas_df.iteritems() for modin_item, pandas_item in zip(modin_items, pandas_items): modin_index, modin_series = modin_item pandas_index, pandas_series = pandas_item df_equals(pandas_series, modin_series) assert pandas_index == modin_index @pytest.mark.parametrize("data", test_data_values, ids=test_data_keys) def test_iterrows(self, data): modin_df = pd.DataFrame(data) pandas_df = pandas.DataFrame(data) modin_iterrows = modin_df.iterrows() pandas_iterrows = pandas_df.iterrows() for modin_row, pandas_row in zip(modin_iterrows, pandas_iterrows): modin_index, modin_series = modin_row pandas_index, pandas_series = 
pandas_row df_equals(pandas_series, modin_series) assert pandas_index == modin_index @pytest.mark.parametrize("data", test_data_values, ids=test_data_keys) def test_itertuples(self, data): modin_df = pd.DataFrame(data) pandas_df = pandas.DataFrame(data) # test default modin_it_default = modin_df.itertuples() pandas_it_default = pandas_df.itertuples() for modin_row, pandas_row in zip(modin_it_default, pandas_it_default): np.testing.assert_equal(modin_row, pandas_row) # test all combinations of custom params indices = [True, False] names = [None, "NotPandas", "Pandas"] for index in indices: for name in names: modin_it_custom = modin_df.itertuples(index=index, name=name) pandas_it_custom = pandas_df.itertuples(index=index, name=name) for modin_row, pandas_row in zip(modin_it_custom, pandas_it_custom): np.testing.assert_equal(modin_row, pandas_row) @pytest.mark.parametrize("data", test_data_values, ids=test_data_keys) def test_ix(self, data): modin_df = pd.DataFrame(data) pandas_df = pandas.DataFrame(data) # noqa F841 with pytest.raises(NotImplementedError): modin_df.ix() def test_join(self): frame_data = { "col1": [0, 1, 2, 3], "col2": [4, 5, 6, 7], "col3": [8, 9, 0, 1], "col4": [2, 4, 5, 6], } modin_df = pd.DataFrame(frame_data) pandas_df = pandas.DataFrame(frame_data) frame_data2 = {"col5": [0], "col6": [1]} modin_df2 =
from typing import Union, List, Optional from pyspark.sql.types import ( StructType, StructField, StringType, ArrayType, BooleanType, DataType, ) # This file is auto-generated by generate_schema so do not edit manually # noinspection PyPep8Naming class TestScriptSchema: """ A structured set of tests against a FHIR server implementation to determine compliance against the FHIR specification. """ # noinspection PyDefaultArgument @staticmethod def get_schema( max_nesting_depth: Optional[int] = 6, nesting_depth: int = 0, nesting_list: List[str] = [], max_recursion_limit: Optional[int] = 2, include_extension: Optional[bool] = False, extension_fields: Optional[List[str]] = [ "valueBoolean", "valueCode", "valueDate", "valueDateTime", "valueDecimal", "valueId", "valueInteger", "valuePositiveInt", "valueString", "valueTime", "valueUnsignedInt", "valueUri", "valueQuantity", ], extension_depth: int = 0, max_extension_depth: Optional[int] = 2, ) -> Union[StructType, DataType]: """ A structured set of tests against a FHIR server implementation to determine compliance against the FHIR specification. id: The logical id of the resource, as used in the URL for the resource. Once assigned, this value never changes. extension: May be used to represent additional information that is not part of the basic definition of the resource. In order to make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer is allowed to define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. meta: The metadata about the resource. This is content that is maintained by the infrastructure. Changes to the content may not always be associated with version changes to the resource. implicitRules: A reference to a set of rules that were followed when the resource was constructed, and which must be understood when processing the content. language: The base language in which the resource is written. text: A human-readable narrative that contains a summary of the resource, and may be used to represent the content of the resource to a human. The narrative need not encode all the structured data, but is required to contain sufficient detail to make it "clinically safe" for a human to just read the narrative. Resource definitions may define what content should be represented in the narrative to ensure clinical safety. contained: These resources do not have an independent existence apart from the resource that contains them - they cannot be identified independently, and nor can they have their own independent transaction scope. resourceType: This is a TestScript resource url: An absolute URI that is used to identify this test script when it is referenced in a specification, model, design or an instance. This SHALL be a URL, SHOULD be globally unique, and SHOULD be an address at which this test script is (or will be) published. The URL SHOULD include the major version of the test script. For more information see [Technical and Business Versions](resource.html#versions). identifier: A formal identifier that is used to identify this test script when it is represented in other formats, or referenced in a specification, model, design or an instance. version: The identifier that is used to identify this version of the test script when it is referenced in a specification, model, design or instance. This is an arbitrary value managed by the test script author and is not expected to be globally unique. 
For example, it might be a timestamp (e.g. yyyymmdd) if a managed version is not available. There is also no expectation that versions can be placed in a lexicographical sequence. name: A natural language name identifying the test script. This name should be usable as an identifier for the module by machine processing applications such as code generation. title: A short, descriptive, user-friendly title for the test script. status: The status of this test script. Enables tracking the life-cycle of the content. experimental: A boolean value to indicate that this test script is authored for testing purposes (or education/evaluation/marketing), and is not intended to be used for genuine usage. date: The date (and optionally time) when the test script was published. The date must change if and when the business version changes and it must change if the status code changes. In addition, it should change when the substantive content of the test script changes. publisher: The name of the individual or organization that published the test script. contact: Contact details to assist a user in finding and communicating with the publisher. description: A free text natural language description of the test script from a consumer's perspective. useContext: The content was developed with a focus and intent of supporting the contexts that are listed. These terms may be used to assist with indexing and searching for appropriate test script instances. jurisdiction: A legal or geographic region in which the test script is intended to be used. purpose: Explaination of why this test script is needed and why it has been designed as it has. copyright: A copyright statement relating to the test script and/or its contents. Copyright statements are generally legal restrictions on the use and publishing of the test script. origin: An abstract server used in operations within this test script in the origin element. destination: An abstract server used in operations within this test script in the destination element. metadata: The required capability must exist and are assumed to function correctly on the FHIR server being tested. fixture: Fixture in the test script - by reference (uri). All fixtures are required for the test script to execute. profile: Reference to the profile to be used for validation. variable: Variable is set based either on element value in response body or on header field value in the response headers. rule: Assert rule to be used in one or more asserts within the test script. ruleset: Contains one or more rules. Offers a way to group rules so assertions could reference the group of rules and have them all applied. setup: A series of required setup operations before tests are executed. test: A test in this script. teardown: A series of operations required to clean up after the all the tests are executed (successfully or otherwise). 
""" from spark_fhir_schemas.stu3.complex_types.extension import ExtensionSchema from spark_fhir_schemas.stu3.complex_types.meta import MetaSchema from spark_fhir_schemas.stu3.complex_types.narrative import NarrativeSchema from spark_fhir_schemas.stu3.simple_types.resourcelist import ResourceListSchema from spark_fhir_schemas.stu3.complex_types.identifier import IdentifierSchema from spark_fhir_schemas.stu3.complex_types.contactdetail import ( ContactDetailSchema, ) from spark_fhir_schemas.stu3.complex_types.usagecontext import ( UsageContextSchema, ) from spark_fhir_schemas.stu3.complex_types.codeableconcept import ( CodeableConceptSchema, ) from spark_fhir_schemas.stu3.complex_types.testscript_origin import ( TestScript_OriginSchema, ) from spark_fhir_schemas.stu3.complex_types.testscript_destination import ( TestScript_DestinationSchema, ) from spark_fhir_schemas.stu3.complex_types.testscript_metadata import ( TestScript_MetadataSchema, ) from spark_fhir_schemas.stu3.complex_types.testscript_fixture import ( TestScript_FixtureSchema, ) from spark_fhir_schemas.stu3.complex_types.reference import ReferenceSchema from spark_fhir_schemas.stu3.complex_types.testscript_variable import ( TestScript_VariableSchema, ) from spark_fhir_schemas.stu3.complex_types.testscript_rule import ( TestScript_RuleSchema, ) from spark_fhir_schemas.stu3.complex_types.testscript_ruleset import ( TestScript_RulesetSchema, ) from spark_fhir_schemas.stu3.complex_types.testscript_setup import ( TestScript_SetupSchema, ) from spark_fhir_schemas.stu3.complex_types.testscript_test import ( TestScript_TestSchema, ) from spark_fhir_schemas.stu3.complex_types.testscript_teardown import ( TestScript_TeardownSchema, ) if ( max_recursion_limit and nesting_list.count("TestScript") >= max_recursion_limit ) or (max_nesting_depth and nesting_depth >= max_nesting_depth): return StructType([StructField("id", StringType(), True)]) # add my name to recursion list for later my_nesting_list: List[str] = nesting_list + ["TestScript"] schema = StructType( [ # The logical id of the resource, as used in the URL for the resource. Once # assigned, this value never changes. StructField("id", StringType(), True), # May be used to represent additional information that is not part of the basic # definition of the resource. In order to make the use of extensions safe and # manageable, there is a strict set of governance applied to the definition and # use of extensions. Though any implementer is allowed to define an extension, # there is a set of requirements that SHALL be met as part of the definition of # the extension. StructField( "extension", ArrayType( ExtensionSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, ) ), True, ), # The metadata about the resource. This is content that is maintained by the # infrastructure. Changes to the content may not always be associated with
<reponame>rtflynn/tlslite-ng<filename>tlslite/utils/rsakey.py # Author: <NAME> # See the LICENSE file for legal information regarding use of this file. """Abstract class for RSA.""" from .cryptomath import * from . import tlshashlib as hashlib from ..errors import MaskTooLongError, MessageTooLongError, EncodingError, \ InvalidSignature, UnknownRSAType class RSAKey(object): """This is an abstract base class for RSA keys. Particular implementations of RSA keys, such as :py:class:`~.openssl_rsakey.OpenSSL_RSAKey`, :py:class:`~.python_rsakey.Python_RSAKey`, and :py:class:`~.pycrypto_rsakey.PyCrypto_RSAKey`, inherit from this. To create or parse an RSA key, don't use one of these classes directly. Instead, use the factory functions in :py:class:`~tlslite.utils.keyfactory`. """ def __init__(self, n=0, e=0): """Create a new RSA key. If n and e are passed in, the new key will be initialized. :type n: int :param n: RSA modulus. :type e: int :param e: RSA public exponent. """ raise NotImplementedError() def __len__(self): """Return the length of this key in bits. :rtype: int """ return numBits(self.n) def hasPrivateKey(self): """Return whether or not this key has a private component. :rtype: bool """ raise NotImplementedError() def hashAndSign(self, bytes, rsaScheme='PKCS1', hAlg='sha1', sLen=0): """Hash and sign the passed-in bytes. This requires the key to have a private component. It performs a PKCS1 or PSS signature on the passed-in data with selected hash algorithm. :type bytes: bytes-like object :param bytes: The value which will be hashed and signed. :type rsaScheme: str :param rsaScheme: The type of RSA scheme that will be applied, "PKCS1" for RSASSA-PKCS#1 v1.5 signature and "PSS" for RSASSA-PSS with MGF1 signature method :type hAlg: str :param hAlg: The hash algorithm that will be used :type sLen: int :param sLen: The length of intended salt value, applicable only for RSASSA-PSS signatures :rtype: bytearray :returns: A PKCS1 or PSS signature on the passed-in data. """ rsaScheme = rsaScheme.lower() hAlg = hAlg.lower() hashBytes = secureHash(bytearray(bytes), hAlg) return self.sign(hashBytes, padding=rsaScheme, hashAlg=hAlg, saltLen=sLen) def hashAndVerify(self, sigBytes, bytes, rsaScheme='PKCS1', hAlg='sha1', sLen=0): """Hash and verify the passed-in bytes with the signature. This verifies a PKCS1 or PSS signature on the passed-in data with selected hash algorithm. :type sigBytes: bytes-like object :param sigBytes: A PKCS1 or PSS signature. :type bytes: bytes-like object :param bytes: The value which will be hashed and verified. :type rsaScheme: str :param rsaScheme: The type of RSA scheme that will be applied, "PKCS1" for RSASSA-PKCS#1 v1.5 signature and "PSS" for RSASSA-PSS with MGF1 signature method :type hAlg: str :param hAlg: The hash algorithm that will be used :type sLen: int :param sLen: The length of intended salt value, applicable only for RSASSA-PSS signatures :rtype: bool :returns: Whether the signature matches the passed-in data. """ rsaScheme = rsaScheme.lower() hAlg = hAlg.lower() hashBytes = secureHash(bytearray(bytes), hAlg) return self.verify(sigBytes, hashBytes, rsaScheme, hAlg, sLen) def MGF1(self, mgfSeed, maskLen, hAlg): """Generate mask from passed-in seed. This generates mask based on passed-in seed and output maskLen. :type mgfSeed: bytearray :param mgfSeed: Seed from which mask will be generated. 
:type maskLen: int :param maskLen: Wished length of the mask, in octets :rtype: bytearray :returns: Mask """ hashLen = getattr(hashlib, hAlg)().digest_size if maskLen > (2 ** 32) * hashLen: raise MaskTooLongError("Incorrect parameter maskLen") T = bytearray() end = divceil(maskLen, hashLen) for x in range(0, end): C = numberToByteArray(x, 4) T += secureHash(mgfSeed + C, hAlg) return T[:maskLen] def EMSA_PSS_encode(self, mHash, emBits, hAlg, sLen=0): """Encode the passed in message This encodes the message using selected hash algorithm :type mHash: bytearray :param mHash: Hash of message to be encoded :type emBits: int :param emBits: maximal length of returned EM :type hAlg: str :param hAlg: hash algorithm to be used :type sLen: int :param sLen: length of salt""" hashLen = getattr(hashlib, hAlg)().digest_size emLen = divceil(emBits, 8) if emLen < hashLen + sLen + 2: raise EncodingError("The ending limit too short for " + "selected hash and salt length") salt = getRandomBytes(sLen) M2 = bytearray(8) + mHash + salt H = secureHash(M2, hAlg) PS = bytearray(emLen - sLen - hashLen - 2) DB = PS + bytearray(b'\x01') + salt dbMask = self.MGF1(H, emLen - hashLen - 1, hAlg) maskedDB = bytearray(i ^ j for i, j in zip(DB, dbMask)) mLen = emLen*8 - emBits mask = (1 << 8 - mLen) - 1 maskedDB[0] &= mask EM = maskedDB + H + bytearray(b'\xbc') return EM def RSASSA_PSS_sign(self, mHash, hAlg, sLen=0): """"Sign the passed in message This signs the message using selected hash algorithm :type mHash: bytes-like object :param mHash: Hash of message to be signed :type hAlg: str :param hAlg: hash algorithm to be used :type sLen: int :param sLen: length of salt""" EM = self.EMSA_PSS_encode(mHash, numBits(self.n) - 1, hAlg, sLen) m = bytesToNumber(EM) if m >= self.n: raise MessageTooLongError("Encode output too long") s = self._rawPrivateKeyOp(m) S = numberToByteArray(s, numBytes(self.n)) return S def EMSA_PSS_verify(self, mHash, EM, emBits, hAlg, sLen=0): """Verify signature in passed in encoded message This verifies the signature in encoded message :type mHash: bytes-like object :param mHash: Hash of the original not signed message :type EM: bytes-like object :param EM: Encoded message :type emBits: int :param emBits: Length of the encoded message in bits :type hAlg: str :param hAlg: hash algorithm to be used :type sLen: int :param sLen: Length of salt """ hashLen = getattr(hashlib, hAlg)().digest_size emLen = divceil(emBits, 8) if emLen < hashLen + sLen + 2: raise InvalidSignature("Invalid signature") if EM[-1] != 0xbc: raise InvalidSignature("Invalid signature") maskedDB = EM[0:emLen - hashLen - 1] H = EM[emLen - hashLen - 1:emLen - hashLen - 1 + hashLen] DBHelpMask = 1 << 8 - (8*emLen - emBits) DBHelpMask -= 1 DBHelpMask = (~DBHelpMask) & 0xff if maskedDB[0] & DBHelpMask != 0: raise InvalidSignature("Invalid signature") dbMask = self.MGF1(H, emLen - hashLen - 1, hAlg) DB = bytearray(i ^ j for i, j in zip(maskedDB, dbMask)) mLen = emLen*8 - emBits mask = (1 << 8 - mLen) - 1 DB[0] &= mask if any(x != 0 for x in DB[0:emLen - hashLen - sLen - 2]): raise InvalidSignature("Invalid signature") if DB[emLen - hashLen - sLen - 2] != 0x01: raise InvalidSignature("Invalid signature") if sLen != 0: salt = DB[-sLen:] else: salt = bytearray() newM = bytearray(8) + mHash + salt newH = secureHash(newM, hAlg) if H == newH: return True else: raise InvalidSignature("Invalid signature") def RSASSA_PSS_verify(self, mHash, S, hAlg, sLen=0): """Verify the signature in passed in message This verifies the signature in the signed message 
:type mHash: bytes-like object :param mHash: Hash of original message :type S: bytes-like object :param S: Signed message :type hAlg: str :param hAlg: Hash algorithm to be used :type sLen: int :param sLen: Length of salt """ if len(bytearray(S)) != len(numberToByteArray(self.n)): raise InvalidSignature("Invalid signature") s = bytesToNumber(S) m = self._rawPublicKeyOp(s) EM = numberToByteArray(m, divceil(numBits(self.n) - 1, 8)) result = self.EMSA_PSS_verify(mHash, EM, numBits(self.n) - 1, hAlg, sLen) if result: return True else: raise InvalidSignature("Invalid signature") def _raw_pkcs1_sign(self, bytes): """Perform signature on raw data, add PKCS#1 padding.""" if not self.hasPrivateKey(): raise AssertionError() paddedBytes = self._addPKCS1Padding(bytes, 1) m = bytesToNumber(paddedBytes) if m >= self.n: raise ValueError() c = self._rawPrivateKeyOp(m) sigBytes = numberToByteArray(c, numBytes(self.n)) return sigBytes def sign(self, bytes, padding='pkcs1', hashAlg=None, saltLen=None): """Sign the passed-in bytes. This requires the key to have a private component. It performs a PKCS1 signature on the passed-in data. :type bytes: bytes-like object :param bytes: The value which will be signed. :type padding: str :param padding: name of the rsa padding mode to use, supported: "pkcs1" for RSASSA-PKCS1_1_5 and "pss" for RSASSA-PSS. :type hashAlg: str :param hashAlg: name of hash to be encoded using the PKCS#1 prefix for "pkcs1" padding or the hash used for MGF1 in "pss". Parameter is mandatory for "pss" padding. :type saltLen: int :param saltLen: length of salt used for the PSS padding. Default is the length of the hash output used. :rtype: bytearray :returns: A PKCS1 signature on the passed-in data. """ padding = padding.lower() if padding == 'pkcs1': if hashAlg is not None: bytes = self.addPKCS1Prefix(bytes, hashAlg) sigBytes = self._raw_pkcs1_sign(bytes) elif padding == "pss": sigBytes
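# A small usage sketch for the RSAKey interface above, assuming the
# generateRSAKey helper from tlslite.utils.keyfactory (the factory module the
# class docstring points to) is available. It signs a message with RSASSA-PSS
# using SHA-256 and a salt as long as the digest, then verifies it; a
# mismatching message should fail verification (raising InvalidSignature or
# returning False, depending on the concrete key implementation).
from tlslite.utils import keyfactory

key = keyfactory.generateRSAKey(2048)      # returns a concrete RSAKey subclass
message = b"attack at dawn"

signature = key.hashAndSign(message, rsaScheme="PSS", hAlg="sha256", sLen=32)
assert key.hashAndVerify(signature, message, rsaScheme="PSS", hAlg="sha256", sLen=32)

try:
    ok = key.hashAndVerify(signature, b"attack at dusk",
                           rsaScheme="PSS", hAlg="sha256", sLen=32)
except Exception:
    ok = False
assert not ok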
<filename>craftroom/displays/regions.py<gh_stars>1-10
'''Wrapper to generate ds9 region files, for sketching on FITS images in ds9.'''

class Regions():
    def __init__(self, name, units='physical', path=''):
        self.name = name
        self.filename = path + self.name + '.reg'
        self.regions = []
        self.addHeader()
        self.addUnits(units)

    def addUnits(self, units='physical'):
        self.units = units

    def addHeader(self):
        self.header = '# Region file format: DS9 version 4.1\n'
        self.header += '# Filename: {0}\n'.format(self.filename)
        self.header += 'global color=magenta width=2 font="helvetica 10 bold roman" select=1 highlite=1 dash=0 fixed=0 edit=1 move=1 delete=0'

    def options(self, options):
        line = ''
        if len(options.keys()) > 0:
            line += ' # '
            for key, value in options.items():
                if key == 'text' or key == 'font':
                    line += key + '=' + '{' + str(value) + '}' + ' '
                else:
                    line += key + '=' + str(value) + ' '
        return line

    def addCircle(self, x, y, size=10, **kwargs):
        line = self.units + "; "
        line += "circle({0},{1},{2})".format(x, y, size) + self.options(kwargs)
        self.regions.append(line)

    def addText(self, x, y, text='bla!', size=10, **kwargs):
        line = self.units + "; "
        line += "text({0},{1},{2})".format(x, y, '{' + text + '}') + self.options(kwargs)
        self.regions.append(line)

    def addCompass(self, x, y, size=10, **kwargs):
        line = "# compass({0},{1},{2}) compass=fk5 'N' 'E' 1 1".format(x, y, size) + self.options(kwargs)
        self.regions.append(line)

    def addBox(self, x, y, w, h, **kwargs):
        line = self.units + "; "
        line += "box({0},{1},{2},{3})".format(x, y, w, h) + self.options(kwargs)
        self.regions.append(line)

    def addLine(self, x1, y1, x2, y2, **kwargs):
        line = self.units + "; "
        line += "line({0},{1},{2},{3})".format(x1, y1, x2, y2) + self.options(kwargs)
        self.regions.append(line)

    def write(self, filename=None):
        if filename is None:
            filename = self.filename
        f = open(filename, 'w')
        f.writelines(str(self))
        f.close()

    def __str__(self):
        lines = [self.header, self.units]
        lines.extend(self.regions)
        return '\n'.join(lines)

''' Regions Regions provide a means for marking particular areas of an image for further analysis. Regions may also be used for presentation purposes. DS9 supports a number of region descriptions, each of which may be edited, moved, rotated, displayed, saved and loaded, via the GUI and XPA. Region Descriptions Region Properties Region File Format Composite Region Template Region External Region Files Region Descriptions Circle Usage: circle x y radius Ellipse Usage: ellipse x y radius radius angle Box Usage: box x y width height angle Polygon Usage: polygon x1 y1 x2 y2 x3 y3 ... Point Usage: point x y # point=[circle|box|diamond|cross|x|arrow|boxcircle] [size] circle point x y Line Usage: line x1 y1 x2 y2 # line=[0|1] [0|1] Vector Usage: vector x1 y1 length angle # vector=[0|1] Text Usage: text x y # text={Your Text Here} text x y {Your Text Here} Ruler Usage: ruler x1 y1 x2 y2 # ruler=[pixels|degrees|arcmin|arcsec] Compass Usage: compass x1 y1 length # compass=<coordinate system> <north label> <east label> [0|1] [0|1] Projection Usage: projection x1 y1 x2 y2 width Annulus Usage: annulus x y inner outer n=# annulus x y r1 r2 r3... Ellipse Annulus Usage: ellipse x y r11 r12 r21 r22 n=# [angle] ellipse x y r11 r12 r21 r22 r31 r32 ... [angle] Box Annulus Usage: box x y w1 h1 w2 h2 [angle] box x y w1 h1 w2 h2 w3 h3 ...
[angle] Panda Usage: panda x y startangle stopangle nangle inner outer nradius Epanda Usage: epanda x y startangle stopangle nangle inner outer nradius [angle] Bpanda Usage: bpanda x y startangle stopangle nangle inner outer nradius [angle] Composite Usage: # composite x y angle Region Properties Each region has a number of properties associated with the region, which indicates how the region is to be rendered or manipulated. Properties are defined for a region in the comment section of the region description. The exception is the Include/Exclude property. It is set via '+' or '-' preceding the region. In addition, the Line, Point, and Ruler regions have unique properties, not shared by others. Not all properties are available via the GUI or are applicable for all regions. Text All regions may have text associated with them. Use the text property to set the text. Strings may be quoted with " or ' or {}. For best results, use {}. Example: circle(100,100,20) # text = {This message has both a " and ' in it} Color The color property specifies the color of the region when rendered. The follow 8 colors are supported: Example: circle(100,100,20) # color = green Dash List Sets dashed line parameters. This does not render the region in dashed lines. Example: circle(100,100,20) # dashlist = 8 3 Width Sets the line width used to render the region. Example: circle(100,100,20) # width = 2 Font The font property specifies the font family, size, weight, and slant of any text to be displayed along with the region. Example: circle(100,100,20) # font="times 12 bold italic" Can Select The Select property specifies if the user is allowed to select (hence, edit) the region via the GUI. For Regions used for catalogs and such, it is desirable that the user is unable to edit, move, or delete the region. Example: circle(100,100,20) # select = 1 Can Highlite The Highlite property specifies if the edit handles become visible when the region is selected. Example: circle(100,100,20) # hightlite = 1 Dash Render region using dashed lines using current dashlist value. Example: circle(100,100,20) # dash = 1 Fixed in Size The Fixed in Size property specifies that the region does not change in size as the image magnification factor changes. This allows the user to build complex pointer type regions. Example: circle(100,100,20) # fixed = 1 Can Edit The Edit property specifies if the user is allowed to edit the region via the GUI. Example: circle(100,100,20) # edit = 1 Can Move The Move property specifies if the user is allowed to move the region via the GUI. Example: circle(100,100,20) # move = 1 Can Rotate The Rotate property specifies if the user is allowed to rotate the region via the GUI. Example: circle(100,100,20) # rotate = 1 Can Delete The Delete property specifies if the user is allowed to delete the region via the GUI. Example: circle(100,100,20) # delete = 1 Include/Exclude The Include/Exclude properties flags the region with a boolean NOT for later analysis. Use '+' for include (default), '-' for exclude. Example: -circle(100,100,20) Source/Background The Source/Background properties flag the region for use with other analysis applications. The default is source Example: circle(100,100,20) # source circle(200,200,10) # background Tag All regions may have zero or more tags associated with it, which may be used for grouping and searching. Example: circle(100,100,20) # tag = {Group 1} tag = {Group 2} Line The line region may be rendered with arrows, one at each end. To indicate arrows, use the line property. 
A '1' indicates an arrow, '0' indicates no arrow. Example: line(100,100,200,200) # line= 1 1 Ruler The ruler region may display information in 'pixels', 'degrees', 'arcmin', or 'arcsec'. Use the ruler property to indicate which format to display distances in. Example: ruler(100,100,200,200) # ruler=arcmin Point Point regions have an associated type and size. Use the point property to set the point type. Example: point(100,100) # point=diamond 31 Default Properties The default properties are: text={} color=green font="helvetica 10 normal roman" select=1 edit=1 move=1 delete=1 highlite=1 include=1 fixed=0 Region File Format Syntax Region arguments may be separated with either a comma or space. Optional parentheses may be used a the beginning and end of a description. circle 100 100 10 circle(100 100 10) circle(100,100,10) Comments All lines that begin with # are comments and will be ignored. # This is a comment Delimiter All lines may be delimited with either a new-line or semi-colon. circle 100 100 10 ellipse 200 200 20 40 ; box 300 300 20 40 Header A DS9 region file may start with the following optional header: # Region file format: DS9 version 4.0 Global Properties Global properties affect all regions unless a local property is specified. The global keyword is first, followed by a list of keyword = value pairs. Multiple global property lines may be used within a region file. global color=green font="helvetica 10 normal roman" edit=1 move=1 delete=1 highlite=1 include=1 wcs=wcs Local Properties Local properties start with a # after a region description and only affect the region it is specified with. physical;circle(504,513,20) # color=red text={This is a Circle} Coordinate Systems For each region, it is important to specify the coordinate system used to interpret the region, i.e., to set the context in which the position and size values are interpreted. For this purpose, the following keywords are recognized: PHYSICAL # pixel coords of original file using LTM/LTV IMAGE # pixel coords of current file FK4, B1950 # sky coordinate systems FK5, J2000 # sky coordinate systems GALACTIC # sky coordinate systems ECLIPTIC # sky coordinate systems ICRS # currently same as J2000 LINEAR # linear wcs as defined in file AMPLIFIER # mosaic coords of original file using ATM/ATV DETECTOR # mosaic coords of original file usingDTM/DTV Mosaic Images While some coordinate systems are unique across mosaic images, others coordinate systems, such as image, or physical , are valid on a per segment basis. In this case, use tile to specify which
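# A short usage sketch for the Regions wrapper defined above: it collects a
# few annotations in physical (pixel) coordinates and writes them to
# "demo.reg", which can then be loaded into ds9. The file name, coordinates,
# and property values are arbitrary examples.
regions = Regions('demo', units='physical')
regions.addCircle(504, 513, size=20, color='red', text='target')
regions.addBox(300, 300, 40, 40, color='cyan')
regions.addLine(100, 100, 200, 200, width=3)
regions.addText(520, 540, text='brightest star', font='helvetica 12 bold roman')
regions.write()      # writes demo.reg in the working directory
print(regions)       # the same region list rendered as a string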
false return 200 OK and mix of successfully created objects and any with validation errors :param int unitdp: e.g. unitdp=4 – (Unit Decimal Places) You can opt in to use four decimal places for unit amounts :param bool _return_http_data_only: return received data only :param bool _preload_content: load received data in models :param bool _request_timeout: maximum wait time for response :return: Invoices """ # verify the required parameter 'xero_tenant_id' is set if xero_tenant_id is None: raise ValueError( "Missing the required parameter `xero_tenant_id` " "when calling `create_invoices`" ) # verify the required parameter 'invoices' is set if invoices is None: raise ValueError( "Missing the required parameter `invoices` " "when calling `create_invoices`" ) collection_formats = {} path_params = {} query_params = [] if summarize_errors is not empty: query_params.append(("summarizeErrors", summarize_errors)) if unitdp is not empty: query_params.append(("unitdp", unitdp)) header_params = { "xero-tenant-id": xero_tenant_id, } local_var_files = {} form_params = [] body_params = invoices # HTTP header `Accept` header_params["Accept"] = self.api_client.select_header_accept( ["application/json"] ) # HTTP header `Content-Type` header_params["Content-Type"] = self.api_client.select_header_content_type( ["application/json"] ) # Authentication setting auth_settings = ["OAuth2"] url = self.get_resource_url("/Invoices") try: return self.api_client.call_api( url, "PUT", path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type="Invoices", response_model_finder=self.get_model_finder(), auth_settings=auth_settings, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats, ) except exceptions.HTTPStatusException as error: raise translate_status_exception(error, self, "create_invoices") def create_item_history( self, xero_tenant_id, item_id, history_records, _return_http_data_only=True, _preload_content=True, _request_timeout=None, ): """Creates a history record for a specific item # noqa: E501 OAuth2 scope: accounting.settings :param str xero_tenant_id: Xero identifier for Tenant (required) :param str item_id: Unique identifier for an Item (required) :param HistoryRecords history_records: HistoryRecords containing an array of HistoryRecord objects in body of request (required) :param bool _return_http_data_only: return received data only :param bool _preload_content: load received data in models :param bool _request_timeout: maximum wait time for response :return: HistoryRecords """ # verify the required parameter 'xero_tenant_id' is set if xero_tenant_id is None: raise ValueError( "Missing the required parameter `xero_tenant_id` " "when calling `create_item_history`" ) # verify the required parameter 'item_id' is set if item_id is None: raise ValueError( "Missing the required parameter `item_id` " "when calling `create_item_history`" ) # verify the required parameter 'history_records' is set if history_records is None: raise ValueError( "Missing the required parameter `history_records` " "when calling `create_item_history`" ) collection_formats = {} path_params = { "ItemID": item_id, } query_params = [] header_params = { "xero-tenant-id": xero_tenant_id, } local_var_files = {} form_params = [] body_params = history_records # HTTP header `Accept` header_params["Accept"] = self.api_client.select_header_accept( ["application/json"] ) # HTTP header 
`Content-Type` header_params["Content-Type"] = self.api_client.select_header_content_type( ["application/json"] ) # Authentication setting auth_settings = ["OAuth2"] url = self.get_resource_url("/Items/{ItemID}/History") try: return self.api_client.call_api( url, "PUT", path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type="HistoryRecords", response_model_finder=self.get_model_finder(), auth_settings=auth_settings, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats, ) except exceptions.HTTPStatusException as error: raise translate_status_exception(error, self, "create_item_history") def create_items( self, xero_tenant_id, items, summarize_errors=empty, unitdp=empty, _return_http_data_only=True, _preload_content=True, _request_timeout=None, ): """Creates one or more items # noqa: E501 OAuth2 scope: accounting.settings :param str xero_tenant_id: Xero identifier for Tenant (required) :param Items items: Items with an array of Item objects in body of request (required) :param bool summarize_errors: If false return 200 OK and mix of successfully created objects and any with validation errors :param int unitdp: e.g. unitdp=4 – (Unit Decimal Places) You can opt in to use four decimal places for unit amounts :param bool _return_http_data_only: return received data only :param bool _preload_content: load received data in models :param bool _request_timeout: maximum wait time for response :return: Items """ # verify the required parameter 'xero_tenant_id' is set if xero_tenant_id is None: raise ValueError( "Missing the required parameter `xero_tenant_id` " "when calling `create_items`" ) # verify the required parameter 'items' is set if items is None: raise ValueError( "Missing the required parameter `items` " "when calling `create_items`" ) collection_formats = {} path_params = {} query_params = [] if summarize_errors is not empty: query_params.append(("summarizeErrors", summarize_errors)) if unitdp is not empty: query_params.append(("unitdp", unitdp)) header_params = { "xero-tenant-id": xero_tenant_id, } local_var_files = {} form_params = [] body_params = items # HTTP header `Accept` header_params["Accept"] = self.api_client.select_header_accept( ["application/json"] ) # HTTP header `Content-Type` header_params["Content-Type"] = self.api_client.select_header_content_type( ["application/json"] ) # Authentication setting auth_settings = ["OAuth2"] url = self.get_resource_url("/Items") try: return self.api_client.call_api( url, "PUT", path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type="Items", response_model_finder=self.get_model_finder(), auth_settings=auth_settings, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats, ) except exceptions.HTTPStatusException as error: raise translate_status_exception(error, self, "create_items") def create_linked_transaction( self, xero_tenant_id, linked_transaction, _return_http_data_only=True, _preload_content=True, _request_timeout=None, ): """Creates linked transactions (billable expenses) # noqa: E501 OAuth2 scope: accounting.transactions :param str xero_tenant_id: Xero identifier for Tenant (required) :param LinkedTransaction linked_transaction: LinkedTransaction object in body of request (required) :param bool 
_return_http_data_only: return received data only :param bool _preload_content: load received data in models :param bool _request_timeout: maximum wait time for response :return: LinkedTransactions """ # verify the required parameter 'xero_tenant_id' is set if xero_tenant_id is None: raise ValueError( "Missing the required parameter `xero_tenant_id` " "when calling `create_linked_transaction`" ) # verify the required parameter 'linked_transaction' is set if linked_transaction is None: raise ValueError( "Missing the required parameter `linked_transaction` " "when calling `create_linked_transaction`" ) collection_formats = {} path_params = {} query_params = [] header_params = { "xero-tenant-id": xero_tenant_id, } local_var_files = {} form_params = [] body_params = linked_transaction # HTTP header `Accept` header_params["Accept"] = self.api_client.select_header_accept( ["application/json"] ) # HTTP header `Content-Type` header_params["Content-Type"] = self.api_client.select_header_content_type( ["application/json"] ) # Authentication setting auth_settings = ["OAuth2"] url = self.get_resource_url("/LinkedTransactions") try: return self.api_client.call_api( url, "PUT", path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type="LinkedTransactions", response_model_finder=self.get_model_finder(), auth_settings=auth_settings, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats, ) except exceptions.HTTPStatusException as error: raise translate_status_exception(error, self, "create_linked_transaction") def create_manual_journal_attachment_by_file_name( self, xero_tenant_id, manual_journal_id, file_name, body, _return_http_data_only=True, _preload_content=True, _request_timeout=None, ): """Creates a specific attachment for a specific manual journal by file name # noqa: E501 OAuth2 scope: accounting.attachments :param str xero_tenant_id: Xero identifier for Tenant (required) :param str manual_journal_id: Unique identifier for a ManualJournal (required) :param str file_name: Name of the attachment (required) :param str body: Byte array of file in body of request (required) :param bool _return_http_data_only: return received data only :param bool _preload_content: load received data in models :param bool _request_timeout: maximum wait time for response :return: Attachments """ # verify the required parameter 'xero_tenant_id' is set if xero_tenant_id is None: raise ValueError( "Missing the required parameter `xero_tenant_id` " "when calling `create_manual_journal_attachment_by_file_name`" ) # verify the required parameter 'manual_journal_id' is set if manual_journal_id is None: raise ValueError( "Missing the required parameter `manual_journal_id` " "when calling `create_manual_journal_attachment_by_file_name`" ) # verify the required parameter 'file_name' is set if file_name is None: raise ValueError( "Missing the required parameter `file_name` " "when calling `create_manual_journal_attachment_by_file_name`" ) # verify the required parameter 'body' is set if body is None: raise ValueError( "Missing the required parameter `body` " "when calling `create_manual_journal_attachment_by_file_name`" ) collection_formats = {} path_params = { "ManualJournalID": manual_journal_id, "FileName": file_name, } query_params = [] header_params = { "xero-tenant-id": xero_tenant_id, } local_var_files = {} form_params = [] body_params = body # HTTP header 
`Accept` header_params["Accept"] = self.api_client.select_header_accept( ["application/json"] ) # HTTP header `Content-Type` header_params["Content-Type"] = self.api_client.select_header_content_type( ["application/octet-stream"] ) # Authentication setting auth_settings = ["OAuth2"] url = self.get_resource_url( "/ManualJournals/{ManualJournalID}/Attachments/{FileName}" ) try: return self.api_client.call_api( url, "PUT", path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type="Attachments", response_model_finder=self.get_model_finder(), auth_settings=auth_settings, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats, ) except exceptions.HTTPStatusException as error: raise translate_status_exception( error, self, "create_manual_journal_attachment_by_file_name" ) def create_manual_journal_history_record( self, xero_tenant_id, manual_journal_id, history_records, _return_http_data_only=True, _preload_content=True, _request_timeout=None, ): """Creates a history record for a specific manual journal # noqa: E501 OAuth2 scope: accounting.transactions :param str xero_tenant_id: Xero identifier for Tenant (required) :param str manual_journal_id: Unique identifier for a ManualJournal (required) :param HistoryRecords history_records: HistoryRecords containing an array of HistoryRecord objects in body of request (required) :param bool _return_http_data_only: return received data only :param bool _preload_content: load received data in models :param bool _request_timeout: maximum wait time for response :return: HistoryRecords """ # verify the required parameter 'xero_tenant_id' is set if xero_tenant_id is None: raise ValueError( "Missing the required parameter `xero_tenant_id` " "when calling `create_manual_journal_history_record`" ) # verify the required parameter 'manual_journal_id' is set if manual_journal_id is None: raise ValueError( "Missing the required parameter `manual_journal_id` "
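# A hedged sketch of driving one of the endpoint wrappers above. It assumes an
# ApiClient that has already been configured with a valid OAuth2 token, and the
# xero_python model imports, field names, and placeholder ids shown here are
# illustrative only and should be checked against the SDK version in use.
from xero_python.accounting import AccountingApi, Contact, Invoice, Invoices, LineItem

xero_tenant_id = "00000000-0000-0000-0000-000000000000"    # placeholder tenant id
accounting_api = AccountingApi(api_client)                 # api_client built elsewhere

invoice = Invoice(
    type="ACCREC",
    contact=Contact(contact_id="11111111-1111-1111-1111-111111111111"),
    line_items=[LineItem(description="Consulting", quantity=1.0,
                         unit_amount=120.0, account_code="200")],
)

created = accounting_api.create_invoices(
    xero_tenant_id,
    invoices=Invoices(invoices=[invoice]),
    summarize_errors=True,
    unitdp=4,
)
print(created.invoices[0].invoice_id)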
<reponame>mobergd/autoio """ Writes strings containing the rate parameters """ # Define the name_buffer between the longest reaction name and the Arr params BUFFER = 5 # Functions to write the parameters in the correct format def troe(reaction, high_params, low_params, troe_params, colliders=None, max_length=45, name_buffer=BUFFER): """ Write the string containing the Lindemann fitting parameters formatted for Chemkin input files. :param reaction: Chemkin formatted string with chemical equation :type reaction: str :param high_params: Arrhenius high-P parameters :type high_params: list of floats :param low_params: Arrhenius low-P parameters :type low_params: list of floats :param troe_params: Troe parameters: alpha, T***, T*, and T** (T** is optional) :type troe_params: list of floats :param colliders: names and collision enhancement factors for bath gases :type colliders: list((str, float)) :return troe_str: Chemkin reaction string with Troe parameters :rtype: str """ assert len(high_params) == 3, ( f'{len(high_params)} highP params for {reaction}, should be 3' ) assert len(low_params) == 3, ( f'{len(low_params)} lowP params for {reaction}, should be 3' ) assert len(troe_params) in (3, 4), ( f'{len(troe_params)} Troe params for {reaction}, should be 3 or 4' ) troe_str = _highp_str(reaction, high_params, max_length=max_length, name_buffer=BUFFER) troe_str += _lowp_str(reaction, low_params, max_length=max_length, name_buffer=BUFFER) troe_str += _troe_and_cheb_params('TROE', troe_params, newline=True, val='exp') # Write the collider efficiencies string if colliders: troe_str += _format_collider_string(colliders) return troe_str def lindemann(reaction, high_params, low_params, colliders=None, max_length=45, name_buffer=BUFFER): """ Write the string containing the Lindemann fitting parameters formatted for Chemkin input files :param reaction: Chemkin formatted string with chemical equation :type reaction: str :param high_params: Arrhenius high-P parameters :type high_params: list of floats :param low_params: Arrhenius low-P parameters :type low_params: list of floats :param colliders: names and collision enhancement factors for bath gases :type colliders: list((str, float)) :return lind_str: Chemkin reaction string with Lindemann parameters :rtype: str """ lind_str = _highp_str(reaction, high_params, max_length=max_length, name_buffer=BUFFER) lind_str += _lowp_str(reaction, low_params, max_length=max_length, name_buffer=BUFFER) # Write the collider efficiencies string if colliders: lind_str += _format_collider_string(colliders) return lind_str def plog(reaction, plog_param_dct, max_length=45, name_buffer=BUFFER): """ Write the string containing the PLOG fitting parameters formatted for Chemkin input files. 
:param reaction: Chemkin formatted string with chemical equation :type reaction: str :param plog_param_dct: Arrhenius fitting parameters at all pressures :type plog_param_dct: dict{pressure: [Arrhenius params]} :param max_length: length of the longest reaction name in the mechanism :type max_length: int :param name_buffer: buffer between the name and the Arrhenius params :type name_buffer: int :return plog_str: Chemkin reaction string with PLOG parameters :rtype: str """ def _pressure_str(pressure, params, max_length=45, name_buffer=BUFFER): """ Write a line in a PLOG string :param pressure: pressure at which to write the string :type pressure: float :param params: Arrhenius parameters at the specified pressure :type params: list of floats :param max_length: length of the longest reaction name in the mechanism :type max_length: int :param name_buffer: buffer between the name and the Arrhenius params :type name_buffer: int :return single_plog_str: Chemkin reaction string with PLOG parameters at a single pressure :rtype: str """ plog_buffer = str(max_length+name_buffer-12) [a_par, n_par, ea_par] = params single_plog_str = ('{0:<' + plog_buffer + 's}{1:<12.3E}{2:<10.3E}{3:>9.3f}{4:>9.0f} /\n').format( ' PLOG /', pressure, a_par, n_par, ea_par) return single_plog_str # Obtain a list of the pressures and sort from low to high pressure unsorted_pressures = plog_param_dct.keys() pressures = sorted(unsorted_pressures) # Write the header for the reaction, which includes the 1-atm fit if available if 1 in pressures: if len(plog_param_dct[1]) > 3: comment = 'Duplicates exist at 1 atm (see below); only a single 1-atm fit is written here' plog_str = _highp_str(reaction, plog_param_dct[1][:3], max_length=max_length, name_buffer=name_buffer, inline_comment=comment ) else: comment = 'Arrhenius parameters at 1 atm' plog_str = _highp_str(reaction, plog_param_dct[1], max_length=max_length, name_buffer=name_buffer, inline_comment=comment ) else: comment = 'No fit at 1 atm available' plog_str = _highp_str(reaction, [1.0, 0.0, 0.0], max_length=max_length, name_buffer=name_buffer, inline_comment=comment ) # Loop over each pressure for pressure in pressures: plog_params = plog_param_dct[pressure] assert len(plog_params) % 3 == 0, ( f'The number of Arrhenius params should be a multiple of 3 but is rather {len(plog_params)} for {reaction}' ) # Loop over however many Arrhenius sets there are, writing a PLOG line for each num_arr_sets = int(len(plog_params)/3) for idx in range(num_arr_sets): current_plog_params = plog_params[3*idx:3*(idx+1)] plog_str += _pressure_str(pressure, current_plog_params, max_length=max_length, name_buffer=name_buffer) return plog_str def chebyshev(reaction, one_atm_params, alpha, tmin, tmax, pmin, pmax, max_length=45, name_buffer=BUFFER): """ Write the string containing the Chebyshev fitting parameters formatted for Chemkin input files. 
:param reaction: Chemkin formatted string with chemical equation :type reaction: str :param one_atm_params: Arrhenius parameters at 1 atm :type one_atm_params: list of floats :param alpha: Chebyshev coefficient matrix :type alpha: numpy.ndarray :param tmin: minimum temperature Chebyshev model is defined :type tmin: float :param tmax: maximum temperature Chebyshev model is defined :type tmax: float :param pmin: minimum pressure Chebyshev model is defined :type pmin: float :param pmax: maximum pressure Chebyshev model is defined :type pmax: float :param max_length: length of the longest reaction name in the mechanism :type max_length: int :param name_buffer: buffer between the name and the Arrhenius params :type name_buffer: int :return cheb_str: Chemkin reaction string with Chebyshev parameters :rtype: str """ assert len(one_atm_params) == 3, ( f'For {reaction}, the length of one_atm_params is {len(one_atm_params)} instead of 3' ) # Write reaction header (with third body added) and high-pressure params reaction = _format_rxn_str_for_pdep(reaction, pressure='all') if one_atm_params != [1.0, 0.0, 0.0]: # if the params look real comment = 'Arrhenius parameters at 1 atm' else: # if the params look fake comment = None cheb_str = _highp_str(reaction, one_atm_params, max_length=max_length, name_buffer=name_buffer, inline_comment=comment ) # Write the temperature and pressure ranges cheb_str += _troe_and_cheb_params('TCHEB', (tmin, tmax), newline=True, val='float') cheb_str += _troe_and_cheb_params('PCHEB', (pmin, pmax), newline=True, val='float') # Write the dimensions of the alpha matrix nrows = len(alpha) ncols = len(alpha[0]) cheb_str += _troe_and_cheb_params('CHEB', (nrows, ncols), newline=True, val='int') # Write the parameters from the alpha matrix for row in alpha: cheb_str += _troe_and_cheb_params('CHEB', row, newline=True, val='exp') return cheb_str def arrhenius(reaction, high_params, colliders=None, max_length=45, name_buffer=BUFFER): """ Write the string containing the Arrhenius fitting parameters formatted for Chemkin input files :param reaction: Chemkin formatted string with chemical equation :type reaction: str :param high_params: Arrhenius high-P (i.e., high-P) parameters :type high_params: list of floats :param colliders: names and collision enhancement factors for bath gases :type colliders: list((str, float)) :param max_length: length of the longest reaction name in the mechanism :type max_length: int :param name_buffer: buffer between the name and the Arrhenius params :type name_buffer: int :return arr_str: Chemkin reaction string with Arrhenius parameters :rtype: str """ assert len(high_params) % 3 == 0, ( f'The number of Arrhenius params should be a multiple of 3 but is rather {len(high_params)} for {reaction}' ) # Loop over each set of three Arrhenius parameters and write to a string num_arr_sets = int(len(high_params)/3) arr_str = '' for idx in range(num_arr_sets): current_high_params = high_params[3*idx:3*(idx+1)] arr_str += _highp_str(reaction, current_high_params, max_length=max_length, name_buffer=name_buffer) if num_arr_sets > 1: arr_str += ' DUP \n' # Write the collider efficiencies string if colliders: arr_str += _format_collider_string(colliders) return arr_str # Various formatting functions def fit_info(pressures, temp_dct, err_dct): """ Write the string detailing the temperature ranges and fitting errors associated with the rate-constant fits at each pressure. 
:param pressures: pressures the k(T,P)s were calculated at :type pressures: list(float) :param temp_dct: temperature ranges (K) fits were done at each pressure :type temp_dct: dict[pressure, [temp range]] :param err_dct: errors associated with the fits at each pressure :type err_dct: dict[pressure, [mean err, max err]] :return inf_str: string containing all of the fitting info :rtype: str """ # Make temp, err dcts empty if fxn receives None; add 'high' to pressures temp_dct = temp_dct if temp_dct else {} err_dct = err_dct if err_dct else {} if 'high' in temp_dct or 'high' in err_dct: pressures += ['high'] # Check the temp and err dcts have same presures as rate_dcts if temp_dct: assert set(pressures) == set(temp_dct.keys()) err_dct = err_dct if err_dct else {} if err_dct: assert set(pressures) == set(err_dct.keys()) # Write string showing the temp fit range and fit
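# A brief usage sketch for the writers above, with made-up rate parameters:
# a PLOG block for a reaction fit at three pressures (atm) plus a single
# high-pressure Arrhenius reaction. Parameters follow the (A, n, Ea) ordering
# used throughout this module.
plog_param_dct = {
    0.1: [1.0e12, 0.5, 9000.0],
    1.0: [2.5e12, 0.4, 9500.0],
    10.0: [6.0e12, 0.3, 10200.0],
}
chemkin_str = plog('CH4+H=CH3+H2', plog_param_dct, max_length=20)
chemkin_str += arrhenius('H+O2=OH+O', [3.5e15, -0.4, 16600.0], max_length=20)
print(chemkin_str)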
self.status = False # basic setting def to_input(self, fout): fout.write("\t\t\t\t\t\t&EACH\n") for item in self.params: if self.params[item] is not None: fout.write("\t\t\t\t\t\t\t%s %s\n" % (item, str(self.params[item]))) fout.write("\t\t\t\t\t\t&END EACH\n") def set_params(self, params): for item in params: if len(item.split("-")) == 7: self.params[item.split("-")[-1]] = params[item] else: pass class cp2k_properties_linres_nmr_interpolator_conv_info: def __init__(self): self.params = { } self.status = False self.each = cp2k_properties_linres_nmr_interpolator_conv_info_each() # basic setting def to_input(self, fout): fout.write("\t\t\t\t\t&CONV_INFO\n") for item in self.params: if self.params[item] is not None: fout.write("\t\t\t\t\t\t%s %s\n" % (item, str(self.params[item]))) if self.each.status == True: self.each.to_input(fout) fout.write("\t\t\t\t\t&END CONV_INFO\n") def set_params(self, params): for item in params: if len(item.split("-")) == 6: self.params[item.split("-")[-1]] = params[item] elif item.split("-")[5] == "EACH": self.each.set_params({item: params[item]}) else: pass class cp2k_properties_linres_nmr_interpolator: def __init__(self): self.params = { } self.status = False self.conv_info = cp2k_properties_linres_nmr_interpolator_conv_info() # basic setting def to_input(self, fout): fout.write("\t\t\t\t&INTERPOLATOR\n") for item in self.params: if self.params[item] is not None: fout.write("\t\t\t\t\t%s %s\n" % (item, str(self.params[item]))) if self.conv_info.status == True: self.conv_info.to_input(fout) fout.write("\t\t\t\t&END INTERPOLATOR\n") def set_params(self, params): for item in params: if len(item.split("-")) == 5: self.params[item.split("-")[-1]] = params[item] elif item.split("-")[4] == "CONV_INFO": self.conv_info.set_params({item: params[item]}) else: pass class cp2k_properties_linres_nmr_print_chi_tensor_each: def __init__(self): self.params = { } self.status = False # basic setting def to_input(self, fout): fout.write("\t\t\t\t\t\t&EACH\n") for item in self.params: if self.params[item] is not None: fout.write("\t\t\t\t\t\t\t%s %s\n" % (item, str(self.params[item]))) fout.write("\t\t\t\t\t\t&END EACH\n") def set_params(self, params): for item in params: if len(item.split("-")) == 7: self.params[item.split("-")[-1]] = params[item] else: pass class cp2k_properties_linres_nmr_print_chi_tensor: def __init__(self): self.params = { } self.status = False self.each = cp2k_properties_linres_nmr_print_chi_tensor_each() # basic setting def to_input(self, fout): fout.write("\t\t\t\t\t&CHI_TENSOR\n") for item in self.params: if self.params[item] is not None: fout.write("\t\t\t\t\t\t%s %s\n" % (item, str(self.params[item]))) if self.each.status == True: self.each.to_input(fout) fout.write("\t\t\t\t\t&END CHI_TENSOR\n") def set_params(self, params): for item in params: if len(item.split("-")) == 6: self.params[item.split("-")[-1]] = params[item] elif item.split("-")[5] == "EACH": self.each.set_params({item: params[item]}) else: pass class cp2k_properties_linres_nmr_print_response_function_cubes_each: def __init__(self): self.params = { } self.status = False # basic setting def to_input(self, fout): fout.write("\t\t\t\t\t\t&EACH\n") for item in self.params: if self.params[item] is not None: fout.write("\t\t\t\t\t\t\t%s %s\n" % (item, str(self.params[item]))) fout.write("\t\t\t\t\t\t&END EACH\n") def set_params(self, params): for item in params: if len(item.split("-")) == 7: self.params[item.split("-")[-1]] = params[item] else: pass class 
cp2k_properties_linres_nmr_print_response_function_cubes:
    def __init__(self):
        self.params = {}
        self.status = False
        self.each = cp2k_properties_linres_nmr_print_response_function_cubes_each()

    # basic setting
    def to_input(self, fout):
        fout.write("\t\t\t\t\t&RESPONSE_FUNCTION_CUBES\n")
        for item in self.params:
            if self.params[item] is not None:
                fout.write("\t\t\t\t\t\t%s %s\n" % (item, str(self.params[item])))
        if self.each.status == True:
            self.each.to_input(fout)
        fout.write("\t\t\t\t\t&END RESPONSE_FUNCTION_CUBES\n")

    def set_params(self, params):
        for item in params:
            if len(item.split("-")) == 6:
                self.params[item.split("-")[-1]] = params[item]
            elif item.split("-")[5] == "EACH":
                self.each.set_params({item: params[item]})
            else:
                pass


class cp2k_properties_linres_nmr_print_shielding_tensor_each:
    def __init__(self):
        self.params = {}
        self.status = False

    # basic setting
    def to_input(self, fout):
        fout.write("\t\t\t\t\t\t&EACH\n")
        for item in self.params:
            if self.params[item] is not None:
                fout.write("\t\t\t\t\t\t\t%s %s\n" % (item, str(self.params[item])))
        fout.write("\t\t\t\t\t\t&END EACH\n")

    def set_params(self, params):
        for item in params:
            if len(item.split("-")) == 7:
                self.params[item.split("-")[-1]] = params[item]
            else:
                pass


class cp2k_properties_linres_nmr_print_shielding_tensor:
    def __init__(self):
        self.params = {}
        self.status = False
        self.each = cp2k_properties_linres_nmr_print_shielding_tensor_each()

    # basic setting
    def to_input(self, fout):
        fout.write("\t\t\t\t\t&SHIELDING_TENSOR\n")
        for item in self.params:
            if self.params[item] is not None:
                fout.write("\t\t\t\t\t\t%s %s\n" % (item, str(self.params[item])))
        if self.each.status == True:
            self.each.to_input(fout)
        fout.write("\t\t\t\t\t&END SHIELDING_TENSOR\n")

    def set_params(self, params):
        for item in params:
            if len(item.split("-")) == 6:
                self.params[item.split("-")[-1]] = params[item]
            elif item.split("-")[5] == "EACH":
                self.each.set_params({item: params[item]})
            else:
                pass


class cp2k_properties_linres_nmr_print:
    def __init__(self):
        self.params = {}
        self.status = False
        self.chi_tensor = cp2k_properties_linres_nmr_print_chi_tensor()
        self.response_function_cubes = cp2k_properties_linres_nmr_print_response_function_cubes()
        self.shielding_tensor = cp2k_properties_linres_nmr_print_shielding_tensor()

    # basic setting
    def to_input(self, fout):
        fout.write("\t\t\t\t&PRINT\n")
        for item in self.params:
            if self.params[item] is not None:
                fout.write("\t\t\t\t\t%s %s\n" % (item, str(self.params[item])))
        if self.chi_tensor.status == True:
            self.chi_tensor.to_input(fout)
        if self.response_function_cubes.status == True:
            self.response_function_cubes.to_input(fout)
        if self.shielding_tensor.status == True:
            self.shielding_tensor.to_input(fout)
        fout.write("\t\t\t\t&END PRINT\n")

    def set_params(self, params):
        for item in params:
            if len(item.split("-")) == 5:
                self.params[item.split("-")[-1]] = params[item]
            elif item.split("-")[4] == "CHI_TENSOR":
                self.chi_tensor.set_params({item: params[item]})
            elif item.split("-")[4] == "RESPONSE_FUNCTION_CUBES":
                self.response_function_cubes.set_params({item: params[item]})
            elif item.split("-")[4] == "SHIELDING_TENSOR":
                self.shielding_tensor.set_params({item: params[item]})
            else:
                pass


class cp2k_properties_linres_nmr:
    def __init__(self):
        self.params = {}
        self.status = False
        self.interpolator = cp2k_properties_linres_nmr_interpolator()
        self.printout = cp2k_properties_linres_nmr_print()

    # basic setting
    def to_input(self, fout):
        fout.write("\t\t\t&NMR\n")
        for item in self.params:
            if self.params[item] is not None:
fout.write("\t\t\t\t%s %s\n" % (item, str(self.params[item]))) if self.interpolator.status == True: self.interpolator.to_input(fout) if self.printout.status == True: self.printout.to_input(fout) fout.write("\t\t\t&END NMR\n") def set_params(self, params): for item in params: if len(item.split("-")) == 4: self.params[item.split("-")[-1]] = params[item] elif item.split("-")[3] == "INTERPOLATOR": self.interpolator.set_params({item: params[item]}) elif item.split("-")[3] == "PRINT": self.printout.set_params({item: params[item]}) else: pass class cp2k_properties_linres_polar_interpolator_conv_info_each: def __init__(self): self.params = { } self.status = False # basic setting def to_input(self, fout): fout.write("\t\t\t\t\t\t&EACH\n") for item in self.params: if self.params[item] is not None: fout.write("\t\t\t\t\t\t\t%s %s\n" % (item, str(self.params[item]))) fout.write("\t\t\t\t\t\t&END EACH\n") def set_params(self, params): for item in params: if len(item.split("-")) == 7: self.params[item.split("-")[-1]] = params[item] else: pass class cp2k_properties_linres_polar_interpolator_conv_info: def __init__(self): self.params = { } self.status = False self.each = cp2k_properties_linres_polar_interpolator_conv_info_each() # basic setting def to_input(self, fout): fout.write("\t\t\t\t\t&CONV_INFO\n") for item in self.params: if self.params[item] is not None: fout.write("\t\t\t\t\t\t%s %s\n" % (item, str(self.params[item]))) if self.each.status == True: self.each.to_input(fout) fout.write("\t\t\t\t\t&END CONV_INFO\n") def set_params(self, params): for item in params: if len(item.split("-")) == 6: self.params[item.split("-")[-1]] = params[item] elif item.split("-")[5] == "EACH": self.each.set_params({item: params[item]}) else: pass class cp2k_properties_linres_polar_interpolator: def __init__(self): self.params = { } self.status = False self.conv_info = cp2k_properties_linres_polar_interpolator_conv_info() # basic setting def to_input(self, fout): fout.write("\t\t\t\t&INTERPOLATOR\n") for item in self.params: if self.params[item] is not None: fout.write("\t\t\t\t\t%s %s\n" % (item, str(self.params[item]))) if self.conv_info.status == True: self.conv_info.to_input(fout) fout.write("\t\t\t\t&END INTERPOLATOR\n") def set_params(self, params): for item in params: if len(item.split("-")) == 5: self.params[item.split("-")[-1]] = params[item] elif item.split("-")[4] == "CONV_INFO": self.conv_info.set_params({item: params[item]}) else: pass class cp2k_properties_linres_polar_print_polar_matrix_each: def __init__(self): self.params = { } self.status = False # basic setting def to_input(self, fout): fout.write("\t\t\t\t\t\t&EACH\n") for item in self.params: if self.params[item] is not None: fout.write("\t\t\t\t\t\t\t%s %s\n" % (item, str(self.params[item]))) fout.write("\t\t\t\t\t\t&END EACH\n") def set_params(self, params): for item in params: if len(item.split("-")) == 7: self.params[item.split("-")[-1]] = params[item] else: pass class cp2k_properties_linres_polar_print_polar_matrix: def __init__(self): self.params = { } self.status = False self.each = cp2k_properties_linres_polar_print_polar_matrix_each() # basic setting def to_input(self, fout): fout.write("\t\t\t\t\t&POLAR_MATRIX\n") for item in self.params: if self.params[item] is not None: fout.write("\t\t\t\t\t\t%s %s\n" % (item, str(self.params[item]))) if self.each.status == True: self.each.to_input(fout) fout.write("\t\t\t\t\t&END POLAR_MATRIX\n") def set_params(self, params): for item in params: if len(item.split("-")) == 6: 
self.params[item.split("-")[-1]] = params[item] elif item.split("-")[5] == "EACH": self.each.set_params({item: params[item]}) else: pass class cp2k_properties_linres_polar_print: def __init__(self): self.params = { } self.status = False self.polar_matrix = cp2k_properties_linres_polar_print_polar_matrix() # basic setting def to_input(self, fout): fout.write("\t\t\t\t&PRINT\n") for item in self.params: if self.params[item] is not None: fout.write("\t\t\t\t\t%s %s\n" % (item, str(self.params[item]))) if self.polar_matrix.status == True: self.polar_matrix.to_input(fout) fout.write("\t\t\t\t&END PRINT\n") def set_params(self, params): for item in params: if len(item.split("-")) == 5: self.params[item.split("-")[-1]] = params[item] elif item.split("-")[4] == "POLAR_MATRIX": self.polar_matrix.set_params({item: params[item]}) else: pass class cp2k_properties_linres_polar: def __init__(self): self.params = { } self.status = False self.interpolator = cp2k_properties_linres_polar_interpolator() self.printout = cp2k_properties_linres_polar_print() # basic setting def to_input(self, fout): fout.write("\t\t\t&POLAR\n") for item in self.params: if self.params[item] is not None: fout.write("\t\t\t\t%s %s\n" % (item, str(self.params[item]))) if self.interpolator.status == True: self.interpolator.to_input(fout) if self.printout.status == True: self.printout.to_input(fout) fout.write("\t\t\t&END POLAR\n") def set_params(self, params): for item in params: if len(item.split("-")) == 4: self.params[item.split("-")[-1]] = params[item] elif item.split("-")[3] == "INTERPOLATOR": self.interpolator.set_params({item: params[item]}) elif item.split("-")[3] == "PRINT": self.printout.set_params({item: params[item]}) else: pass class cp2k_properties_linres_print_program_run_info_each: def __init__(self): self.params = { } self.status = False # basic setting def to_input(self, fout): fout.write("\t\t\t\t\t&EACH\n") for item in self.params: if self.params[item] is not None: fout.write("\t\t\t\t\t\t%s %s\n" % (item, str(self.params[item]))) if self.each.status == True: self.each.to_input(fout) fout.write("\t\t\t\t\t&END EACH\n") def set_params(self, params): for item in params: if len(item.split("-")) == 6: self.params[item.split("-")[-1]] = params[item] else: pass class cp2k_properties_linres_print_program_run_info: def __init__(self): self.params = { } self.status = False self.each = cp2k_properties_linres_print_program_run_info_each() # basic setting def to_input(self, fout): fout.write("\t\t\t\t&PROGRAM_RUN_INFO\n") for item in self.params: if self.params[item] is not None: fout.write("\t\t\t\t\t%s %s\n" % (item, str(self.params[item]))) if self.each.status == True: self.each.to_input(fout) fout.write("\t\t\t\t&END PROGRAM_RUN_INFO\n") def set_params(self, params): for item in params: if len(item.split("-")) == 5: self.params[item.split("-")[-1]] = params[item] elif item.split("-")[4] == "EACH": self.each.set_params({item: params[item]}) else: pass class cp2k_properties_linres_print_restart_each: def __init__(self): self.params = { } self.status = False # basic setting def to_input(self, fout): fout.write("\t\t\t\t\t&EACH\n") for item in self.params: if self.params[item] is not None: fout.write("\t\t\t\t\t\t%s %s\n" % (item, str(self.params[item]))) if self.each.status == True: self.each.to_input(fout) fout.write("\t\t\t\t\t&END EACH\n") def set_params(self, params): for item in params: if len(item.split("-")) == 6: self.params[item.split("-")[-1]] = params[item] else: pass class cp2k_properties_linres_print_restart: 
    def __init__(self):
        self.params = {}
        self.status = False
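# ----------------------------------------------------------------------------
# Minimal usage sketch for the generator classes above (not part of the original
# module). The dash-separated key prefix "PROPERTIES-LINRES-POLAR" and the CP2K
# keywords below are illustrative assumptions; only the depth-based routing in
# set_params and the &POLAR/&INTERPOLATOR writers are taken from the code above.
import sys

def _sketch_polar_section():
    polar = cp2k_properties_linres_polar()
    polar.status = True
    polar.interpolator.status = True
    polar.set_params({
        # depth 4 -> stored directly as a keyword of &POLAR
        "PROPERTIES-LINRES-POLAR-DO_RAMAN": "TRUE",
        # depth 5 with component [3] == "INTERPOLATOR" -> routed to &INTERPOLATOR
        "PROPERTIES-LINRES-POLAR-INTERPOLATOR-AINT_PRECOND": "SPL3_NOPBC_AINT1",
    })
    polar.to_input(sys.stdout)   # emits the &POLAR ... &END POLAR fragment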
# Probabilitybuckets # reimplemented by <NAME> (<EMAIL>) # according to # "Privacy Buckets: Upper and Lower Bounds for r-Fold Approximate Differential Privacy" # (https://eprint.iacr.org/2017/1034, version 2018 May 8th) import logging import numpy as np from scipy import optimize import copy import gc import os import pickle import errno import xxhash _infty_bucket_warning_bound = 1e-5 _virtual_error_warning_bound = 1e-3 class ProbabilityBuckets: def __init__(self, number_of_buckets = 100000, factor = None, dist1_array = None, dist2_array = None, caching_directory = None, free_infty_budget = 10**(-20), logging_level = logging.INFO, error_correction = True, skip_bucketing = False, **kwargs): self.logging_level = logging_level self.logger_setup(level=self.logging_level) for key in kwargs: self.logger.warning( "Warning: option {} not implemented".format(key) ) # infty_bucket is excluded self.number_of_buckets = self.number_of_buckets = int(4 * (number_of_buckets // 4) ) + 1 self.factor = np.float64(factor) self.log_factor = np.log(factor, dtype=np.float64) # arbitrarily chosen self.squaring_threshold_factor = np.float64(1.1) self.free_infty_budget = np.float64(free_infty_budget) # in case of skip_bucketing = True, caching_setup() has to be called by the derived class as caching depends on a filled self.bucket_distribution self.caching_super_directory = caching_directory self.caching_directory = None # will be set inside self.caching_setup() if self.caching_super_directory != None # skip bucketing if something else (e.g. derived class) creates buckets if not skip_bucketing: self.create_bucket_distribution(dist1_array, dist2_array, error_correction) self.caching_setup() def caching_setup(self): # setting up caching. Hashing beginning bucket_distribution to avoid name collisions if self.caching_super_directory: hasher = xxhash.xxh64(self.bucket_distribution, seed=0) hasher.update(str(self.error_correction)) hasher.update(str(self.free_infty_budget)) array_name = hasher.hexdigest() self.caching_directory = os.path.join(self.caching_super_directory, array_name) self.logger.info("Caching directory: {}".format(self.caching_directory)) def logger_setup(self, level): self.logger = logging.getLogger(__name__) # all instances use the same logger. 
Randomize the name if not appreciated if not len(self.logger.handlers): self.logger.setLevel(level=level) ch = logging.StreamHandler() ch.setLevel(level=level) ch.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s: %(message)s')) self.logger.addHandler(ch) def create_bucket_distribution(self, distr1, distr2, error_correction): # # For explanation of variables, see comment at the beginning of method "self.compose_with" # self.logger.info("Create bucket distribution") assert len(distr1) == len(distr2) distr1 = np.array(distr1, dtype=np.float64) distr2 = np.array(distr2, dtype=np.float64) self.bucket_distribution = np.zeros(self.number_of_buckets, dtype=np.float64) infty_mask = (distr2 == 0) null_mask = (distr1 == 0) indices = np.ma.log(np.divide(distr1, distr2, where=~infty_mask))/self.log_factor + self.number_of_buckets//2 indices = np.ceil(indices).astype(int) # set up error correction self.error_correction = error_correction virtual_error = np.zeros(self.number_of_buckets, dtype=np.float64) if self.error_correction: # we want errors(x,i) = P_B(x) - P_A(x) / f**i = P_B(x) - exp( log( P_A(x) ) - i * log_factor ) errors = distr2 - distr1 / self.factor**(indices - self.number_of_buckets//2) # errors = distr2 - np.exp(np.ma.log(distr1, dtype=np.float64) - (indices - self.number_of_buckets//2) * self.log_factor, dtype=np.float64) else: errors = np.zeros(len(distr1), dtype=np.float64) # fill buckets self.infty_bucket = np.float64(0.0) self.distinguishing_events = np.float64(0.0) for i, m_infty, m_null, a, err in zip(indices, infty_mask, null_mask, distr1, errors): if m_infty: self.distinguishing_events += a # self.infty_bucket += a continue if m_null: continue # i = int(np.ceil(i)) if i >= self.number_of_buckets: self.infty_bucket += a continue if i < 0: self.bucket_distribution[0] += a virtual_error[0] += err continue self.bucket_distribution[i] += a virtual_error[i] += err self.one_index = int(self.number_of_buckets // 2) self.u = np.int64(1) if self.error_correction: self.virtual_error = virtual_error self.real_error = self.virtual_error.copy() self.real_error[0] = 0.0 if self.infty_bucket > _infty_bucket_warning_bound: self.logger.warning("Infty bucket (numerical errors) is above {:g}. " "This error will exponentiate over compositions. " "Decrease factor or increase number of buckets to avoid this.".format(_infty_bucket_warning_bound)) def squaring(self): self.logger.info("Squaring") # collapse puts every two consecutive buckets in a single bucket collapse = lambda arr: np.sum(arr.reshape( len(arr) // 2, 2), axis=1) if self.error_correction: self.logger.debug(" Error correction.") # n_half_p1_to_n_half = [ -n/2+1, .. , n/2 ] # By adding self.one_index to this array, we get the corresponding self.bucket_distribution indices. # assumptions on next line: self.one_index % 2 == 1 and (self.number_of_buckets - 1) % 4 == 0 n_half_p1_to_n_half = np.arange(- (self.one_index//2) + 1, self.one_index // 2 + 1) n_half_p1_to_n_half_addr = slice(n_half_p1_to_n_half[0] + self.one_index, n_half_p1_to_n_half[-1] + self.one_index + 1) assert -n_half_p1_to_n_half[0] + 1 == n_half_p1_to_n_half[-1] # sanity check. 
If failing: num_of_buckets invalid # div(i) = (1/f**(2i-1) - 1/f**(2i) ) div = lambda i: (1.0 / self.factor**( 2 * i - 1) ) - ( 1.0 / self.factor**( 2 * i ) ) div_arr = div( n_half_p1_to_n_half ) def square_error(array): temp_error = np.zeros(self.number_of_buckets, dtype=np.float64) temp_error[n_half_p1_to_n_half_addr] = collapse(array[1:]) # here we add B(i) * (1/f**(2i-1) - 1/f**(2i) ) for every i in [-n/2+1, n/2] temp_error[n_half_p1_to_n_half_addr] += self.bucket_distribution[ (n_half_p1_to_n_half * 2) - 1 + self.one_index] * div_arr return temp_error self.real_error = square_error(self.real_error) temp_virtual_error = square_error(self.virtual_error) temp_virtual_error[self.one_index // 2] = self.virtual_error[0] self.virtual_error = temp_virtual_error self.u += 1 temp_bucket_distribution = np.zeros(self.number_of_buckets) lower_half, upper_half = self.split_array_equally(self.bucket_distribution) temp_bucket_distribution[self.one_index // 2:self.one_index] = collapse(lower_half) temp_bucket_distribution[self.one_index + 1:self.one_index + self.one_index // 2 + 1] = collapse(upper_half) temp_bucket_distribution[self.one_index] = self.bucket_distribution[self.one_index] self.bucket_distribution = temp_bucket_distribution self.log_factor *= 2 self.factor = np.exp(self.log_factor) gc.collect() def opt_compose_with(self, probability_buckets, after_squaring = False, threshold_factor = None): assert(after_squaring == False and threshold_factor == None), "Function is being called in an unsupported way. We should fix this." return self.compose_with(probability_buckets) def compose_with(self, pb, allow_modify_instance=False): self.logger.info("Composing") assert self.number_of_buckets == pb.number_of_buckets assert self.factor == pb.factor, "ERROR, trying to compose distributions with different factors" # we want to compute the product of the two probability buckets instances A and B: I_A.I_B ( . denotes a "inner" product) # let A (or B) denote the bucketlist and A_infty (or B_infty) denote the infinty bucket: I_A = (A, A_infty), I_B = (B, B_infty) # so, sligthly informal, we do # # I_A.I_B = (A, A_infty).(B, B_infty) # = A.B + sum(A) * B_infty + A_infty * sum(B) + A_infty * B_infty ( * denotes scalar multiplication) # = A.B + (1-A_infty) * B_infty + A_infty * (1 - B_infty) + A_infty * B_infty ( using 1 = sum(B) + B_infty) # = A.B + A_infty + B_infty - A_infty * B_infty # # as in the computation of A.B under- and overflows can occur, we get in the end six terms: # # I_A.I_B = (A.B)_under + A.B + (A.B)_over + A_infty + B_infty - A_infty * B_infty # # The first term goes to the first bucket, the second is the bucket distribution, and the last 4 terms we pack in the infty bucket # # UPDATE: there are now two "infty buckets": # self.distinguishing_events - keeps track of the real distinguishing events # self.infty_bucket - containing numerical overswap of A.B, and the mix terms of disting_events and infty_bucket. # Technically, there are two terms added to self.infty_bucket: -A_infty*B_dist - A_dist*B_infty # resulting from (A, A_infty, A_dist).(B, B_infty, B_dist) . 
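        # Worked numbers for the bookkeeping above (illustrative values, not taken
        # from the paper): with A_dist = 1e-9 and B_dist = 2e-9 the distinguishing
        # mass becomes 1e-9 + 2e-9 - 1e-9*2e-9 ~= 3e-9, and with A_infty = 1e-6,
        # B_infty = 4e-6 the infty bucket starts from
        # 1e-6 + 4e-6 - 1e-6*4e-6 - 1e-6*2e-9 - 1e-9*4e-6 ~= 5e-6
        # before the numerical overflow (A.B)_over is added in the loop below.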
# add "A_dist + B_dist - A_dist * B_dist" self.distinguishing_events = self.distinguishing_events + pb.distinguishing_events - (self.distinguishing_events * pb.distinguishing_events) # "A_infty + B_infty - A_infty * B_infty - A_infty*B_dist - A_dist*B_infty" temp_infty_bucket = np.float64(0) temp_infty_bucket += self.infty_bucket + pb.infty_bucket - (self.infty_bucket * pb.infty_bucket) temp_infty_bucket += - self.infty_bucket * pb.distinguishing_events - self.distinguishing_events * pb.infty_bucket temp_bucket_distribution = np.zeros(self.number_of_buckets, dtype=np.float64) while True: delta_infty_bucket = np.float64(0.0) # calculate (A.B)_over self.logger.info(" Compute (A.B)_over") temp_over = self.convolve_full(self.bucket_distribution[self.one_index+1:], pb.bucket_distribution[self.one_index+1:]) # add all infty parts together: "(A.B)_over + A_infty + B_infty - A_infty * B_infty" delta_infty_bucket = np.sum(temp_over[self.one_index-1:]) + temp_infty_bucket max_growth_allowed = self.squaring_threshold_factor * (self.infty_bucket + pb.infty_bucket) if delta_infty_bucket >= self.free_infty_budget and delta_infty_bucket > max_growth_allowed: self.squaring() if self.bucket_distribution is not pb.bucket_distribution: if not allow_modify_instance: # make a copy so we do not change the original instance pb = pb.copy() allow_modify_instance = True pb.squaring() continue break # compute all intermediate buckets "A.B" self.logger.info(" Compute (A.B)") temp_bucket_distribution[1:] = self.convolve_same(self.bucket_distribution, pb.bucket_distribution)[1:] # compute the first bucket (A.B)_under self.logger.info(" Compute (A.B)_under") temp_under = self.convolve_full(self.bucket_distribution[0:self.one_index+1], pb.bucket_distribution[0:pb.one_index+1]) temp_under = np.sum(temp_under[0:self.one_index+1]) temp_bucket_distribution[0] = max(0,temp_under) if self.error_correction: assert pb.error_correction convert_to_B = lambda distribution, factors: distribution / factors # factors = self.factor ** np.arange(-self.one_index, self.one_index + 1 ) # how numerically stable is that? factors = np.exp(self.log_factor * np.arange(-self.one_index, self.one_index + 1 ) ) temp_buck_distr_B_self = convert_to_B(self.bucket_distribution, factors) temp_buck_distr_B_pb = convert_to_B(pb.bucket_distribution, factors) temp_buck_distr_B_convolved = convert_to_B(temp_bucket_distribution, factors) # As l(i) = sum_{k+j=i} B^A_j/f**j * l^B(k) + B^A_k/f**k * l^B(j) + l^A(j) * l^B(k) # = sum_{k+j=i} ( B^A_j/f**j + l^A(j) ) * ( B^B_k/f**k + l^B(k) ) - B^A_j/f**j * B^B_k/f**k # We compute the latter one because it involves only one convolution and the substraction term we already know self.logger.info(" Compute real_error A.B") self.real_error = self.convolve_same(temp_buck_distr_B_self + self.real_error, temp_buck_distr_B_pb + pb.real_error) - temp_buck_distr_B_convolved self.real_error[0] = 0 self.logger.info(" Compute virtual_error
#!/usr/bin/env python """\ @file simperf_host_xml_parser.py @brief Digest collector's XML dump and convert to simple dict/list structure $LicenseInfo:firstyear=2008&license=mit$ Copyright (c) 2008-2009, Linden Research, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. $/LicenseInfo$ """ import sys, os, getopt, time import simplejson from xml import sax def usage(): print "Usage:" print sys.argv[0] + " [options]" print " Convert RRD's XML dump to JSON. Script to convert the simperf_host_collector-" print " generated RRD dump into JSON. Steps include converting selected named" print " fields from GAUGE type to COUNTER type by computing delta with preceding" print " values. Top-level named fields are:" print print " lastupdate Time (javascript timestamp) of last data sample" print " step Time in seconds between samples" print " ds Data specification (name/type) for each column" print " database Table of data samples, one time step per row" print print "Options:" print " -i, --in Input settings filename. (Default: stdin)" print " -o, --out Output settings filename. (Default: stdout)" print " -h, --help Print this message and exit." print print "Example: %s -i rrddump.xml -o rrddump.json" % sys.argv[0] print print "Interfaces:" print " class SimPerfHostXMLParser() # SAX content handler" print " def simperf_host_xml_fixup(parser) # post-parse value fixup" class SimPerfHostXMLParser(sax.handler.ContentHandler): def __init__(self): pass def startDocument(self): self.rrd_last_update = 0 # public self.rrd_step = 0 # public self.rrd_ds = [] # public self.rrd_records = [] # public self._rrd_level = 0 self._rrd_parse_state = 0 self._rrd_chars = "" self._rrd_capture = False self._rrd_ds_val = {} self._rrd_data_row = [] self._rrd_data_row_has_nan = False def endDocument(self): pass # Nasty little ad-hoc state machine to extract the elements that are # necessary from the 'rrdtool dump' XML output. The same element # name '<ds>' is used for two different data sets so we need to pay # some attention to the actual structure to get the ones we want # and ignore the ones we don't. 
def startElement(self, name, attrs): self._rrd_level = self._rrd_level + 1 self._rrd_capture = False if self._rrd_level == 1: if name == "rrd" and self._rrd_parse_state == 0: self._rrd_parse_state = 1 # In <rrd> self._rrd_capture = True self._rrd_chars = "" elif self._rrd_level == 2: if self._rrd_parse_state == 1: if name == "lastupdate": self._rrd_parse_state = 2 # In <rrd><lastupdate> self._rrd_capture = True self._rrd_chars = "" elif name == "step": self._rrd_parse_state = 3 # In <rrd><step> self._rrd_capture = True self._rrd_chars = "" elif name == "ds": self._rrd_parse_state = 4 # In <rrd><ds> self._rrd_ds_val = {} self._rrd_chars = "" elif name == "rra": self._rrd_parse_state = 5 # In <rrd><rra> elif self._rrd_level == 3: if self._rrd_parse_state == 4: if name == "name": self._rrd_parse_state = 6 # In <rrd><ds><name> self._rrd_capture = True self._rrd_chars = "" elif name == "type": self._rrd_parse_state = 7 # In <rrd><ds><type> self._rrd_capture = True self._rrd_chars = "" elif self._rrd_parse_state == 5: if name == "database": self._rrd_parse_state = 8 # In <rrd><rra><database> elif self._rrd_level == 4: if self._rrd_parse_state == 8: if name == "row": self._rrd_parse_state = 9 # In <rrd><rra><database><row> self._rrd_data_row = [] self._rrd_data_row_has_nan = False elif self._rrd_level == 5: if self._rrd_parse_state == 9: if name == "v": self._rrd_parse_state = 10 # In <rrd><rra><database><row><v> self._rrd_capture = True self._rrd_chars = "" def endElement(self, name): self._rrd_capture = False if self._rrd_parse_state == 10: self._rrd_capture = self._rrd_level == 6 if self._rrd_level == 5: if self._rrd_chars == "NaN": self._rrd_data_row_has_nan = True else: self._rrd_data_row.append(self._rrd_chars) self._rrd_parse_state = 9 # In <rrd><rra><database><row> elif self._rrd_parse_state == 9: if self._rrd_level == 4: if not self._rrd_data_row_has_nan: self.rrd_records.append(self._rrd_data_row) self._rrd_parse_state = 8 # In <rrd><rra><database> elif self._rrd_parse_state == 8: if self._rrd_level == 3: self._rrd_parse_state = 5 # In <rrd><rra> elif self._rrd_parse_state == 7: if self._rrd_level == 3: self._rrd_ds_val["type"] = self._rrd_chars self._rrd_parse_state = 4 # In <rrd><ds> elif self._rrd_parse_state == 6: if self._rrd_level == 3: self._rrd_ds_val["name"] = self._rrd_chars self._rrd_parse_state = 4 # In <rrd><ds> elif self._rrd_parse_state == 5: if self._rrd_level == 2: self._rrd_parse_state = 1 # In <rrd> elif self._rrd_parse_state == 4: if self._rrd_level == 2: self.rrd_ds.append(self._rrd_ds_val) self._rrd_parse_state = 1 # In <rrd> elif self._rrd_parse_state == 3: if self._rrd_level == 2: self.rrd_step = long(self._rrd_chars) self._rrd_parse_state = 1 # In <rrd> elif self._rrd_parse_state == 2: if self._rrd_level == 2: self.rrd_last_update = long(self._rrd_chars) self._rrd_parse_state = 1 # In <rrd> elif self._rrd_parse_state == 1: if self._rrd_level == 1: self._rrd_parse_state = 0 # At top if self._rrd_level: self._rrd_level = self._rrd_level - 1 def characters(self, content): if self._rrd_capture: self._rrd_chars = self._rrd_chars + content.strip() def _make_numeric(value): try: value = float(value) except: value = "" return value def simperf_host_xml_fixup(parser, filter_start_time = None, filter_end_time = None): # Fixup for GAUGE fields that are really COUNTS. They # were forced to GAUGE to try to disable rrdtool's # data interpolation/extrapolation for non-uniform time # samples. 
fixup_tags = [ "cpu_user", "cpu_nice", "cpu_sys", "cpu_idle", "cpu_waitio", "cpu_intr", # "file_active", # "file_free", # "inode_active", # "inode_free", "netif_in_kb", "netif_in_pkts", "netif_in_errs", "netif_in_drop", "netif_out_kb", "netif_out_pkts", "netif_out_errs", "netif_out_drop", "vm_page_in", "vm_page_out", "vm_swap_in", "vm_swap_out", #"vm_mem_total", #"vm_mem_used", #"vm_mem_active", #"vm_mem_inactive", #"vm_mem_free", #"vm_mem_buffer", #"vm_swap_cache", #"vm_swap_total", #"vm_swap_used", #"vm_swap_free", "cpu_interrupts", "cpu_switches", "cpu_forks" ] col_count = len(parser.rrd_ds) row_count = len(parser.rrd_records) # Process the last row separately, just to make all values numeric. for j in range(col_count): parser.rrd_records[row_count - 1][j] = _make_numeric(parser.rrd_records[row_count - 1][j]) # Process all other row/columns. last_different_row = row_count - 1 current_row = row_count - 2 while current_row >= 0: # Check for a different value than the previous row. If everything is the same # then this is probably just a filler/bogus entry. is_different = False for j in range(col_count): parser.rrd_records[current_row][j] = _make_numeric(parser.rrd_records[current_row][j]) if parser.rrd_records[current_row][j] != parser.rrd_records[last_different_row][j]: # We're good. This is a different row. is_different = True if not is_different: # This is a filler/bogus entry. Just ignore it. for j in range(col_count): parser.rrd_records[current_row][j] = float('nan') else: # Some tags need to be converted into deltas. for j in range(col_count): if parser.rrd_ds[j]["name"] in fixup_tags: parser.rrd_records[last_different_row][j] = \ parser.rrd_records[last_different_row][j] - parser.rrd_records[current_row][j] last_different_row = current_row current_row -= 1 # Set fixup_tags in the first row to 'nan' since they aren't useful anymore. for j in range(col_count): if parser.rrd_ds[j]["name"] in fixup_tags: parser.rrd_records[0][j] = float('nan') # Add a timestamp to each row and to the catalog. Format and name # chosen to match other simulator logging (hopefully). start_time = parser.rrd_last_update - (parser.rrd_step * (row_count - 1)) # Build a filtered list of rrd_records if we are limited to a time range. filter_records = False if filter_start_time is not None or filter_end_time is not None: filter_records = True filtered_rrd_records = [] if filter_start_time is None: filter_start_time = start_time * 1000 if filter_end_time is None: filter_end_time = parser.rrd_last_update * 1000 for i in range(row_count): record_timestamp = (start_time + (i * parser.rrd_step)) * 1000 parser.rrd_records[i].insert(0, record_timestamp) if filter_records: if filter_start_time <= record_timestamp and record_timestamp <= filter_end_time: filtered_rrd_records.append(parser.rrd_records[i]) if filter_records: parser.rrd_records = filtered_rrd_records parser.rrd_ds.insert(0, {"type": "GAUGE", "name": "javascript_timestamp"}) def main(argv=None): opts, args = getopt.getopt(sys.argv[1:], "i:o:h", ["in=", "out=", "help"]) input_file = sys.stdin output_file = sys.stdout for o, a in opts: if o in ("-i", "--in"): input_file = open(a, 'r') if o in ("-o", "--out"): output_file = open(a, 'w') if o in ("-h", "--help"): usage() sys.exit(0) # Using the SAX parser as it is at least 4X faster and far, far # smaller
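# ----------------------------------------------------------------------------
# Minimal driver sketch for the parser/fixup pair above (Python 2, matching the
# script; not part of the original). The XML literal is a toy fragment shaped like
# an 'rrdtool dump', just big enough to exercise the state machine.
_SAMPLE_RRD_XML = """<rrd>
  <lastupdate> 1200000000 </lastupdate>
  <step> 300 </step>
  <ds><name> cpu_user </name><type> GAUGE </type></ds>
  <rra><database>
    <row><v> 1.0 </v></row>
    <row><v> 4.0 </v></row>
  </database></rra>
</rrd>"""

def _sketch_parse_sample():
    handler = SimPerfHostXMLParser()
    sax.parseString(_SAMPLE_RRD_XML, handler)
    simperf_host_xml_fixup(handler)
    # After fixup: a javascript_timestamp column is prepended and GAUGE counters
    # such as cpu_user are converted to deltas (the first row becomes NaN).
    print simplejson.dumps({"ds": handler.rrd_ds, "database": handler.rrd_records})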
<filename>homeassistant/components/ais_shell_command/__init__.py """ Exposes regular shell commands as services. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/shell_command/ """ from homeassistant.const import CONF_IP_ADDRESS, CONF_MAC import asyncio import logging import os import homeassistant.components.ais_dom.ais_global as ais_global DOMAIN = "ais_shell_command" GLOBAL_X = 0 _LOGGER = logging.getLogger(__name__) @asyncio.coroutine def async_setup(hass, config): """Register the service.""" config = config.get(DOMAIN, {}) @asyncio.coroutine def change_host_name(service): yield from _change_host_name(hass, service) @asyncio.coroutine def execute_command(service): yield from _execute_command(hass, service) @asyncio.coroutine def execute_script(service): yield from _execute_script(hass, service) @asyncio.coroutine def execute_restart(service): yield from _execute_restart(hass, service) @asyncio.coroutine def execute_stop(service): yield from _execute_stop(hass, service) @asyncio.coroutine def key_event(service): yield from _key_event(hass, service) @asyncio.coroutine def scan_network_for_devices(service): yield from _scan_network_for_devices(hass, service) @asyncio.coroutine def scan_device(service): yield from _scan_device(hass, service) @asyncio.coroutine def show_network_devices_info(service): yield from _show_network_devices_info(hass, service) @asyncio.coroutine def led(service): yield from _led(hass, service) @asyncio.coroutine def init_local_sdcard(service): yield from _init_local_sdcard(hass, service) @asyncio.coroutine def flush_logs(service): yield from _flush_logs(hass, service) @asyncio.coroutine def ssh_remote_access(service): yield from _ssh_remote_access(hass, service) @asyncio.coroutine def set_ais_secure_android_id_dom(service): yield from _set_ais_secure_android_id_dom(hass, service) @asyncio.coroutine def hdmi_control_disable(service): yield from _hdmi_control_disable(hass, service) @asyncio.coroutine def hdmi_control_disable(service): yield from _hdmi_control_disable(hass, service) @asyncio.coroutine def change_wm_overscan(service): yield from _change_wm_overscan(hass, service) @asyncio.coroutine def disable_irda_remote(service): yield from _disable_irda_remote(hass, service) def change_remote_access(service): _change_remote_access(hass, service) # register services hass.services.async_register(DOMAIN, "change_host_name", change_host_name) hass.services.async_register(DOMAIN, "execute_command", execute_command) hass.services.async_register(DOMAIN, "execute_script", execute_script) hass.services.async_register(DOMAIN, "execute_restart", execute_restart) hass.services.async_register(DOMAIN, "execute_stop", execute_stop) hass.services.async_register(DOMAIN, "key_event", key_event) hass.services.async_register( DOMAIN, "scan_network_for_devices", scan_network_for_devices ) hass.services.async_register(DOMAIN, "scan_device", scan_device) hass.services.async_register( DOMAIN, "show_network_devices_info", show_network_devices_info ) hass.services.async_register(DOMAIN, "led", led) hass.services.async_register( DOMAIN, "set_ais_secure_android_id_dom", set_ais_secure_android_id_dom ) hass.services.async_register(DOMAIN, "init_local_sdcard", init_local_sdcard) hass.services.async_register(DOMAIN, "flush_logs", flush_logs) hass.services.async_register(DOMAIN, "change_remote_access", change_remote_access) hass.services.async_register(DOMAIN, "ssh_remote_access", ssh_remote_access) hass.services.async_register(DOMAIN, 
"hdmi_control_disable", hdmi_control_disable) hass.services.async_register(DOMAIN, "change_wm_overscan", change_wm_overscan) hass.services.async_register(DOMAIN, "disable_irda_remote", disable_irda_remote) return True @asyncio.coroutine def _change_host_name(hass, call): if "hostname" not in call.data: _LOGGER.error("No host name provided") return new_host_name = call.data["hostname"] file = "/data/data/pl.sviete.dom/.ais/ais-hostname" command = 'echo "net.hostname = ' + new_host_name + '" > ' + file import subprocess process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE) process.wait() command = 'su -c "/data/data/pl.sviete.dom/.ais/run_as_root.sh"' process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE) process.wait() def _change_remote_access(hass, call): import os text = " dostęp z Internetu" access = hass.states.get("input_boolean.ais_remote_access").state gate_id = hass.states.get("sensor.ais_secure_android_id_dom").state if access == "on": text = "Aktywuje " + text else: text = "Zatrzymuje " + text if ais_global.G_AIS_START_IS_DONE: hass.services.call("ais_ai_service", "say_it", {"text": text}) if access == "on": os.system("pm2 stop tunnel") os.system("pm2 delete tunnel") cmd = ( "pm2 start lt --name tunnel --output NULL --error NULL --restart-delay=30000 -- " "-h http://paczka.pro -p 8180 -s {}".format(gate_id) ) os.system(cmd) os.system("pm2 save") else: os.system("pm2 stop tunnel") os.system("pm2 delete tunnel") os.system("pm2 save") @asyncio.coroutine def _hdmi_control_disable(hass, call): comm = r'su -c "settings put global hdmi_control_enabled 0"' os.system(comm) @asyncio.coroutine def _change_wm_overscan(hass, call): if "value" not in call.data: _LOGGER.error("No value for overscan provided") return new_value = call.data["value"] cl = 0 ct = 0 cr = 0 cb = 0 try: import subprocess overscan = "" overscan = subprocess.check_output( "su -c \"dumpsys display | grep -o 'overscan.*' | cut -d')' -f1 | rev | cut -d'(' -f1 | rev\"", shell=True, timeout=10, ) overscan = overscan.decode("utf-8").replace("\n", "") if "," in overscan: cl = int(overscan.split(",")[0]) ct = int(overscan.split(",")[1]) cr = int(overscan.split(",")[2]) cb = int(overscan.split(",")[3]) except Exception: _LOGGER.warning("Can't get current overscan {}".format(overscan)) # [reset|LEFT,TOP,RIGHT,BOTTOM] if new_value == "reset": comm = r'su -c "wm overscan reset"' elif new_value == "left": comm = ( r'su -c "wm overscan ' + str(int(cl) - 3) + "," + str(ct) + "," + str(cr) + "," + str(cb) + '"' ) elif new_value == "top": comm = ( r'su -c "wm overscan ' + str(cl) + "," + str(int(ct) - 3) + "," + str(cr) + "," + str(cb) + '"' ) elif new_value == "right": comm = ( r'su -c "wm overscan ' + str(cl) + "," + str(ct) + "," + str(int(cr) - 3) + "," + str(cb) + '"' ) elif new_value == "bottom": comm = ( r'su -c "wm overscan ' + str(cl) + "," + str(ct) + "," + str(cr) + "," + str(int(cb) - 3) + '"' ) elif new_value == "-left": comm = ( r'su -c "wm overscan ' + str(int(cl) + 3) + "," + str(ct) + "," + str(cr) + "," + str(cb) + '"' ) elif new_value == "-top": comm = ( r'su -c "wm overscan ' + str(cl) + "," + str(int(ct) + 3) + "," + str(cr) + "," + str(cb) + '"' ) elif new_value == "-right": comm = ( r'su -c "wm overscan ' + str(cl) + "," + str(ct) + "," + str(int(cr) + 3) + "," + str(cb) + '"' ) elif new_value == "-bottom": comm = ( r'su -c "wm overscan ' + str(cl) + "," + str(ct) + "," + str(cr) + "," + str(int(cb) + 3) + '"' ) else: _LOGGER.error("Value for overscan provided 
{}".format(new_value)) return _LOGGER.info("comm: " + comm) os.system(comm) @asyncio.coroutine def _ssh_remote_access(hass, call): access = "on" if "access" in call.data: access = call.data["access"] gate_id = "ssh-" + hass.states.get("sensor.ais_secure_android_id_dom").state import os if access == "on": os.system("pm2 delete ssh-tunnel") os.system( "pm2 start lt --name ssh-tunnel --restart-delay=30000 -- -h http://paczka.pro -p 8888 -s " + gate_id ) os.system("pm2 save") _LOGGER.warning( "You have SSH access to gate on http://" + gate_id + ".paczka.pro" ) else: os.system("pm2 delete ssh-tunnel") os.system("pm2 save") @asyncio.coroutine def _key_event(hass, call): if "key_code" not in call.data: _LOGGER.error("No key_code") return key_code = call.data["key_code"] import subprocess subprocess.Popen( "su -c 'input keyevent " + key_code + "'", shell=True, stdout=None, stderr=None ) @asyncio.coroutine def _led(hass, call): if "brightness" not in call.data: _LOGGER.error("No brightness provided") return brightness = call.data["brightness"] script = str(os.path.dirname(__file__)) script += "/scripts/led.sh" import subprocess subprocess.Popen( "su -c ' " + script + " " + str(brightness) + "'", shell=True, stdout=None, stderr=None, ) @asyncio.coroutine def _set_ais_secure_android_id_dom(hass, call): # the G_AIS_SECURE_ANDROID_ID_DOM can be set from frame during the wifi_connection_info if ais_global.G_AIS_SECURE_ANDROID_ID_DOM is None: import subprocess try: android_id = subprocess.check_output( 'su -c "settings get secure android_id"', shell=True, timeout=15 ) android_id = android_id.decode("utf-8").replace("\n", "") except Exception as e: _LOGGER.info("Can't get secure gate id for the device! " + str(e)) from uuid import getnode as get_mac android_id = get_mac() ais_global.G_AIS_SECURE_ANDROID_ID_DOM = "dom-" + str(android_id) hass.states.async_set( "sensor.ais_secure_android_id_dom", ais_global.G_AIS_SECURE_ANDROID_ID_DOM, { "friendly_name": "Unikalny identyfikator bramki", "icon": "mdi:account-card-details", }, ) _LOGGER.info( "sensor.ais_secure_android_id_dom -> " + ais_global.G_AIS_SECURE_ANDROID_ID_DOM ) @asyncio.coroutine def _init_local_sdcard(hass, call): script = str(os.path.dirname(__file__)) script += "/scripts/init_local_sdcard.sh" import subprocess subprocess.Popen(script, shell=True, stdout=None, stderr=None) @asyncio.coroutine def _execute_command(hass, call): command = None ret_entity = None friendly_name = None icon = None if "command" not in call.data: _LOGGER.error("No command") return else: command = call.data["command"] if "entity_id" not in call.data: _LOGGER.debug("No entity_id to return the output") else: ret_entity = call.data["entity_id"] if "friendly_name" not in call.data: _LOGGER.debug("No friendly_name to set in returning output") else: friendly_name = call.data["friendly_name"] if "icon" not in call.data: _LOGGER.debug("No icon to set in returning output") else: icon = call.data["icon"] import subprocess process = subprocess.Popen( command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) output, err = process.communicate() _LOGGER.error("err: " + str(err)) if ret_entity is not None: hass.states.async_set( ret_entity, output, {"friendly_name": friendly_name, "icon": icon} ) @asyncio.coroutine def _execute_script(hass, call): if "script" not in call.data: _LOGGER.error("No script") return script = call.data["script"] if script == "reset_usb.sh": # take the full path script = str(os.path.dirname(__file__)) script += "/scripts/reset_usb.sh" import 
subprocess process = subprocess.Popen(script, shell=True, stdout=subprocess.PIPE) process.wait() _LOGGER.info("_execute_script, return: " + str(process.returncode)) @asyncio.coroutine def _execute_restart(hass, call): import subprocess subprocess.Popen("su -c reboot", shell=True, stdout=None, stderr=None) @asyncio.coroutine def _execute_stop(hass, call): import subprocess subprocess.Popen("su -c 'reboot -p'", shell=True, stdout=None, stderr=None) @asyncio.coroutine def _show_network_devices_info(hass, call): import homeassistant.components.ais_device_search_mqtt.sensor as dsm info = dsm.get_text() hass.states.async_set("sensor.network_devices_info_value", "ok", {"text": info}) @asyncio.coroutine def _scan_device(hass, call): if "url" not in call.data: _LOGGER.error("No url") return url = call.data["url"] url_a = call.data["url_a"] from requests_futures.sessions import FuturesSession from urllib.parse import urlparse import homeassistant.components.ais_device_search_mqtt.sensor as dsm session = FuturesSession() def bg_cb(resp, *args, **kwargs): try: # parse the json storing the result on the response object json_ws_resp = resp.json() hostname = urlparse(resp.url).hostname name = json_ws_resp["Status"]["FriendlyName"][0] # ip = json_ws_resp["StatusNET"]["IPAddress"] dsm.NET_DEVICES.append("- " + name + ", http://" + hostname) info = dsm.get_text() hass.states.async_set( "sensor.network_devices_info_value", "", {"text": info} ) except Exception: pass def bg_cb_a(resp, *args, **kwargs): try: # parse the json storing the result on the response object json_ws_resp = resp.json() model = json_ws_resp["Model"] manufacturer = json_ws_resp["Manufacturer"] ip = json_ws_resp["IPAddressIPv4"] mac = json_ws_resp["MacWlan0"] dsm.DOM_DEVICES.append( "- " + model + " " + manufacturer + ", http://" +
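# ----------------------------------------------------------------------------
# Minimal sketch of invoking one of the services registered above from another
# component or an automation (not part of this integration). "3" is an illustrative
# Android keycode string; any value accepted by `input keyevent` works.
async def _sketch_press_home(hass):
    await hass.services.async_call(
        "ais_shell_command", "key_event", {"key_code": "3"}, blocking=True
    )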
: "readwrite", "description" : """""", }, # scalar "macAuthenticationNamePrefix" : { "nodetype" : "scalar", "moduleName" : "ZYXEL-GS4012F-MIB", "oid" : "1.3.6.1.4.1.890.172.16.31.10.106.2", "status" : "current", "syntax" : { "type" : { "module" :"RFC1213-MIB", "name" : "DisplayString"}, }, "access" : "readwrite", "description" : """""", }, # scalar "macAuthenticationPassword" : { "nodetype" : "scalar", "moduleName" : "ZYXEL-GS4012F-MIB", "oid" : "1.3.6.1.4.1.890.172.16.31.10.106.3", "status" : "current", "syntax" : { "type" : { "module" :"RFC1213-MIB", "name" : "DisplayString"}, }, "access" : "readwrite", "description" : """""", }, # scalar "macAuthenticationTimeout" : { "nodetype" : "scalar", "moduleName" : "ZYXEL-GS4012F-MIB", "oid" : "1.3.6.1.4.1.890.172.16.31.10.106.4", "status" : "current", "syntax" : { "type" : { "module" :"", "name" : "Integer32"}, }, "access" : "readwrite", "description" : """""", }, # scalar "macAuthenticationPortTable" : { "nodetype" : "table", "moduleName" : "ZYXEL-GS4012F-MIB", "oid" : "1.3.6.1.4.1.890.172.16.31.10.106.5", "status" : "current", "description" : """""", }, # table "macAuthenticationPortEntry" : { "nodetype" : "row", "moduleName" : "ZYXEL-GS4012F-MIB", "oid" : "1.3.6.1.4.1.890.172.16.31.10.106.5.1", "status" : "current", "linkage" : [ "dot1dBasePort", ], "description" : """An entry in macAuthenticationPortTable.""", }, # row "macAuthenticationPortState" : { "nodetype" : "column", "moduleName" : "ZYXEL-GS4012F-MIB", "oid" : "1.3.6.1.4.1.890.172.16.31.10.106.5.1.1", "status" : "current", "syntax" : { "type" : { "module" :"P-BRIDGE-MIB", "name" : "EnabledStatus"}, }, "access" : "readwrite", "description" : """""", }, # column "mstp" : { "nodetype" : "node", "moduleName" : "ZYXEL-GS4012F-MIB", "oid" : "1.3.6.1.4.1.890.172.16.31.10.107", }, # node "mstpGen" : { "nodetype" : "node", "moduleName" : "ZYXEL-GS4012F-MIB", "oid" : "1.3.6.1.4.1.890.172.16.31.10.107.1", }, # node "mstpGenState" : { "nodetype" : "scalar", "moduleName" : "ZYXEL-GS4012F-MIB", "oid" : "1.3.6.1.4.1.890.172.16.31.10.107.1.1", "status" : "current", "syntax" : { "type" : { "module" :"P-BRIDGE-MIB", "name" : "EnabledStatus"}, }, "access" : "readwrite", "description" : """Enabled/disabled on the mrstp bridge.""", }, # scalar "mstpGenCfgIdName" : { "nodetype" : "scalar", "moduleName" : "ZYXEL-GS4012F-MIB", "oid" : "1.3.6.1.4.1.890.172.16.31.10.107.1.2", "status" : "current", "syntax" : { "type" : { "module" :"RFC1213-MIB", "name" : "DisplayString"}, }, "access" : "readwrite", "description" : """The configuration name that identifies the MST region and is used as one of the inputs in the computation of the MST Configuration Identifier.""", "reference>" : """12.12.3.4.2.b)""", }, # scalar "mstpGenCfgIdRevLevel" : { "nodetype" : "scalar", "moduleName" : "ZYXEL-GS4012F-MIB", "oid" : "1.3.6.1.4.1.890.172.16.31.10.107.1.3", "status" : "current", "syntax" : { "type" : { "module" :"", "name" : "Integer32"}, }, "access" : "readwrite", "description" : """This object identifies the MST revision that identifies the MST region and is used as one of the inputs in the computation of the MST configuration Identifier.""", "reference>" : """12.12.3.4.2.c)""", }, # scalar "mstpGenCfgIdCfgDigest" : { "nodetype" : "scalar", "moduleName" : "ZYXEL-GS4012F-MIB", "oid" : "1.3.6.1.4.1.890.172.16.31.10.107.1.4", "status" : "current", "syntax" : { "type" : { "basetype" : "OctetString", "ranges" : [ { "min" : "16", "max" : "16" }, ], "range" : { "min" : "16", "max" : "16" }, }, }, "access" : "readonly", 
"description" : """Configuration Digest.""", "reference>" : """12.12.3.3.3.a.4""", }, # scalar "mstpGenHelloTime" : { "nodetype" : "scalar", "moduleName" : "ZYXEL-GS4012F-MIB", "oid" : "1.3.6.1.4.1.890.172.16.31.10.107.1.5", "status" : "current", "syntax" : { "type" : { "basetype" : "Integer32", "parent module" : { "name" : "BRIDGE-MIB", "type" : "Timeout", }, "ranges" : [ { "min" : "1", "max" : "10" }, ], "range" : { "min" : "1", "max" : "10" }, }, }, "access" : "readwrite", "description" : """""", }, # scalar "mstpGenMaxAge" : { "nodetype" : "scalar", "moduleName" : "ZYXEL-GS4012F-MIB", "oid" : "1.3.6.1.4.1.890.172.16.31.10.107.1.6", "status" : "current", "syntax" : { "type" : { "basetype" : "Integer32", "parent module" : { "name" : "BRIDGE-MIB", "type" : "Timeout", }, "ranges" : [ { "min" : "6", "max" : "40" }, ], "range" : { "min" : "6", "max" : "40" }, }, }, "access" : "readwrite", "description" : """""", }, # scalar "mstpGenForwardDelay" : { "nodetype" : "scalar", "moduleName" : "ZYXEL-GS4012F-MIB", "oid" : "1.3.6.1.4.1.890.172.16.31.10.107.1.7", "status" : "current", "syntax" : { "type" : { "basetype" : "Integer32", "parent module" : { "name" : "BRIDGE-MIB", "type" : "Timeout", }, "ranges" : [ { "min" : "4", "max" : "30" }, ], "range" : { "min" : "4", "max" : "30" }, }, }, "access" : "readwrite", "description" : """""", }, # scalar "mstpGenMaxHops" : { "nodetype" : "scalar", "moduleName" : "ZYXEL-GS4012F-MIB", "oid" : "1.3.6.1.4.1.890.172.16.31.10.107.1.8", "status" : "current", "syntax" : { "type" : { "basetype" : "Integer32", "ranges" : [ { "min" : "4", "max" : "30" }, ], "range" : { "min" : "4", "max" : "30" }, }, }, "access" : "readwrite", "description" : """13.22.f)""", }, # scalar "mstpGenCistRootPathCost" : { "nodetype" : "scalar", "moduleName" : "ZYXEL-GS4012F-MIB", "oid" : "1.3.6.1.4.1.890.172.16.31.10.107.1.9", "status" : "current", "syntax" : { "type" : { "module" :"", "name" : "Integer32"}, }, "access" : "readonly", "description" : """.""", }, # scalar "mstpGenCistRootBrid" : { "nodetype" : "scalar", "moduleName" : "ZYXEL-GS4012F-MIB", "oid" : "1.3.6.1.4.1.890.172.16.31.10.107.1.10", "status" : "current", "syntax" : { "type" : { "basetype" : "OctetString", "ranges" : [ { "min" : "32", "max" : "32" }, ], "range" : { "min" : "32", "max" : "32" }, }, }, "access" : "readonly", "description" : """.""", }, # scalar "mstMapTable" : { "nodetype" : "table", "moduleName" : "ZYXEL-GS4012F-MIB", "oid" : "1.3.6.1.4.1.890.172.16.31.10.107.20", "status" : "current", "description" : """This table contains one entry for each instance of MSTP.""", }, # table "mstMapEntry" : { "nodetype" : "row", "moduleName" : "ZYXEL-GS4012F-MIB", "oid" : "1.3.6.1.4.1.890.172.16.31.10.107.20.1", "create" : "true", "status" : "current", "linkage" : [ "mstMapIndex", ], "description" : """A conceptual row containing the status of the MSTP instance.""", }, # row "mstMapIndex" : { "nodetype" : "column", "moduleName" : "ZYXEL-GS4012F-MIB", "oid" : "1.3.6.1.4.1.890.172.16.31.10.107.20.1.1", "status" : "current", "syntax" : { "type" : { "module" :"ZYXEL-GS4012F-MIB", "name" : "MstiOrCistInstanceIndex"}, }, "access" : "noaccess", "description" : """Uniquely identifies an instance. The entry of this table with index 0 presents always, represents CIST. 
When SET operation """, }, # column "mstMapVlans1k" : { "nodetype" : "column", "moduleName" : "ZYXEL-GS4012F-MIB", "oid" : "1.3.6.1.4.1.890.172.16.31.10.107.20.1.2", "status" : "current", "syntax" : { "type" : { "basetype" : "OctetString", "ranges" : [ { "min" : "0", "max" : "128" }, ], "range" : { "min" : "0", "max" : "128" }, }, }, "access" : "readwrite", "description" : """A string of octets containing one bit per VLAN. The first octet corresponds to VLANs with VlanIndex values 1 through 8; the second octet to VLANs 9 through 16 etc. The most significant bit of each octet corresponds to the lowest VlanIndex value in that octet. For each VLAN that is mapped to this MSTP instance, the bit corresponding to that VLAN is set to '1'. Empty (zero) most significant octes are not mandatory.""", }, # column "mstMapVlans2k" : { "nodetype" : "column", "moduleName" : "ZYXEL-GS4012F-MIB", "oid" : "1.3.6.1.4.1.890.172.16.31.10.107.20.1.3", "status" : "current", "syntax" : { "type" : { "basetype" : "OctetString", "ranges" : [ { "min" : "0", "max" : "128" }, ], "range" : { "min" : "0", "max" : "128" }, }, }, "access" : "readwrite", "description" : """A string of octets containing one bit per VLAN for VLANS with VlanIndex values 1024 through 2047. The first octet corresponds to VLANs with VlanIndex values 1024 through 1031; the second octet to VLANs 1032 through 1039 etc. The most significant bit of each octet corresponds to the lowest VlanIndex value in that octet. For each VLAN that is mapped to this MSTP instance, the bit corresponding to that VLAN is set to '1'. Empty (zero) most significant octes are not mandatory.""", }, # column "mstMapVlans3k" : { "nodetype" : "column", "moduleName" : "ZYXEL-GS4012F-MIB", "oid" : "1.3.6.1.4.1.890.172.16.31.10.107.20.1.4", "status" : "current", "syntax" : { "type" : { "basetype" : "OctetString", "ranges" : [ { "min" : "0", "max" : "128" }, ], "range" : { "min" : "0", "max" : "128" }, }, }, "access" : "readwrite", "description" : """A string of octets containing one bit per VLAN for VLANS with VlanIndex values 2048 through 3071. The first octet corresponds to VLANs with VlanIndex values of 2048 through 2055; the second octet to VLANs 2056 through 2063 etc. The most significant bit of each octet corresponds
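# ----------------------------------------------------------------------------
# Minimal sketch of consuming MIB entries shaped like the ones above. The name of
# the dict that holds them is not visible in this excerpt, so `mib_nodes` below is
# a hypothetical stand-in for that mapping of node name -> definition.
def _sketch_mib_lookup(mib_nodes):
    def oid_of(name):
        return mib_nodes[name]["oid"]

    def in_declared_range(name, value):
        rng = mib_nodes[name]["syntax"]["type"].get("range")
        return rng is None or int(rng["min"]) <= value <= int(rng["max"])

    print(oid_of("mstpGenHelloTime"))                # "1.3.6.1.4.1.890.172.16.31.10.107.1.5"
    print(in_declared_range("mstpGenHelloTime", 2))  # True: declared range is 1..10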
desc='Auxiliary power unit weight \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component') win = Float(1.0, desc='Instrument Group weight \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component') whyd = Float(1.0, desc='Hydraulics Group weight \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component') welec = Float(1.0, desc='Electrical Group weight \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component') wavonc = Float(1.0, desc='Avionics Group weight \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component') warm = Float(0.0, desc='Armament Group weight - includes thermal protection system or armor and fixed weapons\n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component') wfurn = Float(1.0, desc='Furnishings Group weight \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component') wac = Float(1.0, desc='Air Conditioning Group weight \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component') wai = Float(1.0, desc='Transports: Anti-icing Group weight\n Fighters: Auxiliary gear \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component') wuf = Float(1.0, desc='Weight of unusable fuel \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component') woil = Float(1.0, desc='Engine oil weight \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. 
but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component') wsrv = Float(1.0, desc='Transports: Passenger service weight\n Fighters: Ammunition and nonfixed weapons weight \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component') wcon = Float(1.0, desc='Transports: Cargo and baggage container weight Fighters: Miscellaneous operating items weight If < 0.5, as a fraction of Gross Weight \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component') wauxt = Float(1.0, desc='Auxiliary fuel tank weight (Fighters only) \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component') wflcrb = Float(1.0, desc='Total weight of flight crew and baggage\n (Defaults: Transports - 225.*NFLCR\n Fighters - 215.*NFLCR\n Carrier-based - 180.*NFLCR)\n \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component') wstuab = Float(1.0, desc='Total weight of cabin crew and baggage (Default = 155.*NSTU + 200.*NGALC) \n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component') ewmarg = Float(0.0, desc='Empty weight margin (Special Option) - delta weight added to Weight Empty. If abs(EWMARG) < 5., it is interpreted as a fraction of calculated Weight Empty. May be positive or negative\n < 0., negative of starting weight which will be modified as appropriate during optimization or parametric variation\n \n = 0., no weight for that component\n \n > 0. 
but < 5., scale factor applied to internally computed weight\n \n > 5., actual fixed weight for component') class FlopsWrapper_input_wtin_OEW_Calculations(VariableTree): """Container for input.wtin.OEW_Calculations.""" # OpenMDAO Public Variables ispowe = Enum(0, (0,1), desc='= 0, Normal FLOPS weight equations will be used\n= 1, Special equation for Operating Weight Empty will be used:\n \n OWE = SPWTH*THRUST + SPWSW*SW + SPWGW*GW + SPWCON\n \n Structures group weights will be scaled to meet the calculated OWE.\n \n = 2, Use response surface for weights - available only in DOSS version', aliases=('Normal FLOPS', 'Special eqn for OEW')) spwth = Float(2.2344, units='lb/lb', desc='Multiplier for thrust/engine in special equation for Operating Weight Empty\nSPWTH = \n AIRFLOWref\n(PODsclr + dOEWsclr) * ------------\n SLSTHRUSTref\n ') spwsw = Float(9.5, units='psf', desc='Multiplier for wing area in special equation for Operating Weight Empty') spwgw = Float(0.104087, units='lb/lb', desc='Multiplier for gross weight in special equation for Operating Weight Empty\nSPWGW = \n MTOWsclr+OEWgrwth*MTOWgrwth\n -----------------------------------\n 1. + MTOWgrowth\n\n') spwcon = Float(38584.0, units='lb', desc='Constant weight term in special equation for Operating Weight Empty\n \nSPWCON = OEWuncycled\n - MTOWscalar*MTOWuncycled\n - WINGscalar*SWref\n - (PODscalar + dOEWscalar)\n *AIRFLOWref\n') class FlopsWrapper_input_wtin_Landing_Gear(VariableTree): """Container for input.wtin.Landing_Gear""" # OpenMDAO Public Variables xmlg = Float(0.0, units='inch', desc='Length of extended main landing gear oleo (Default is computed internally)') xnlg = Float(0.0, units='inch', desc='Length of extended nose landing gear oleo (Default is computed internally)') wldg = Float(0.0, units='lb', desc='Design landing weight (if WRATIO is input in Namelist &AERIN, WLDG = GW*WRATIO) See Namelist &AERIN for WRATIO defaults.') mldwt = Enum(0, (1,0), desc='= 1, The design landing weight is set to the end of descent weight for the main mission plus DLDWT. Use only if IRW = 1 in Namelist &MISSIN. = 0, The design landing weight is determined by WLDG above or WRATIO in Namelist &AERIN') dldwt = Float(0.0, units='lb', desc='Delta landing weight for MLDWT = 1') carbas = Float(0.0, desc='Carrier based aircraft switch, affects weight
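# ----------------------------------------------------------------------------
# Illustrative helper (not part of the wrapper) encoding the weight-input convention
# repeated in the `desc` strings above: negative = starting weight to be varied,
# 0 = component omitted, between 0 and 5 = scale factor on the internally computed
# weight, 5 or more = actual fixed weight in lb. How FLOPS treats exactly 5.0 is not
# spelled out above, so this sketch folds it into the fixed-weight branch.
def resolve_flops_weight(user_value, computed_weight):
    if user_value < 0.0:
        return -user_value                     # starting weight, free to vary
    if user_value == 0.0:
        return 0.0                             # component not modeled
    if user_value < 5.0:
        return user_value * computed_weight    # scale factor on internal estimate
    return user_value                          # fixed component weight

# e.g. whyd = 1.0 keeps the internally computed hydraulics weight as-is, while
# whyd = 850.0 pins the hydraulics group at 850 lb:
#   resolve_flops_weight(1.0, 430.0)   -> 430.0
#   resolve_flops_weight(850.0, 430.0) -> 850.0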
representing the last RNN's hidden state """ tbc, lengths = inputs packed = torch.nn.utils.rnn.pack_padded_sequence(tbc, lengths.cpu(), batch_first=self.batch_first) output, hidden = self.rnn(packed) output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first) return self.extract_top_state(hidden)[0] # TODO: this module only exists in pytorch. Do we eliminate it or put it in both? class LSTMEncoderSequenceHiddenContext(LSTMEncoderBase): def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]: tbc, lengths = inputs packed = torch.nn.utils.rnn.pack_padded_sequence(tbc, lengths.cpu(), batch_first=self.batch_first) output, hidden = self.rnn(packed) output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first) return output, self.extract_top_state(hidden) class BiLSTMEncoderBase(nn.Module): """BiLSTM encoder base for a set of encoders producing various outputs. All BiLSTM encoders inheriting this class will trim the input to the max length given in the batch. For example, if the input sequence is `[B, T, C]` and `S = max(lengths)`, then the resulting sequence, if produced, will be length `S` (or more precisely, `[B, S, H]`). Because it's bidirectional, half of the hidden units given in the constructor will be applied to the forward direction and half to the backward direction, and these will get concatenated. *PyTorch Note*: In PyTorch, it's more common for the input shape to be temporal length first (`[T, B, H]`) and this is the PyTorch default. There is an extra parameter in all of these models called `batch_first` which controls this. Currently, the default is time first (`batch_first=False`), which differs from TensorFlow. To match the TF impl, set `batch_first=True`. *PyTorch Note*: Most `BiLSTMEncoder` variants just define the `forward`. This module cannot provide the same utility as the TensorFlow `BiLSTMEncoder` base right now, because the JIT isn't handling subclassing of forward properly. """ def __init__( self, insz: int, hsz: int, nlayers: int, pdrop: float = 0.0, requires_length: bool = True, batch_first: bool = False, unif: float = 0, initializer: str = None, **kwargs, ): """Produce a stack of LSTMs with dropout performed on all but the last layer. :param insz: The size of the input :param hsz: The number of hidden units per BiLSTM (`hsz//2` used for each direction and concatenated) :param nlayers: The number of layers of BiLSTMs to stack :param pdrop: The probability of dropping a unit value during dropout, defaults to 0 :param requires_length: Does this encoder require an input length in its inputs (defaults to `True`) :param batch_first: Should we do batch first input or time-first input? Defaults to `False` (differs from TF!) :param unif: PyTorch only! Initialization parameters for RNN :param initializer: PyTorch only!
A string describing optional initialization type for RNN """ super().__init__() self.requires_length = requires_length self.batch_first = batch_first self.nlayers = nlayers if nlayers == 1: pdrop = 0.0 self.rnn = torch.nn.LSTM(insz, hsz // 2, nlayers, dropout=pdrop, bidirectional=True, batch_first=batch_first) if initializer == "ortho": nn.init.orthogonal_(self.rnn.weight_hh_l0) nn.init.orthogonal_(self.rnn.weight_ih_l0) elif initializer == "he" or initializer == "kaiming": nn.init.kaiming_uniform_(self.rnn.weight_hh_l0) nn.init.kaiming_uniform_(self.rnn.weight_ih_l0) elif unif > 0: for weight in self.rnn.parameters(): weight.data.uniform_(-unif, unif) else: nn.init.xavier_uniform_(self.rnn.weight_hh_l0) nn.init.xavier_uniform_(self.rnn.weight_ih_l0) self.output_dim = hsz def extract_top_state(self, state): # Select the topmost state with -1 and the only direction is forward (select with 0) return tuple(s.view(self.nlayers, 1, -1, self.output_dim)[-1, 0] for s in state) # TODO: this module only exists in pytorch. Do we eliminate it or put it in both? class BiLSTMEncoderSequenceHiddenContext(BiLSTMEncoderBase): def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]: tbc, lengths = inputs packed = torch.nn.utils.rnn.pack_padded_sequence(tbc, lengths.cpu(), batch_first=self.batch_first) output, hidden = self.rnn(packed) output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first) return output, self.extract_top_state(concat_state_dirs(hidden)) class BiLSTMEncoderAll(BiLSTMEncoderBase): """BiLSTM encoder that passes along the full output and hidden states for each layer. Takes a tuple containing a tensor input of shape `[B, T, C]` and lengths of shape `[B]` This returns a 2-tuple of outputs `[B, S, H]` where `S = max(lengths)`, for the output vector sequence, and a tuple of hidden vector `[L, B, H]` and context vector `[L, B, H]`, respectively *PyTorch note*: Takes a vector of shape `[B, T, C]` or `[B, C, T]`, depending on input specification of `batch_first`. Also note that in PyTorch, `batch_first` defaults to `False` """ def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]: """ :param inputs: A tuple containing the input tensor `[B, T, C]` or `[B, H, C]` and a length `[B]` :return: An output tensor `[B, S, H]` or `[B, H, S]`, and tuple of hidden `[L, B, H]` and context `[L, B, H]` """ tensor, lengths = inputs packed = torch.nn.utils.rnn.pack_padded_sequence(tensor, lengths.cpu(), batch_first=self.batch_first) output, hidden = self.rnn(packed) output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first) return output, concat_state_dirs(hidden) class BiLSTMEncoderSequence(BiLSTMEncoderBase): """BiLSTM encoder to produce the transduced output sequence. Takes a tuple of tensor, shape `[B, T, C]`, and lengths of shape `[B]`, and produces an output sequence of shape `[B, S, H]` where `S = max(lengths)`. The lengths of the output sequence may differ from the input sequence if the `max(lengths)` given is shorter than `T` during execution. *PyTorch Note:* The input shape is either `[B, T, C]` or `[T, B, C]` depending on the value of `batch_first`, and defaults to `[T, B, C]` for consistency with other PyTorch modules. The output shape is of the same orientation.
""" def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor: """Take in a tuple of `(sequence, lengths)` and produce an output tensor of the last layer of LSTMs The value `S` here is defined as `max(lengths)`, `S <= T` :param inputs: sequence of shapes `[B, T, C]` or `[T, B, C]` and lengths of shape `[B]` :return: A tensor of shape `[B, S, H]` or `[S, B, H]` depending on setting of `batch_first` """ tensor, lengths = inputs packed = torch.nn.utils.rnn.pack_padded_sequence(tensor, lengths.cpu(), batch_first=self.batch_first) output, hidden = self.rnn(packed) output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first) return output class BiLSTMEncoderHidden(BiLSTMEncoderBase): """BiLSTM encoder that returns the top hidden state Takes a tuple containing a tensor input of shape `[B, T, C]` and lengths of shape `[B]` and returns a hidden unit tensor of shape `[B, H]` *PyTorch note*: Takes a vector of shape `[B, T, C]` or `[B, C, T]`, depending on input specification of `batch_first`. Also note that in PyTorch, `batch_first` defaults to `False` """ def forward(self, inputs): """ :param inputs: A tuple containing the input tensor `[B, T, C]` or `[B, H, C]` and a length `[B]` :return: An output tensor of shape `[B, H]` representing the last RNN's hidden state """ tensor, lengths = inputs packed = torch.nn.utils.rnn.pack_padded_sequence(tensor, lengths.cpu(), batch_first=self.batch_first) output, hidden = self.rnn(packed) output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first) return self.extract_top_state(concat_state_dirs(hidden))[0] # TODO: Add this to TF or remove class BiLSTMEncoderHiddenContext(BiLSTMEncoderBase): def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor: tbc, lengths = inputs packed = torch.nn.utils.rnn.pack_padded_sequence(tbc, lengths.cpu(), batch_first=self.batch_first) output, hidden = self.rnn(packed) output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first) return self.extract_top_state(concat_state_dirs(hidden)) class GRUEncoderBase(nn.Module): """The GRU encoder is a base for a set of encoders producing various outputs. All GRU encoders inheriting this class will trim the input to the max length given in the batch. For example, if the input sequence is `[B, T, C]` and `S = max(lengths)`, then the resulting sequence, if produced, will be length `S` (or more precisely, `[B, S, H]`) *PyTorch Note*: In PyTorch, it's more common for the input shape to be temporal length first (`[T, B, H]`) and this is the PyTorch default. There is an extra parameter in all of these models called `batch_first` which controls this. Currently, the default is time first (`batch_first=False`), which differs from TensorFlow. To match the TF impl, set `batch_first=True`. *PyTorch Note*: Most `GRUEncoder` variants just define the `forward`. This module cannot provide the same utility as the TensorFlow `GRUEncoder` base right now, because the JIT isn't handling subclassing of forward properly. """ def __init__( self, insz: int, hsz: int, nlayers: int, pdrop: float = 0.0, requires_length: bool = True, batch_first: bool = False, unif: float = 0, initializer: str = None, **kwargs, ): """Produce a stack of GRUs with dropout performed on all but the last layer. :param insz: The size of the input :param hsz: The number of hidden units per GRU :param nlayers: The number of
pred heatmap etc. pred_heatmaps, pred_obj_sizes, pred_offsets = self.predict(raw_images, need_conv_img=need_conv_img) # one sample if len(raw_images.shape) == 3: uplf_points, obj_sizes = pred_bbox_one_sample(pred_heatmaps, pred_obj_sizes, pred_offsets, (raw_images.shape[1], raw_images.shape[0]), ) # some sample else: uplf_points, obj_sizes = [], [] for hm, sz, ofs, raw_img in zip(pred_heatmaps, pred_obj_sizes, pred_offsets, raw_images): uplf_pt, obj_sz = pred_bbox_one_sample(hm, sz, ofs, (raw_img.shape[1], raw_img.shape[0])) uplf_points.append(uplf_pt) obj_sizes.append(obj_sz) return uplf_points, obj_sizes class CropedCenternetPipeline_Ensemble191008_19100201Ver3_19100801Ver3sv2(): def __init__(self, iou_thresh=None, score_thresh=None): self.IOU_THRESHOLD = iou_thresh if iou_thresh is not None else 0.4 self.SCORE_THRESHOLD = score_thresh if score_thresh is not None else 0.5 self.cnet1 = CropedCenternetPipeline_19100201Ver3(iou_thresh=iou_thresh, score_thresh=score_thresh) self.cnet2 = CropedCenternetPipeline_19100801Ver3sv2(iou_thresh=iou_thresh, score_thresh=score_thresh) return def crop_image_including_all_bbox(self, image, upleft_points, obj_sizes, return_bbox=True): """ Returns: croped image: image including all bbox upleft_points after croping: return if return_bbox==True obj_sizes after croping: return if return_bbox==True """ return self.cnet1.crop_image_including_all_bbox(image, upleft_points, obj_sizes, return_bbox) def inv_crop_image_including_all_bbox(self, image_size_wh_before_crop, upleft_points_before_crop, obj_sizes_before_crop, upleft_points, obj_sizes): return self.cnet1.inv_crop_image_including_all_bbox(image_size_wh_before_crop, upleft_points_before_crop, obj_sizes_before_crop, upleft_points, obj_sizes) def load_model(self): self.cnet1.load_model() self.cnet2.load_model() return def predict_bbox(self, raw_images, need_conv_img=True): # conversion class instance conv_cnet_oup = cn_data_op.ConvertCenterNetOutput(num_classes=1, image_shape=self.cnet1.INPUT_SHAPE) # func for one sample def pred_bbox_one_sample(_hm, _sz, _ofs, _raw_img_sz): _uplf_points, _obj_sizes = conv_cnet_oup.to_upleft_points_object_sizes([_hm], [_sz], [_ofs], self.IOU_THRESHOLD, self.SCORE_THRESHOLD) # shape (num_sample=1, num_class=1, num_obj, 2) - > (num_class=1, num_obj, 2) _uplf_points = _uplf_points[0] _obj_sizes = _obj_sizes[0] # rescale if len(_uplf_points) > 0: _uplf_points, _obj_sizes = bbox_proc.BoundingBoxProcessing.resize_image( upleft_points=_uplf_points, obj_sizes=_obj_sizes, before_img_size=self.cnet1.INPUT_SIZE, after_img_size=_raw_img_sz, ) return _uplf_points, _obj_sizes pred_heatmaps1, pred_obj_sizes1, pred_offsets1 = self.cnet1.predict(raw_images, need_conv_img=need_conv_img) pred_heatmaps2, pred_obj_sizes2, pred_offsets2 = self.cnet2.predict(raw_images, need_conv_img=need_conv_img) pred_heatmaps = (pred_heatmaps1 + pred_heatmaps2) * 0.5 pred_obj_sizes = (pred_obj_sizes1 + pred_obj_sizes2) * 0.5 pred_offsets = (pred_offsets1 + pred_offsets2) * 0.5 # one sample if len(raw_images.shape) == 3: uplf_points, obj_sizes = pred_bbox_one_sample(pred_heatmaps, pred_obj_sizes, pred_offsets, (raw_images.shape[1], raw_images.shape[0]), ) # some sample else: uplf_points, obj_sizes = [], [] for hm, sz, ofs, raw_img in zip(pred_heatmaps, pred_obj_sizes, pred_offsets, raw_images): uplf_pt, obj_sz = pred_bbox_one_sample(hm, sz, ofs, (raw_img.shape[1], raw_img.shape[0])) uplf_points.append(uplf_pt) obj_sizes.append(obj_sz) return uplf_points, obj_sizes class 
DetecPipeline_Ens_CenternetPipeline_Ensemble190923_19091801Ver2_19091802Ver3_CropedCenternetPipeline_Ensemble191008_19100201Ver3_19100801Ver3sv(): def __init__(self, iou_thresh=None, score_thresh=None, use_union_area=True): self.IOU_THRESHOLD = iou_thresh if iou_thresh is not None else 0.4 self.SCORE_THRESHOLD = score_thresh if score_thresh is not None else 0.5 self.USE_UNION_AREA = use_union_area self.cnet = CenternetPipeline_Ensemble190923_19091801Ver2_19091802Ver3(iou_thresh=iou_thresh, score_thresh=score_thresh) self.croped_cnet = CropedCenternetPipeline_Ensemble191008_19100201Ver3_19100801Ver3sv2(iou_thresh=iou_thresh, score_thresh=score_thresh) return def load_model(self): self.cnet.load_model() self.croped_cnet.load_model() return def predict_bbox(self, raw_images, need_conv_img=True): pred_upleft_points, pred_object_sizes = self.cnet.predict_bbox(raw_images, need_conv_img=need_conv_img) def _pred_bbox_one_sample(_img, _uplfs, _obj_szs): #print('centernet') #visu.Visualization.visualize_pred_result(_img, _uplfs[0], _obj_szs[0]) _croped_img = self.croped_cnet.crop_image_including_all_bbox(_img, _uplfs, _obj_szs, return_bbox=False) _pred_uplfs, _pred_obj_szs = self.croped_cnet.predict_bbox(_croped_img[np.newaxis], need_conv_img=need_conv_img) #visu.Visualization.visualize_pred_result(_croped_img, _pred_uplfs[0][0], _pred_obj_szs[0][0]) _pred_uplfs, _pred_obj_szs = self.croped_cnet.inv_crop_image_including_all_bbox(_img.shape[1::-1], _uplfs, _obj_szs, _pred_uplfs[0], _pred_obj_szs[0]) #print('croped centernet') #visu.Visualization.visualize_pred_result(_img, _pred_uplfs[0], _pred_obj_szs[0]) # have bbox if _uplfs.shape[1] != 0: _pred_uplfs, _pred_obj_szs = self.nms_bbox([_uplfs[0], _pred_uplfs[0]], [_obj_szs[0], _pred_obj_szs[0]], self.IOU_THRESHOLD) _pred_uplfs, _pred_obj_szs = _pred_uplfs[np.newaxis], _pred_obj_szs[np.newaxis] #print('ens') #visu.Visualization.visualize_pred_result(_img, _pred_uplfs[0], _pred_obj_szs[0]) return _pred_uplfs, _pred_obj_szs # one sample if len(raw_images.shape) == 3: uplf_points, obj_sizes = _pred_bbox_one_sample(raw_images, pred_upleft_points[0], pred_object_sizes[0]) # some sample else: uplf_points, obj_sizes = [], [] for imgs, p_uplf, p_obj_sz in zip(raw_images, pred_upleft_points, pred_object_sizes): uplfs, obj_szs = _pred_bbox_one_sample(imgs, p_uplf, p_obj_sz) uplf_points.append(uplfs) obj_sizes.append(obj_szs) #visu.Visualization.visualize_pred_result(img[0], pred_upleft_points, pred_object_sizes) return uplf_points, obj_sizes def nms_bbox(self, upleft_points_list, object_sizes_list, iou_threshold): """ Args: upleft_points_list: [upleft_points1, upleft_points2, ...], upleft_points shape = (num_box, 2) object_sizes_list: [object_sizes1, object_sizes2, ...], object_sizes shape = (num_box, 2) """ uplfs = np.concatenate(upleft_points_list, axis=0) obj_szs = np.concatenate(object_sizes_list, axis=0) bottomrights = uplfs + obj_szs boxes = np.concatenate([uplfs, bottomrights], axis=1) scores = obj_szs[:,0] * obj_szs[:,1] nms_boxes = op_util.nms(boxes, scores, iou_threshold, use_union_area=self.USE_UNION_AREA) nms_uplfs = nms_boxes[:,:2] nms_obj_szs = nms_boxes[:,2:4] - nms_boxes[:,:2] return nms_uplfs, nms_obj_szs class ResNetPipeline_191002AspectVer9(): """ image preprocessing: gausiann filter -> gamma correction -> ben's preprocessing -> gaussian filter -> median filter random erasing """ def __init__(self): self.__config() self.__initilize() return def __config(self): self.INPUT_SIZE = (64, 64) self.INPUT_SHAPE = self.INPUT_SIZE[::-1] + (1,) 
self.OTHTER_INPUT_SHAPE = (1,) self.NUM_CLASS = len(kzsj_data.KuzushijiDataSet().get_letter_number_dict()[0]) self.RESNET_VERSION = 'ver2' self.MODEL_DIR = os.path.join('.', 'result_recog', 'test191002_aspect_ver9', 'my_resnet') self.MODEL_FILE = os.path.join(self.MODEL_DIR, 'trained_model.h5') self.TTA_WIDTH_SHIFT_RANGE = 0.1 self.TTA_HIGHT_SHIFT_RANGE = 0.1 return def __initilize(self): self.__build_model_instance() return def __conv_data_to_input(self, raw_images): # Conversion function def conv_func_one_sample(_img): #visu.Visualization.visualize_gray_img(_img) # gaussian filter _conv_img = image_proc.ImageProcessing.gaussian_filter(_img, karnelsize=5) # gamma_correction GAMMA = 0.7 _conv_img = image_proc.ImageProcessing.gamma_correction(_conv_img, gamma=GAMMA, strength_criteria_is_0=True, linear=True, to_uint8=True) # ben's preprocessing _conv_img = image_proc.ImageProcessing.ben_preprocessing(_conv_img, base=128) # gaussian filter _conv_img = image_proc.ImageProcessing.gaussian_filter(_conv_img, karnelsize=5) # median filter _conv_img = image_proc.ImageProcessing.median_filter(_conv_img, karnelsize=5) # resize _conv_img = image_proc.ImageProcessing.resize(image=_conv_img, to_size=self.INPUT_SIZE, keep_aspect_ratio=False) #visu.Visualization.visualize_gray_img(_conv_img) # normalize _conv_img = (_conv_img.astype('float32') - 127.5) / 127.5 # aspect w/h _log_aspect_wh = np.log(_img.shape[1] / _img.shape[0]) return _conv_img, _log_aspect_wh # shape = (H,W,C) if len(raw_images.shape) == 3: conv_imgs, log_aspects = conv_func_one_sample(raw_images) # shape = (num_sampel,H,W,C) else: conv_imgs = [] log_aspects = [] for img in raw_images: conv_img, log_aspect = conv_func_one_sample(img) conv_imgs.append(conv_img) log_aspects.append(log_aspect) conv_imgs = np.array(conv_imgs) log_aspects = np.array(log_aspects) return [conv_imgs, log_aspects] def __build_model_instance(self): self.my_resnet = resnet.MyResNet(image_shape=self.INPUT_SHAPE, num_class=self.NUM_CLASS, resnet_version=self.RESNET_VERSION, other_input_shape=self.OTHTER_INPUT_SHAPE) return def __use_indexes(self, use_train_data, use_val_data, seed): kzsj_dataset = kzsj_data.KuzushijiDataSet() # data num if use_train_data: data_num = kzsj_dataset.get_train_data_num() else: data_num = kzsj_dataset.get_test_data_num() # data idx if use_train_data: np.random.seed(seed) data_idxes = np.random.choice(data_num, int(data_num*0.8), replace=False) if use_val_data: data_idxes = np.setdiff1d(np.arange(data_num), data_idxes) data_idxes = np.sort(data_idxes) else: data_idxes = range(data_num) return data_idxes def __read_img(self, indexes, use_train_data, need_print=False): kzsj_dataset = kzsj_data.KuzushijiDataSet() if use_train_data: imgs, ids = kzsj_dataset.read_train_image(indexs=indexes, to_gray=True, need_print=need_print) else: imgs, ids = kzsj_dataset.read_test_image(indexs=indexes, to_gray=True, need_print=need_print) return imgs, ids def __read_train_upleftpoint_size(self, indexes=None): kzsj_dataset = kzsj_data.KuzushijiDataSet() upleft_points, object_sizes = kzsj_dataset.read_train_upleftpoint_size(indexes) return upleft_points, object_sizes def __crop_img(self, img, upleft_points, obj_sizes): croped_imgs = image_proc.ImageProcessing.crop(img, upleft_points[:,0], upleft_points[:,1], obj_sizes[:,0], obj_sizes[:,1]) return croped_imgs def __read_and_conv_img(self, indexes, need_print=False): imgs = None log_aspects = None for idata in indexes: if need_print: if (idata+1) % 1 == 0: print('\r read image {0}/{1}'.format(idata + 1, 
len(indexes)), end="") # read image img, _ = self.__read_img(indexes=idata, use_train_data=True, need_print=False) # read upleftpoint, size #upleft_points : ndarray( [[x0,y0], [x1,y1], ...] * num_classes * num_data ), shape=(num_data, num_classes, num_keypoint, 2) #object_sizes : ndarray( [[w0,h0], [w1,h1], ...] * num_classes * num_data ), shape=(num_data, num_classes, num_keypoint, 2) upleft_points, object_sizes = self.__read_train_upleftpoint_size(indexes=idata) # have bbox if len(upleft_points[0][0]) != 0: # crop image letter_imgs = self.__crop_img(img[0], upleft_points[0][0], object_sizes[0][0]) # conv conv_imgs_log_aspects = self.__conv_data_to_input(letter_imgs) if imgs is None: imgs = conv_imgs_log_aspects[0] else: imgs = np.concatenate([imgs, conv_imgs_log_aspects[0]], axis=0) if log_aspects is None: log_aspects = conv_imgs_log_aspects[1] else: log_aspects = np.concatenate([log_aspects, conv_imgs_log_aspects[1]], axis=0) if need_print: print() return [imgs, log_aspects] def __read_train_letter_no(self, idexes): kzsj_dataset = kzsj_data.KuzushijiDataSet() letter_nos = kzsj_dataset.read_train_letter_no(idexes) flatten_letter_nos = [] for ltno in letter_nos: flatten_letter_nos.extend(ltno.tolist()) flatten_letter_nos = np.array(flatten_letter_nos) return flatten_letter_nos def __train(self, tr_inputs, tr_letter_no_onehot, val_input, val_letter_no_onehot): # buid model self.my_resnet.build_model() # save dir save_dir = self.MODEL_DIR if not os.path.isdir(save_dir): os.makedirs(save_dir) save_model_file = self.MODEL_FILE save_csv_file = os.path.join(save_dir, 'train_hist.csv') shutil.copy(__file__, save_dir) # training LEARNING_RATE = 0.001 EPOCHS = 200 # 200 BATCH_SIZE = 512 RANDOM_ERASING_KWARGS = {'erasing_prob':0.5, 'area_rate_low':0.02, 'area_rate_high':0.4, 'aspect_rate_low':0.3, 'aspect_rate_high':3.3} # fit self.my_resnet.train_model(tr_inputs, tr_letter_no_onehot, val_input, val_letter_no_onehot, learning_rate=LEARNING_RATE, epochs=EPOCHS, batch_size=BATCH_SIZE, random_erasing_kwargs=RANDOM_ERASING_KWARGS, save_file=None, csv_file=save_csv_file) # save model self.my_resnet.save_model(save_file=save_model_file, only_model_plot=False) return def __eval(self, true_letter_no, pred_letter_no, save_file): acc = metrics.accuracy_score(true_letter_no, pred_letter_no) print(' acc : {0}'.format(acc)) report = metrics.classification_report(true_letter_no, pred_letter_no) with open(save_file, mode='w') as f: f.write(report) return def run_train(self): SEED = 2020 # use indexs tr_data_idxes = self.__use_indexes(use_train_data=True, use_val_data=False, seed=SEED) val_data_idxes = self.__use_indexes(use_train_data=True, use_val_data=True, seed=SEED) #test_num = 10 #tr_data_idxes = np.random.choice(tr_data_idxes, test_num, replace=False) #val_data_idxes = np.random.choice(val_data_idxes, test_num, replace=False) # data # image tr_inputs = self.__read_and_conv_img(tr_data_idxes, need_print=True) val_input = self.__read_and_conv_img(val_data_idxes, need_print=True) # letter no tr_letter_nos = self.__read_train_letter_no(tr_data_idxes) tr_letter_no_onehot = keras.utils.to_categorical(tr_letter_nos, self.NUM_CLASS) val_letter_nos = self.__read_train_letter_no(val_data_idxes) val_letter_no_onehot = keras.utils.to_categorical(val_letter_nos, self.NUM_CLASS) # training self.__train(tr_inputs, tr_letter_no_onehot, val_input, val_letter_no_onehot) # eval pred_tr_letter_nos = self.__predict_using_input(tr_inputs) pred_val_letter_nos = self.__predict_using_input(val_input) self.__eval(tr_letter_nos, 
pred_tr_letter_nos, os.path.join(self.MODEL_DIR, 'result_report_tr.txt')) self.__eval(val_letter_nos, pred_val_letter_nos, os.path.join(self.MODEL_DIR, 'result_report_val.txt')) return def load_model(self): self.my_resnet.load_model(self.MODEL_FILE) return def predict(self, raw_images, soft=False): inputs = self.__conv_data_to_input(raw_images) # tta class instance tta_trans9 = classification_tta.TranslateAugmentation_9case( image_size_hw=self.INPUT_SIZE, width_shift_range=self.TTA_WIDTH_SHIFT_RANGE, height_shift_range=self.TTA_HIGHT_SHIFT_RANGE ) # predict oups = self.my_resnet.predict_tta(images=inputs[0], tta_func=tta_trans9.augment_image, other_inputs=inputs[1], soft=soft) return oups def __predict_using_input(self, inputs, soft=False): # tta class instance tta_trans9 = classification_tta.TranslateAugmentation_9case( image_size_hw=self.INPUT_SIZE, width_shift_range=self.TTA_WIDTH_SHIFT_RANGE, height_shift_range=self.TTA_HIGHT_SHIFT_RANGE ) # predict oups = self.my_resnet.predict_tta(images=inputs[0], tta_func=tta_trans9.augment_image, other_inputs=inputs[1], soft=soft) return oups def __stratify_train_test_split(self, labels, test_size_rate, random_state, shuffle): data_num = len(labels) # which labels appear only once bin_counts = np.bincount(labels) unique_labels = np.nonzero(bin_counts)[0] unique_label_counts = bin_counts[unique_labels] one_labels = unique_labels[unique_label_counts==1] #
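# --- Hedged reference sketch (editor's addition) of the box merging used in
# nms_bbox() above: detections from two models are pooled, scored by area
# (w*h) and filtered greedily by IoU.  This is NOT the project's op_util.nms;
# it ignores the use_union_area switch and is only meant to show the
# (upleft, size) <-> (x1, y1, x2, y2) conversion and the greedy loop.
import numpy as np

def _iou(box, boxes):
    # box and boxes are [x1, y1, x2, y2]
    x1 = np.maximum(box[0], boxes[:, 0])
    y1 = np.maximum(box[1], boxes[:, 1])
    x2 = np.minimum(box[2], boxes[:, 2])
    y2 = np.minimum(box[3], boxes[:, 3])
    inter = np.clip(x2 - x1, 0, None) * np.clip(y2 - y1, 0, None)
    area_a = (box[2] - box[0]) * (box[3] - box[1])
    area_b = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    return inter / (area_a + area_b - inter + 1e-9)

def simple_nms(upleft_points, obj_sizes, iou_threshold=0.4):
    boxes = np.concatenate([upleft_points, upleft_points + obj_sizes], axis=1)
    scores = obj_sizes[:, 0] * obj_sizes[:, 1]
    order = np.argsort(scores)[::-1]
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        rest = order[1:]
        order = rest[_iou(boxes[i], boxes[rest]) < iou_threshold]
    kept = boxes[keep]
    return kept[:, :2], kept[:, 2:] - kept[:, :2]   # back to (upleft, size)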
import string import warnings import numpy as np from pandas import ( DataFrame, MultiIndex, NaT, Series, date_range, isnull, period_range, timedelta_range, ) from .pandas_vb_common import tm class GetNumericData: def setup(self): self.df = DataFrame(np.random.randn(10000, 25)) self.df["foo"] = "bar" self.df["bar"] = "baz" self.df = self.df._consolidate() def time_frame_get_numeric_data(self): self.df._get_numeric_data() class Lookup: def setup(self): self.df = DataFrame(np.random.randn(10000, 8), columns=list("abcdefgh")) self.df["foo"] = "bar" self.row_labels = list(self.df.index[::10])[:900] self.col_labels = list(self.df.columns) * 100 self.row_labels_all = np.array( list(self.df.index) * len(self.df.columns), dtype="object" ) self.col_labels_all = np.array( list(self.df.columns) * len(self.df.index), dtype="object" ) def time_frame_fancy_lookup(self): self.df.lookup(self.row_labels, self.col_labels) def time_frame_fancy_lookup_all(self): self.df.lookup(self.row_labels_all, self.col_labels_all) class Reindex: def setup(self): N = 10 ** 3 self.df = DataFrame(np.random.randn(N * 10, N)) self.idx = np.arange(4 * N, 7 * N) self.idx_cols = np.random.randint(0, N, N) self.df2 = DataFrame( { c: { 0: np.random.randint(0, 2, N).astype(np.bool_), 1: np.random.randint(0, N, N).astype(np.int16), 2: np.random.randint(0, N, N).astype(np.int32), 3: np.random.randint(0, N, N).astype(np.int64), }[np.random.randint(0, 4)] for c in range(N) } ) def time_reindex_axis0(self): self.df.reindex(self.idx) def time_reindex_axis1(self): self.df.reindex(columns=self.idx_cols) def time_reindex_axis1_missing(self): self.df.reindex(columns=self.idx) def time_reindex_both_axes(self): self.df.reindex(index=self.idx, columns=self.idx_cols) def time_reindex_upcast(self): self.df2.reindex(np.random.permutation(range(1200))) class Rename: def setup(self): N = 10 ** 3 self.df = DataFrame(np.random.randn(N * 10, N)) self.idx = np.arange(4 * N, 7 * N) self.dict_idx = {k: k for k in self.idx} self.df2 = DataFrame( { c: { 0: np.random.randint(0, 2, N).astype(np.bool_), 1: np.random.randint(0, N, N).astype(np.int16), 2: np.random.randint(0, N, N).astype(np.int32), 3: np.random.randint(0, N, N).astype(np.int64), }[np.random.randint(0, 4)] for c in range(N) } ) def time_rename_single(self): self.df.rename({0: 0}) def time_rename_axis0(self): self.df.rename(self.dict_idx) def time_rename_axis1(self): self.df.rename(columns=self.dict_idx) def time_rename_both_axes(self): self.df.rename(index=self.dict_idx, columns=self.dict_idx) def time_dict_rename_both_axes(self): self.df.rename(index=self.dict_idx, columns=self.dict_idx) class Iteration: # mem_itertuples_* benchmarks are slow timeout = 120 def setup(self): N = 1000 self.df = DataFrame(np.random.randn(N * 10, N)) self.df2 = DataFrame(np.random.randn(N * 50, 10)) self.df3 = DataFrame( np.random.randn(N, 5 * N), columns=["C" + str(c) for c in range(N * 5)] ) self.df4 = DataFrame(np.random.randn(N * 1000, 10)) def time_items(self): # (monitor no-copying behaviour) if hasattr(self.df, "_item_cache"): self.df._item_cache.clear() for name, col in self.df.items(): pass def time_items_cached(self): for name, col in self.df.items(): pass def time_iteritems_indexing(self): for col in self.df3: self.df3[col] def time_itertuples_start(self): self.df4.itertuples() def time_itertuples_read_first(self): next(self.df4.itertuples()) def time_itertuples(self): for row in self.df4.itertuples(): pass def time_itertuples_to_list(self): list(self.df4.itertuples()) def mem_itertuples_start(self): 
return self.df4.itertuples() def peakmem_itertuples_start(self): self.df4.itertuples() def mem_itertuples_read_first(self): return next(self.df4.itertuples()) def peakmem_itertuples(self): for row in self.df4.itertuples(): pass def mem_itertuples_to_list(self): return list(self.df4.itertuples()) def peakmem_itertuples_to_list(self): list(self.df4.itertuples()) def time_itertuples_raw_start(self): self.df4.itertuples(index=False, name=None) def time_itertuples_raw_read_first(self): next(self.df4.itertuples(index=False, name=None)) def time_itertuples_raw_tuples(self): for row in self.df4.itertuples(index=False, name=None): pass def time_itertuples_raw_tuples_to_list(self): list(self.df4.itertuples(index=False, name=None)) def mem_itertuples_raw_start(self): return self.df4.itertuples(index=False, name=None) def peakmem_itertuples_raw_start(self): self.df4.itertuples(index=False, name=None) def peakmem_itertuples_raw_read_first(self): next(self.df4.itertuples(index=False, name=None)) def peakmem_itertuples_raw(self): for row in self.df4.itertuples(index=False, name=None): pass def mem_itertuples_raw_to_list(self): return list(self.df4.itertuples(index=False, name=None)) def peakmem_itertuples_raw_to_list(self): list(self.df4.itertuples(index=False, name=None)) def time_iterrows(self): for row in self.df.iterrows(): pass class ToString: def setup(self): self.df = DataFrame(np.random.randn(100, 10)) def time_to_string_floats(self): self.df.to_string() class ToHTML: def setup(self): nrows = 500 self.df2 = DataFrame(np.random.randn(nrows, 10)) self.df2[0] = period_range("2000", periods=nrows) self.df2[1] = range(nrows) def time_to_html_mixed(self): self.df2.to_html() class ToDict: params = [["dict", "list", "series", "split", "records", "index"]] param_names = ["orient"] def setup(self, orient): data = np.random.randint(0, 1000, size=(10000, 4)) self.int_df = DataFrame(data) self.datetimelike_df = self.int_df.astype("timedelta64[ns]") def time_to_dict_ints(self, orient): self.int_df.to_dict(orient=orient) def time_to_dict_datetimelike(self, orient): self.datetimelike_df.to_dict(orient=orient) class ToNumpy: def setup(self): N = 10000 M = 10 self.df_tall = DataFrame(np.random.randn(N, M)) self.df_wide = DataFrame(np.random.randn(M, N)) self.df_mixed_tall = self.df_tall.copy() self.df_mixed_tall["foo"] = "bar" self.df_mixed_tall[0] = period_range("2000", periods=N) self.df_mixed_tall[1] = range(N) self.df_mixed_wide = self.df_wide.copy() self.df_mixed_wide["foo"] = "bar" self.df_mixed_wide[0] = period_range("2000", periods=M) self.df_mixed_wide[1] = range(M) def time_to_numpy_tall(self): self.df_tall.to_numpy() def time_to_numpy_wide(self): self.df_wide.to_numpy() def time_to_numpy_mixed_tall(self): self.df_mixed_tall.to_numpy() def time_to_numpy_mixed_wide(self): self.df_mixed_wide.to_numpy() def time_values_tall(self): self.df_tall.values def time_values_wide(self): self.df_wide.values def time_values_mixed_tall(self): self.df_mixed_tall.values def time_values_mixed_wide(self): self.df_mixed_wide.values class Repr: def setup(self): nrows = 10000 data = np.random.randn(nrows, 10) arrays = np.tile(np.random.randn(3, nrows // 100), 100) idx = MultiIndex.from_arrays(arrays) self.df3 = DataFrame(data, index=idx) self.df4 = DataFrame(data, index=np.random.randn(nrows)) self.df_tall = DataFrame(np.random.randn(nrows, 10)) self.df_wide = DataFrame(np.random.randn(10, nrows)) def time_html_repr_trunc_mi(self): self.df3._repr_html_() def time_html_repr_trunc_si(self): self.df4._repr_html_() def 
time_repr_tall(self): repr(self.df_tall) def time_frame_repr_wide(self): repr(self.df_wide) class MaskBool: def setup(self): data = np.random.randn(1000, 500) df = DataFrame(data) df = df.where(df > 0) self.bools = df > 0 self.mask = isnull(df) def time_frame_mask_bools(self): self.bools.mask(self.mask) def time_frame_mask_floats(self): self.bools.astype(float).mask(self.mask) class Isnull: def setup(self): N = 10 ** 3 self.df_no_null = DataFrame(np.random.randn(N, N)) sample = np.array([np.nan, 1.0]) data = np.random.choice(sample, (N, N)) self.df = DataFrame(data) sample = np.array(list(string.ascii_letters + string.whitespace)) data = np.random.choice(sample, (N, N)) self.df_strings = DataFrame(data) sample = np.array( [ NaT, np.nan, None, np.datetime64("NaT"), np.timedelta64("NaT"), 0, 1, 2.0, "", "abcd", ] ) data = np.random.choice(sample, (N, N)) self.df_obj = DataFrame(data) def time_isnull_floats_no_null(self): isnull(self.df_no_null) def time_isnull(self): isnull(self.df) def time_isnull_strngs(self): isnull(self.df_strings) def time_isnull_obj(self): isnull(self.df_obj) class Fillna: params = ( [True, False], ["pad", "bfill"], [ "float64", "float32", "object", "Int64", "Float64", "datetime64[ns]", "datetime64[ns, tz]", "timedelta64[ns]", ], ) param_names = ["inplace", "method", "dtype"] def setup(self, inplace, method, dtype): N, M = 10000, 100 if dtype in ("datetime64[ns]", "datetime64[ns, tz]", "timedelta64[ns]"): data = { "datetime64[ns]": date_range("2011-01-01", freq="H", periods=N), "datetime64[ns, tz]": date_range( "2011-01-01", freq="H", periods=N, tz="Asia/Tokyo" ), "timedelta64[ns]": timedelta_range(start="1 day", periods=N, freq="1D"), } self.df = DataFrame({f"col_{i}": data[dtype] for i in range(M)}) self.df[::2] = None else: values = np.random.randn(N, M) values[::2] = np.nan if dtype == "Int64": values = values.round() self.df = DataFrame(values, dtype=dtype) def time_frame_fillna(self, inplace, method, dtype): self.df.fillna(inplace=inplace, method=method) class Dropna: params = (["all", "any"], [0, 1]) param_names = ["how", "axis"] def setup(self, how, axis): self.df = DataFrame(np.random.randn(10000, 1000)) self.df.iloc[50:1000, 20:50] = np.nan self.df.iloc[2000:3000] = np.nan self.df.iloc[:, 60:70] = np.nan self.df_mixed = self.df.copy() self.df_mixed["foo"] = "bar" def time_dropna(self, how, axis): self.df.dropna(how=how, axis=axis) def time_dropna_axis_mixed_dtypes(self, how, axis): self.df_mixed.dropna(how=how, axis=axis) class Count: params = [0, 1] param_names = ["axis"] def setup(self, axis): self.df = DataFrame(np.random.randn(10000, 1000)) self.df.iloc[50:1000, 20:50] = np.nan self.df.iloc[2000:3000] = np.nan self.df.iloc[:, 60:70] = np.nan self.df_mixed = self.df.copy() self.df_mixed["foo"] = "bar" self.df.index = MultiIndex.from_arrays([self.df.index, self.df.index]) self.df.columns = MultiIndex.from_arrays([self.df.columns, self.df.columns]) self.df_mixed.index = MultiIndex.from_arrays( [self.df_mixed.index, self.df_mixed.index] ) self.df_mixed.columns = MultiIndex.from_arrays( [self.df_mixed.columns, self.df_mixed.columns] ) def time_count_level_multi(self, axis): self.df.count(axis=axis, level=1) def time_count_level_mixed_dtypes_multi(self, axis): self.df_mixed.count(axis=axis, level=1) class Apply: def setup(self): self.df = DataFrame(np.random.randn(1000, 100)) self.s = Series(np.arange(1028.0)) self.df2 = DataFrame({i: self.s for i in range(1028)}) self.df3 = DataFrame(np.random.randn(1000, 3), columns=list("ABC")) def 
time_apply_user_func(self): self.df2.apply(lambda x: np.corrcoef(x, self.s)[(0, 1)]) def time_apply_axis_1(self): self.df.apply(lambda x: x + 1, axis=1) def time_apply_lambda_mean(self): self.df.apply(lambda x: x.mean()) def time_apply_np_mean(self): self.df.apply(np.mean) def time_apply_pass_thru(self): self.df.apply(lambda x: x) def time_apply_ref_by_name(self): self.df3.apply(lambda x: x["A"] + x["B"], axis=1) class Dtypes: def setup(self): self.df = DataFrame(np.random.randn(1000, 1000)) def time_frame_dtypes(self): self.df.dtypes class Equals: def setup(self): N = 10 ** 3 self.float_df = DataFrame(np.random.randn(N, N)) self.float_df_nan = self.float_df.copy() self.float_df_nan.iloc[-1, -1] = np.nan self.object_df = DataFrame("foo", index=range(N), columns=range(N)) self.object_df_nan = self.object_df.copy() self.object_df_nan.iloc[-1, -1] = np.nan self.nonunique_cols = self.object_df.copy() self.nonunique_cols.columns = ["A"] * len(self.nonunique_cols.columns) self.nonunique_cols_nan = self.nonunique_cols.copy() self.nonunique_cols_nan.iloc[-1, -1] = np.nan def time_frame_float_equal(self): self.float_df.equals(self.float_df) def time_frame_float_unequal(self): self.float_df.equals(self.float_df_nan) def time_frame_nonunique_equal(self): self.nonunique_cols.equals(self.nonunique_cols) def time_frame_nonunique_unequal(self): self.nonunique_cols.equals(self.nonunique_cols_nan) def time_frame_object_equal(self): self.object_df.equals(self.object_df) def time_frame_object_unequal(self): self.object_df.equals(self.object_df_nan) class Interpolate: params = [None, "infer"] param_names = ["downcast"] def setup(self, downcast): N = 10000 # this is the worst case, where every column has NaNs. arr = np.random.randn(N, 100) # NB: we need to set values in array, not in df.values, otherwise # the benchmark will be misleading for ArrayManager arr[::2] = np.nan self.df = DataFrame(arr) self.df2 = DataFrame( { "A": np.arange(0, N), "B": np.random.randint(0, 100, N), "C": np.random.randn(N), "D": np.random.randn(N), } ) self.df2.loc[1::5, "A"] = np.nan self.df2.loc[1::5, "C"] = np.nan def time_interpolate(self, downcast): self.df.interpolate(downcast=downcast) def time_interpolate_some_good(self, downcast): self.df2.interpolate(downcast=downcast) class Shift: # frame shift speedup issue-5609 params = [0, 1] param_names = ["axis"] def setup(self, axis): self.df = DataFrame(np.random.rand(10000, 500)) def time_shift(self, axis): self.df.shift(1, axis=axis) class Nunique: def setup(self): self.df = DataFrame(np.random.randn(10000, 1000)) def time_frame_nunique(self): self.df.nunique() class SeriesNuniqueWithNan: def setup(self): self.ser = Series(100000 * (100 * [np.nan] + list(range(100)))).astype(float) def time_series_nunique_nan(self): self.ser.nunique() class Duplicated: def setup(self): n = 1 << 20 t = date_range("2015-01-01", freq="S", periods=(n // 64)) xs = np.random.randn(n // 64).round(2) self.df = DataFrame( { "a": np.random.randint(-1 << 8, 1 << 8, n), "b": np.random.choice(t, n), "c": np.random.choice(xs, n), } ) self.df2 = DataFrame(np.random.randn(1000, 100).astype(str)).T def time_frame_duplicated(self): self.df.duplicated() def time_frame_duplicated_wide(self): self.df2.duplicated() class XS: params = [0, 1] param_names = ["axis"] def setup(self, axis): self.N = 10 ** 4 self.df = DataFrame(np.random.randn(self.N, self.N)) def time_frame_xs(self, axis): self.df.xs(self.N / 2, axis=axis) class SortValues: params = [True, False] param_names = ["ascending"] def setup(self, ascending): 
self.df = DataFrame(np.random.randn(1000000, 2), columns=list("AB")) def time_frame_sort_values(self, ascending): self.df.sort_values(by="A", ascending=ascending) class SortIndexByColumns: def setup(self): N = 10000 K
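# --- Hedged illustration (editor's addition) of how asv drives the benchmark
# classes above: setup() is called once, then every method whose name starts
# with "time_" is timed.  The loop below imitates that contract by hand for a
# single parameter-less benchmark; it is not asv itself and skips params/teardown.
import timeit

def run_benchmark(bench_cls, repeat=3):
    bench = bench_cls()
    bench.setup()
    return {
        name: min(timeit.repeat(getattr(bench, name), number=1, repeat=repeat))
        for name in dir(bench) if name.startswith("time_")
    }

if __name__ == "__main__":
    for name, seconds in run_benchmark(GetNumericData).items():
        print(f"{name}: {seconds:.6f}s")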
#!/usr/bin/env python # -*- coding: UTF-8 -*- # # QuantStats: Portfolio analytics for quants # https://github.com/ranaroussi/quantstats # # Copyright 2019 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from warnings import warn import pandas as _pd import numpy as _np from math import ceil as _ceil, sqrt as _sqrt from scipy.stats import ( norm as _norm, linregress as _linregress ) from . import utils as _utils # ======== STATS ======== def pct_rank(prices, window=60): """Rank prices by window""" rank = _utils.multi_shift(prices, window).T.rank(pct=True).T return rank.iloc[:, 0] * 100. def compsum(returns): """Calculates rolling compounded returns""" return returns.add(1).cumprod() - 1 def comp(returns): """Calculates total compounded returns""" return returns.add(1).prod() - 1 def distribution(returns, compounded=True, prepare_returns=True): def get_outliers(data): # https://datascience.stackexchange.com/a/57199 Q1 = data.quantile(0.25) Q3 = data.quantile(0.75) IQR = Q3 - Q1 # IQR is interquartile range. filtered = (data >= Q1 - 1.5 * IQR) & (data <= Q3 + 1.5 * IQR) return { "values": data.loc[filtered].tolist(), "outliers": data.loc[~filtered].tolist(), } if isinstance(returns, _pd.DataFrame): warn("Pandas DataFrame was passed (Series expected). " "Only first column will be used.") returns = returns.copy() returns.columns = map(str.lower, returns.columns) if len(returns.columns) > 1 and 'close' in returns.columns: returns = returns['close'] else: returns = returns[returns.columns[0]] apply_fnc = comp if compounded else _np.sum daily = returns.dropna() if prepare_returns: daily = _utils._prepare_returns(daily) return { "Daily": get_outliers(daily), "Weekly": get_outliers(daily.resample('W-MON').apply(apply_fnc)), "Monthly": get_outliers(daily.resample('M').apply(apply_fnc)), "Quarterly": get_outliers(daily.resample('Q').apply(apply_fnc)), "Yearly": get_outliers(daily.resample('A').apply(apply_fnc)) } def expected_return(returns, aggregate=None, compounded=True, prepare_returns=True): """ Returns the expected return for a given period by calculating the geometric holding period return """ if prepare_returns: returns = _utils._prepare_returns(returns) returns = _utils.aggregate_returns(returns, aggregate, compounded) return _np.prod(1 + returns) ** (1 / len(returns)) - 1 def geometric_mean(returns, aggregate=None, compounded=True): """Shorthand for expected_return()""" return expected_return(returns, aggregate, compounded) def ghpr(returns, aggregate=None, compounded=True): """Shorthand for expected_return()""" return expected_return(returns, aggregate, compounded) def outliers(returns, quantile=.95): """Returns series of outliers""" return returns[returns > returns.quantile(quantile)].dropna(how='all') def remove_outliers(returns, quantile=.95): """Returns series of returns without the outliers""" return returns[returns < returns.quantile(quantile)] def best(returns, aggregate=None, compounded=True, prepare_returns=True): """Returns the best day/month/week/quarter/year's return""" if
prepare_returns: returns = _utils._prepare_returns(returns) return _utils.aggregate_returns(returns, aggregate, compounded).max() def worst(returns, aggregate=None, compounded=True, prepare_returns=True): """Returns the worst day/month/week/quarter/year's return""" if prepare_returns: returns = _utils._prepare_returns(returns) return _utils.aggregate_returns(returns, aggregate, compounded).min() def consecutive_wins(returns, aggregate=None, compounded=True, prepare_returns=True): """Returns the maximum consecutive wins by day/month/week/quarter/year""" if prepare_returns: returns = _utils._prepare_returns(returns) returns = _utils.aggregate_returns(returns, aggregate, compounded) > 0 return _utils._count_consecutive(returns).max() def consecutive_losses(returns, aggregate=None, compounded=True, prepare_returns=True): """ Returns the maximum consecutive losses by day/month/week/quarter/year """ if prepare_returns: returns = _utils._prepare_returns(returns) returns = _utils.aggregate_returns(returns, aggregate, compounded) < 0 return _utils._count_consecutive(returns).max() def exposure(returns, prepare_returns=True): """Returns the market exposure time (returns != 0)""" if prepare_returns: returns = _utils._prepare_returns(returns) def _exposure(ret): ex = len(ret[(~_np.isnan(ret)) & (ret != 0)]) / len(ret) return _ceil(ex * 100) / 100 if isinstance(returns, _pd.DataFrame): _df = {} for col in returns.columns: _df[col] = _exposure(returns[col]) return _pd.Series(_df) return _exposure(returns) def win_rate(returns, aggregate=None, compounded=True, prepare_returns=True): """Calculates the win ratio for a period""" def _win_rate(series): try: return len(series[series > 0]) / len(series[series != 0]) except Exception: return 0. if prepare_returns: returns = _utils._prepare_returns(returns) if aggregate: returns = _utils.aggregate_returns(returns, aggregate, compounded) if isinstance(returns, _pd.DataFrame): _df = {} for col in returns.columns: _df[col] = _win_rate(returns[col]) return _pd.Series(_df) return _win_rate(returns) def avg_return(returns, aggregate=None, compounded=True, prepare_returns=True): """Calculates the average return/trade return for a period""" if prepare_returns: returns = _utils._prepare_returns(returns) if aggregate: returns = _utils.aggregate_returns(returns, aggregate, compounded) return returns[returns != 0].dropna().mean() def avg_win(returns, aggregate=None, compounded=True, prepare_returns=True): """ Calculates the average winning return/trade return for a period """ if prepare_returns: returns = _utils._prepare_returns(returns) if aggregate: returns = _utils.aggregate_returns(returns, aggregate, compounded) return returns[returns > 0].dropna().mean() def avg_loss(returns, aggregate=None, compounded=True, prepare_returns=True): """ Calculates the average losing return/trade return for a period """ if prepare_returns: returns = _utils._prepare_returns(returns) if aggregate: returns = _utils.aggregate_returns(returns, aggregate, compounded) return returns[returns < 0].dropna().mean() def volatility(returns, periods=252, annualize=True, prepare_returns=True): """Calculates the volatility of returns for a period""" if prepare_returns: returns = _utils._prepare_returns(returns) std = returns.std() if annualize: return std * _np.sqrt(periods) return std def rolling_volatility(returns, rolling_period=126, periods_per_year=252, prepare_returns=True): if prepare_returns: returns = _utils._prepare_returns(returns, rolling_period) return returns.rolling(rolling_period).std()
* _np.sqrt(periods_per_year) def implied_volatility(returns, periods=252, annualize=True): """Calculates the implied volatility of returns for a period""" logret = _utils.log_returns(returns) if annualize: return logret.rolling(periods).std() * _np.sqrt(periods) return logret.std() def autocorr_penalty(returns, prepare_returns=False): """Metric to account for auto correlation""" if prepare_returns: returns = _utils._prepare_returns(returns) if isinstance(returns, _pd.DataFrame): returns = returns[returns.columns[0]] num = len(returns) coef = _np.abs(_np.corrcoef(returns[:-1], returns[1:])[0, 1]) corr = [((num - x)/num) * coef ** x for x in range(1, num)] return _np.sqrt(1 + 2 * _np.sum(corr)) # ======= METRICS ======= def sharpe(returns, rf=0., periods=252, annualize=True, smart=False): """ Calculates the Sharpe ratio of excess returns If rf is non-zero, you must specify periods. In this case, rf is assumed to be expressed in yearly (annualized) terms Args: * returns (Series, DataFrame): Input return series * rf (float): Risk-free rate expressed as a yearly (annualized) return * periods (int): Freq. of returns (252/365 for daily, 12 for monthly) * annualize: return annualized Sharpe? * smart: return smart sharpe ratio """ if rf != 0 and periods is None: raise Exception('Must provide periods if rf != 0') returns = _utils._prepare_returns(returns, rf, periods) divisor = returns.std(ddof=1) if smart: # penalize sharpe with auto correlation divisor = divisor * autocorr_penalty(returns) res = returns.mean() / divisor if annualize: return res * _np.sqrt( 1 if periods is None else periods) return res def smart_sharpe(returns, rf=0., periods=252, annualize=True): return sharpe(returns, rf, periods, annualize, True) def rolling_sharpe(returns, rf=0., rolling_period=126, annualize=True, periods_per_year=252, prepare_returns=True): if rf != 0 and rolling_period is None: raise Exception('Must provide periods if rf != 0') if prepare_returns: returns = _utils._prepare_returns(returns, rf, rolling_period) res = returns.rolling(rolling_period).mean() / \ returns.rolling(rolling_period).std() if annualize: res = res * _np.sqrt( 1 if periods_per_year is None else periods_per_year) return res def sortino(returns, rf=0, periods=252, annualize=True, smart=False): """ Calculates the Sortino ratio of excess returns If rf is non-zero, you must specify periods.
In this case, rf is assumed to be expressed in yearly (annualized) terms Calculation is based on this paper by Red Rock Capital http://www.redrockcapital.com/Sortino__A__Sharper__Ratio_Red_Rock_Capital.pdf """ if rf != 0 and periods is None: raise Exception('Must provide periods if rf != 0') returns = _utils._prepare_returns(returns, rf, periods) downside = _np.sqrt((returns[returns < 0] ** 2).sum() / len(returns)) if smart: # penalize sortino with auto correlation downside = downside * autocorr_penalty(returns) res = returns.mean() / downside if annualize: return res * _np.sqrt( 1 if periods is None else periods) return res def smart_sortino(returns, rf=0, periods=252, annualize=True): return sortino(returns, rf, periods, annualize, True) def rolling_sortino(returns, rf=0, rolling_period=126, annualize=True, periods_per_year=252, **kwargs): if rf != 0 and rolling_period is None: raise Exception('Must provide periods if rf != 0') if kwargs.get("prepare_returns", True): returns = _utils._prepare_returns(returns, rf, rolling_period) downside = returns.rolling(rolling_period).apply( lambda x: (x.values[x.values < 0]**2).sum()) / rolling_period res = returns.rolling(rolling_period).mean() / _np.sqrt(downside) if annualize: res = res * _np.sqrt( 1 if periods_per_year is None else periods_per_year) return res def adjusted_sortino(returns, rf=0, periods=252, annualize=True, smart=False): """ <NAME>'s version of the Sortino ratio allows for direct comparisons to the Sharpe. See here for more info: https://archive.is/wip/2rwFW """ data = sortino( returns, rf, periods=periods, annualize=annualize, smart=smart) return data / _sqrt(2) def omega(returns, rf=0.0, required_return=0.0, periods=252): """ Determines the Omega ratio of a strategy. See https://en.wikipedia.org/wiki/Omega_ratio for more details. """ if len(returns) < 2: return _np.nan if required_return <= -1: return _np.nan returns = _utils._prepare_returns(returns, rf, periods) if periods == 1: return_threshold = required_return else: return_threshold = (1 + required_return) ** (1. / periods) - 1 returns_less_thresh = returns - return_threshold numer = returns_less_thresh[returns_less_thresh > 0.0].sum().values[0] denom = -1.0 * returns_less_thresh[returns_less_thresh < 0.0].sum().values[0] if denom > 0.0: return numer / denom return _np.nan def gain_to_pain_ratio(returns, rf=0, resolution="D"): """ <NAME>'s GPR. See here for more info: https://archive.is/wip/2rwFW """ returns = _utils._prepare_returns(returns, rf).resample(resolution).sum() downside = abs(returns[returns < 0].sum()) return returns.sum() / downside def cagr(returns, rf=0., compounded=True):
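# --- Hedged mini-example (editor's addition) of the Sharpe/Sortino math used
# above, on synthetic daily returns.  It skips _prepare_returns() and the
# "smart" autocorrelation penalty, so it sketches the formulas rather than
# reproducing quantstats.stats.sharpe() / sortino() exactly.
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
daily = pd.Series(rng.normal(0.0005, 0.01, 252 * 3))        # ~3 years of fake daily returns

sharpe_annual = daily.mean() / daily.std(ddof=1) * np.sqrt(252)
downside = np.sqrt((daily[daily < 0] ** 2).sum() / len(daily))
sortino_annual = daily.mean() / downside * np.sqrt(252)

print(f"annualised Sharpe:  {sharpe_annual:.2f}")
print(f"annualised Sortino: {sortino_annual:.2f}")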
<gh_stars>10-100 import base64 from collections import defaultdict import copy import json from numbers import Number import re from django.conf import settings from django.contrib.sites.shortcuts import get_current_site from django.core.cache import cache from django.core.files.base import ContentFile from django.db.transaction import atomic from django.utils import timezone from django_comments.models import Comment from rest_framework import serializers from rest_framework.reverse import reverse from qatrack.api.attachments.serializers import AttachmentSerializer from qatrack.api.comments.serializers import CommentSerializer from qatrack.attachments.models import Attachment from qatrack.qa import models, signals from qatrack.qa.views.perform import CompositePerformer, UploadHandler from qatrack.qatrack_core.dates import parse_date, parse_datetime from qatrack.qatrack_core.serializers import QATrackJSONEncoder from qatrack.service_log import models as sl_models BASE64_RE = re.compile("^([A-Za-z0-9+/]{4})*([A-Za-z0-9+/]{4}|[A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{2}==)$") class FrequencySerializer(serializers.HyperlinkedModelSerializer): class Meta: model = models.Frequency fields = "__all__" class TestInstanceStatusSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = models.TestInstanceStatus fields = "__all__" class AutoReviewRuleSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = models.AutoReviewRule fields = "__all__" class AutoReviewRuleSetSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = models.AutoReviewRuleSet fields = "__all__" class ReferenceSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = models.Reference fields = "__all__" class ToleranceSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = models.Tolerance fields = "__all__" class CategorySerializer(serializers.HyperlinkedModelSerializer): class Meta: model = models.Category fields = "__all__" class TestSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = models.Test fields = "__all__" class SublistSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = models.Sublist fields = "__all__" class TestListSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = models.TestList fields = "__all__" class UnitTestInfoSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = models.UnitTestInfo fields = "__all__" class TestListMembershipSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = models.TestListMembership fields = "__all__" class UTCTestsObjectRelatedField(serializers.RelatedField): """ A custom field to use for the `tests_object` generic relationship. 
""" def to_representation(self, obj): if isinstance(obj, models.TestList): return reverse("testlist-detail", kwargs={'pk': obj.pk}, request=self.context['request']) return reverse("testlistcycle-detail", kwargs={'pk': obj.pk}, request=self.context['request']) class UnitTestCollectionSerializer(serializers.HyperlinkedModelSerializer): tests_object = UTCTestsObjectRelatedField(read_only=True) next_test_list = serializers.SerializerMethodField(read_only=True) next_day = serializers.SerializerMethodField(read_only=True) class Meta: model = models.UnitTestCollection fields = "__all__" def get_tests_object(self, obj): if isinstance(obj, models.TestList): return reverse("testlist-detail", kwargs={'pk': obj.pk}, request=self.context['request']) return reverse("testlistcycle-detail", kwargs={'pk': obj.pk}, request=self.context['request']) def get_next_test_list(self, obj): next_day, next_list = obj.next_list() if isinstance(next_list, models.TestList): return reverse("testlist-detail", kwargs={'pk': next_list.pk}, request=self.context['request']) return None def get_next_day(self, obj): next_day, next_list = obj.next_list() return next_day class TestInstanceSerializer(serializers.HyperlinkedModelSerializer): attachments = AttachmentSerializer(many=True, source="attachment_set", required=False) class Meta: model = models.TestInstance fields = "__all__" class TestInstanceCreator(serializers.HyperlinkedModelSerializer): class Meta: model = models.TestInstance fields = ["value", "string_value", "date_value", "datetime_value", "skipped", "comment", "macro"] class TestListInstanceSerializer(serializers.HyperlinkedModelSerializer): attachments = AttachmentSerializer(many=True, source="attachment_set", required=False) comments = CommentSerializer(many=True, required=False) test_instances = TestInstanceSerializer(many=True, source="testinstance_set", required=False) site_url = serializers.SerializerMethodField(read_only=True) class Meta: model = models.TestListInstance fields = "__all__" def get_site_url(self, obj): if obj: return reverse("view_test_list_instance", kwargs={'pk': obj.pk}, request=self.context['request']) return "" class TestListInstanceCreator(serializers.HyperlinkedModelSerializer): work_completed = serializers.DateTimeField(default=lambda: timezone.now()) comment = serializers.CharField(required=False) tests = serializers.DictField() status = serializers.HyperlinkedRelatedField( view_name="testinstancestatus-detail", queryset=models.TestInstanceStatus.objects.all(), required=False, ) return_to_service_qa = serializers.HyperlinkedRelatedField( view_name="returntoserviceqa-detail", queryset=sl_models.ReturnToServiceQA.objects.all(), required=False, ) unit_test_collection = serializers.HyperlinkedRelatedField( view_name="unittestcollection-detail", queryset=models.UnitTestCollection.objects.all(), ) attachments = serializers.ListField(required=False) # made read_only since we get the test list from the UTC & day test_list = serializers.HyperlinkedRelatedField( view_name="testlist-detail", read_only=True, ) site_url = serializers.SerializerMethodField(read_only=True) class Meta: model = models.TestListInstance exclude = [ "modified", "modified_by", "created", ] def get_site_url(self, obj): if obj: return reverse("view_test_list_instance", kwargs={'pk': obj.pk}, request=self.context['request']) return "" def validate_tests(self, tests): err_fields = ['"%s"' % slug for slug, data in tests.items() if not self.valid_test(data)] if err_fields: fields = ', '.join(err_fields) msg = '%s field(s) have 
errors. Test data must be a dictionary ' % fields raise serializers.ValidationError(msg) return tests def validate_attachments(self, attachments): attach_objs = [] for attach in attachments: is_dict = isinstance(attach, dict) if not is_dict or 'filename' not in attach or 'value' not in attach: msg = ( '`attachments` field must be list of form ' '[{"filename": "file_name.txt", "value": "<base64 encoded bytes|text>", ' '"encoding": "<base64|text>], ...]' ) raise serializers.ValidationError(msg) attach_objs.append(self.make_attachment(attach)) return attach_objs def make_attachment(self, data): content = data['value'] if data.get("encoding", "base64") == "base64": if not BASE64_RE.match(content): raise serializers.ValidationError("base64 encoding requested but content does not appear to be base64") content = base64.b64decode(content) user = self.context['request'].user return Attachment( attachment=ContentFile(content, data['filename']), comment="Uploaded %s by %s" % (timezone.now(), user.username), label=data['filename'], created_by=user, ) def valid_test(self, test_data): is_dict = isinstance(test_data, dict) return is_dict def add_data_from_instance(self, data): tis = self.instance.testinstance_set.select_related( "unit_test_info", "unit_test_info__test", ) if "tests" not in data: data['tests'] = {} for ti in tis: test = ti.unit_test_info.test slug = test.slug if slug not in data['tests']: if test.is_upload(): upload = ti.get_value() data['tests'][slug] = { 'value': base64.b64encode(upload.attachment.read()).decode(), 'encoding': 'base64', 'filename': ti.string_value, 'comment': ti.comment, } else: data['tests'][slug] = { 'value': ti.get_value(), 'comment': ti.comment, } elif slug in data['tests'] and data['tests'][slug].get('skipped'): data['tests'][slug] = { 'value': None, 'comment': data['tests'][slug].get("comment", ti.comment), 'skipped': True, } for key in ["work_completed", "work_started", "in_progress", "include_for_scheduling", "user_key"]: data[key] = data.get(key, getattr(self.instance, key)) def validate(self, data): post_data = copy.deepcopy(data) validated_data = super(TestListInstanceCreator, self).validate(data) if self.instance: self.add_data_from_instance(validated_data) validated_data = self.preprocess(validated_data) test_qs = self.tl.all_tests().values_list("slug", "type", "calculation_procedure") missing = [] wrong_types = [] invalid_autos = [] msgs = [] auto_types = [models.CONSTANT] + list(models.CALCULATED_TYPES) for slug, type_, procedure in test_qs: if slug not in validated_data['tests']: missing.append(slug) continue skipped = validated_data['tests'][slug].get("skipped") provided_val = post_data.get('tests', {}).get(slug, {}).get("value") validated_val = validated_data['tests'][slug].get("value") if not skipped and type_ not in auto_types and not self.type_okay(type_, validated_val): wrong_types.append(slug) if type_ in auto_types and not self.autovalue_ok(validated_val, provided_val): invalid_autos.append(slug) elif type_ in auto_types and not self.type_okay(type_, validated_val) and not skipped: wrong_types.append(slug) d = validated_data['tests'][slug] if type_ in models.STRING_TYPES and slug in validated_data['tests']: d['string_value'] = d.pop('value', "") elif type_ == models.DATE and slug in validated_data['tests']: d['date_value'] = parse_date(d.pop('value', "")) elif type_ == models.DATETIME and slug in validated_data['tests']: dt = parse_datetime(d.pop('value', "")) dt = timezone.make_aware(dt) if dt and timezone.is_naive(dt) else dt 
d['datetime_value'] = dt elif type_ == models.UPLOAD and slug in validated_data['tests']: # remove base64 data d.pop('value', "") # string value needs to be set to attachment id for later editing d['string_value'] = self.ti_attachments[slug][0] d['json_value'] = json.dumps(self.ti_upload_analysis_data[slug], cls=QATrackJSONEncoder) if missing: msgs.append("Missing data for tests: %s" % ', '.join(missing)) if wrong_types: msg = '\n'.join([ "Wrong value type (number/string) for tests: %s" % ', '.join(wrong_types), ( "If these are composite tests with missing dependencies, " "they should be marked as skipped in your request " "(e.g. {'tests': {'%s': {'skipped': True}}})" % wrong_types[0] ), ]) msgs.append(msg) if invalid_autos: msgs.append( "The following tests are calculated automatically and should not have values " "provided: %s" % ', '.join(invalid_autos) ) if validated_data['work_completed'] < validated_data['work_started']: msgs.append("work_completed date must be after work_started") if msgs: raise serializers.ValidationError('\n'.join(msgs)) return validated_data def type_okay(self, type_, val): if type_ in models.STRING_TYPES + models.DATE_TYPES and not isinstance(val, str): try: json.dumps(val) except Exception: return False elif type_ in models.NUMERICAL_TYPES and not isinstance(val, Number): return False return True def autovalue_ok(self, calculated, provided): not_provided = provided in (None, "") values_match = calculated == provided return not_provided or values_match def preprocess(self, validated_data): if self.instance: self.utc = self.instance.unit_test_collection self.day = self.instance.day self.tl = self.instance.test_list else: self.utc = validated_data['unit_test_collection'] self.day = validated_data.get('day') if self.utc.content_type.model == "testlist" and self.day is None: self.day = 0 elif self.utc.content_type.model == "testlistcycle" and self.day is None: raise serializers.ValidationError("You must include the 'day' key when performing a Test List Cycle") try: self.day = int(self.day) except TypeError: raise serializers.ValidationError("The 'day' key must be an integer") min_day, max_day = 0, len(self.utc.tests_object) - 1 if not (min_day <= self.day <= max_day): raise serializers.ValidationError( "'%s' is not a valid day for this Test Collection. 
" "Day must be between %s & %s" % (self.day, min_day, max_day) ) self.day, self.tl = self.utc.get_list(day=self.day) test_qs = self.tl.all_tests().values_list("id", "slug", "type", "constant_value") has_composite = False uploads = [] for pk, slug, type_, cv in test_qs: if type_ == models.CONSTANT: # here we get data for the test (comments etc) and make sure the constant value # is set correctly (so the user can't send an incorrect value for the constant value) d = validated_data['tests'].get(slug, {}) v = d.get("value") if v not in ("", None) and v != cv: raise serializers.ValidationError("Incorrect constant value passed for %s" % slug) d['value'] = cv validated_data['tests'][slug] = d elif type_ == models.UPLOAD: d = validated_data['tests'].get(slug, {}) uploads.append((pk, slug, d)) elif type_ in models.CALCULATED_TYPES: has_composite = True if slug not in validated_data['tests']: validated_data['tests'][slug] = {'value': ''} elif 'value' not in validated_data['tests'][slug]: validated_data['tests'][slug]['value'] = "" self.ti_attachments = defaultdict(list) self.ti_upload_analysis_data = {} user = self.context['request'].user if has_composite or uploads: comp_calc_data = self.data_to_composite(validated_data) for pk, slug, d in uploads: comp_calc_data['test_id'] = pk try: fname = d['filename'] except KeyError: raise serializers.ValidationError("%s is missing the filename field" % slug) content = d['value'] if d.get("encoding", "base64") == "base64": if not BASE64_RE.match(content): raise serializers.ValidationError( "base64 encoding requested but content does not appear to be base64" ) content = base64.b64decode(content) f = ContentFile(content, fname)
from abc import abstractmethod from dataclasses import dataclass from html import escape from typing import Dict, Generator, Union, Iterable class BaseTag: """ In HTML5 there are six different kinds of elements: void elements, the template element, raw text elements, escapable raw text elements, foreign elements, and normal elements. Tags are used to delimit the start and end of elements in the markup. To simplify the serialization code we _only_ distinguish between tags that need an end tag and those that don't. See https://html.spec.whatwg.org/multipage/syntax.html#elements-2. """ @abstractmethod def _to_html(self) -> Generator[str, None, None]: pass def to_html(self) -> str: return "".join(self._to_html()) Class = Union[str, Iterable[str]] Attr = Dict[str, Union[str, None]] @dataclass class SelfClosingTag(BaseTag): """ The start and end tags of certain normal elements can be omitted. Those that cannot be omitted must not be omitted. Void elements only have a start tag; end tags must not be specified for void elements. Foreign elements must either have a start tag and an end tag, or a start tag that is marked as self-closing, in which case they must not have an end tag. This class implements the serialization of tags that do _not_ have an end tag ("self-closing"). As such, SelfClosingTag instances do _not_ have children. See https://html.spec.whatwg.org/multipage/syntax.html#elements-2. """ tag: str attr: Attr class_: Class def _to_html(self) -> Generator[str, None, None]: yield "<" yield self.tag yield from class_to_html(self.class_) yield from attr_to_html(self.attr) yield "/>" Children = Union[ BaseTag, str, Iterable[Union[BaseTag, str]], ] @dataclass class FullTag(BaseTag): """ Raw text, escapable raw text, and normal elements have a start tag to indicate where they begin, and an end tag to indicate where they end. This class implements the serialization of tags that require an end tag. FullTags are allowed to have children. During serialization, those child tags are serialized in a recursive fashion. Raw text is not represented using a separate tag, but can be listed as a child. See https://html.spec.whatwg.org/multipage/syntax.html#elements-2. """ tag: str children: Children class_: Class attr: Attr def _to_html(self) -> Generator[str, None, None]: yield "<" yield self.tag yield from class_to_html(self.class_) yield from attr_to_html(self.attr) yield ">" yield from children_to_html(self.children) yield "</" yield self.tag yield ">" @dataclass class FullTagWithPrefix(FullTag): """ Supports adding a custom prefix to a full tag. This is useful for the html element, which typically has to be prefixed with a corresponding doctype. """ prefix: str def _to_html(self) -> Generator[str, None, None]: yield self.prefix yield from super()._to_html() @dataclass class DangerousHtml(BaseTag): """ By default, all strings are escaped to prevent cross-site scripting (XSS) attacks. DangerousHtml can be used to inject unescaped HTML. This should be used sparingly. It is useful as an escape hatch. """ html: str def _to_html(self) -> Generator[str, None, None]: yield self.html @dataclass class Fragment(BaseTag): """ Tags accept lists of children. Sometimes it is desirable to serialize those children without wrapping the resulting HTML into start and end tags. 
""" children: Children def _to_html(self) -> Generator[str, None, None]: yield from children_to_html(self.children) def children_to_html(children: Children) -> Generator[str, None, None]: if isinstance(children, BaseTag): yield from children.to_html() elif isinstance(children, str): yield escape(children) elif isinstance(children, Iterable): for child in children: if isinstance(child, BaseTag): yield from child.to_html() elif isinstance(child, str): yield escape(child) def class_to_html(class_: Class) -> Generator[str, None, None]: """ Helper function used for serializing a list of class names to a corresponding HTML attribute. Technically this could be avoided by converting the class names to attributes prior to constructing the node, but this would incur additional overhead as we'd then have to reconstruct the attribute (and thus go over it twice). Given how common this use case is, it has been special cased. """ final = class_ if isinstance(class_, str) else " ".join(class_) if final == "": return yield ' class="' yield final yield '"' def attr_to_html(attr: Attr) -> Generator[str, None, None]: """ Attributes for an element are expressed inside the element's start tag. Attributes can be specified in four different ways: 1. Empty attribute syntax 2. Unquoted attribute value syntax 3. Single-quoted attribute value syntax 4. Double-quoted attribute value syntax Only 1. and 4. are supported. Empty attributes can be expressed by using the None value. See https://html.spec.whatwg.org/multipage/syntax.html#attributes-2. """ if len(attr) == 0: return for k in attr: yield " " yield k v = attr[k] if isinstance(v, str): yield '="' yield escape(v, quote=True) yield '"' # The following functions might seem a bit verbose, but is easier to type. # Currently Python does not support typing higher-order function that return # Callables with optional arguments. # Void elements # See https://html.spec.whatwg.org/multipage/syntax.html#elements-2. 
def Area(class_: Class = [], **attr: Union[str, None]) -> SelfClosingTag: return SelfClosingTag(tag="area", class_=class_, attr=attr) def Base(class_: Class = [], **attr: Union[str, None]) -> SelfClosingTag: return SelfClosingTag(tag="base", class_=class_, attr=attr) def Br(class_: Class = [], **attr: Union[str, None]) -> SelfClosingTag: return SelfClosingTag(tag="br", class_=class_, attr=attr) def Col(class_: Class = [], **attr: Union[str, None]) -> SelfClosingTag: return SelfClosingTag(tag="col", class_=class_, attr=attr) def Embed(class_: Class = [], **attr: Union[str, None]) -> SelfClosingTag: return SelfClosingTag(tag="embed", class_=class_, attr=attr) def Hr(class_: Class = [], **attr: Union[str, None]) -> SelfClosingTag: return SelfClosingTag(tag="hr", class_=class_, attr=attr) def Img(class_: Class = [], **attr: Union[str, None]) -> SelfClosingTag: return SelfClosingTag(tag="img", class_=class_, attr=attr) def Input(class_: Class = [], **attr: Union[str, None]) -> SelfClosingTag: return SelfClosingTag(tag="input", class_=class_, attr=attr) def Link(class_: Class = [], **attr: Union[str, None]) -> SelfClosingTag: return SelfClosingTag(tag="link", class_=class_, attr=attr) def Meta(class_: Class = [], **attr: Union[str, None]) -> SelfClosingTag: return SelfClosingTag(tag="meta", class_=class_, attr=attr) def Param(class_: Class = [], **attr: Union[str, None]) -> SelfClosingTag: return SelfClosingTag(tag="param", class_=class_, attr=attr) def Source(class_: Class = [], **attr: Union[str, None]) -> SelfClosingTag: return SelfClosingTag(tag="source", class_=class_, attr=attr) def Track(class_: Class = [], **attr: Union[str, None]) -> SelfClosingTag: return SelfClosingTag(tag="track", class_=class_, attr=attr) def Wbr(class_: Class = [], **attr: Union[str, None]) -> SelfClosingTag: return SelfClosingTag(tag="wbr", class_=class_, attr=attr) # All other elements # The spec maintained by WHATWG misses an exhaustive list of all permissible # elements. Thus we're referring to W3C in this case. # See https://www.w3.org/TR/2011/WD-html-markup-20110113/elements.html. 
def A( children: Children = [], class_: Class = [], **attr: Union[str, None], ) -> FullTag: return FullTag(tag="a", children=children, class_=class_, attr=attr) def Abbr( children: Children = [], class_: Class = [], **attr: Union[str, None], ) -> FullTag: return FullTag(tag="abbr", children=children, class_=class_, attr=attr) def Address( children: Children = [], class_: Class = [], **attr: Union[str, None], ) -> FullTag: return FullTag(tag="address", children=children, class_=class_, attr=attr) def Article( children: Children = [], class_: Class = [], **attr: Union[str, None], ) -> FullTag: return FullTag(tag="article", children=children, class_=class_, attr=attr) def Aside( children: Children = [], class_: Class = [], **attr: Union[str, None], ) -> FullTag: return FullTag(tag="aside", children=children, class_=class_, attr=attr) def Audio( children: Children = [], class_: Class = [], **attr: Union[str, None], ) -> FullTag: return FullTag(tag="audio", children=children, class_=class_, attr=attr) def B( children: Children = [], class_: Class = [], **attr: Union[str, None], ) -> FullTag: return FullTag(tag="b", children=children, class_=class_, attr=attr) def Bdi( children: Children = [], class_: Class = [], **attr: Union[str, None], ) -> FullTag: return FullTag(tag="bdi", children=children, class_=class_, attr=attr) def Bdo( children: Children = [], class_: Class = [], **attr: Union[str, None], ) -> FullTag: return FullTag(tag="bdo", children=children, class_=class_, attr=attr) def Blockquote( children: Children = [], class_: Class = [], **attr: Union[str, None], ) -> FullTag: return FullTag( tag="blockquote", children=children, class_=class_, attr=attr, ) def Body( children: Children = [], class_: Class = [], **attr: Union[str, None], ) -> FullTag: return FullTag(tag="body", children=children, class_=class_, attr=attr) def Button( children: Children = [], class_: Class = [], **attr: Union[str, None], ) -> FullTag: return FullTag(tag="button", children=children, class_=class_, attr=attr) def Canvas( children: Children = [], class_: Class = [], **attr: Union[str, None], ) -> FullTag: return FullTag(tag="canvas", children=children, class_=class_, attr=attr) def Caption( children: Children = [], class_: Class = [], **attr: Union[str, None], ) -> FullTag: return FullTag(tag="caption", children=children, class_=class_, attr=attr) def Cite( children: Children = [], class_: Class = [], **attr: Union[str, None], ) -> FullTag: return FullTag(tag="cite", children=children, class_=class_, attr=attr) def Code( children: Children = [], class_: Class = [], **attr: Union[str, None], ) -> FullTag: return FullTag(tag="code", children=children, class_=class_, attr=attr) def Colgroup( children: Children = [], class_: Class = [], **attr: Union[str, None], ) -> FullTag: return FullTag(tag="colgroup", children=children, class_=class_, attr=attr) def Command( children: Children = [], class_: Class = [], **attr: Union[str, None], ) -> FullTag: return FullTag(tag="command", children=children, class_=class_, attr=attr) def Datalist( children: Children = [], class_: Class = [], **attr: Union[str, None], ) -> FullTag: return FullTag(tag="datalist",
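# Usage sketch for the helpers above, restricted to factories that already
# appear in this excerpt (A, Img, Input). The URL, class names and attribute
# values are arbitrary; the expected output follows directly from
# class_to_html, attr_to_html and children_to_html.
link = A(
    children=["Read the spec"],
    class_=["external", "docs"],
    href="https://html.spec.whatwg.org/",
    target="_blank",
)
print(link.to_html())
# <a class="external docs" href="https://html.spec.whatwg.org/" target="_blank">Read the spec</a>

# Void elements serialize without an end tag; a None value yields an empty attribute.
print(Img(class_="logo", src="/logo.png", alt="").to_html())
# <img class="logo" src="/logo.png" alt=""/>

print(Input(type="checkbox", checked=None).to_html())
# <input type="checkbox" checked/>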
import random import os import itertools import tensorflow as tf from tensorflow import newaxis as ax from basic.attention_modules import hotpot_biattention, zhong_selfatt from basic.batcher import get_batch_feed_dict from my.tensorflow import get_initializer from my.tensorflow.nn import softsel, get_logits, linear_logits, highway_network, multi_conv1d, dense from my.tensorflow.ops import bi_cudnn_rnn_encoder from snmn.nmn_model import NMN_Model def get_multi_gpu_models(config, emb_mat=None): models = [] with tf.variable_scope(tf.get_variable_scope()) as vscope: for gpu_idx in range(config.num_gpus): with tf.name_scope("model_{}".format(gpu_idx)) as scope, tf.device("/{}:{}".format(config.device_type, gpu_idx)): if gpu_idx > 0: tf.get_variable_scope().reuse_variables() model = Model(config, scope, emb_mat, rep=gpu_idx == 0) models.append(model) return models class Model(object): def __init__(self, config, scope, emb_mat, rep=True): self.scope = scope self.config = config self.emb_mat = emb_mat self.global_step = tf.get_variable('global_step', shape=[], dtype='int32', initializer=tf.constant_initializer(0), trainable=False) N, M, JX, JQ, VW, VC, W = \ config.batch_size, config.max_num_sents, config.max_sent_size, \ config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.max_word_size self.x = tf.placeholder('int32', [N, None, None], name='x') self.cx = tf.placeholder('int32', [N, None, None, W], name='cx') self.x_mask = tf.placeholder('bool', [N, None, None], name='x_mask') if config.dataset == 'hotpotqa': self.q_type_labels = tf.placeholder('int32', [N, None], name='q_type_labels') self.q_yesno_labels = tf.placeholder('int32', [N, None], name='q_yesno_labels') self.yes_no = tf.placeholder('bool', [N], name='yes_no') self.max_para_size = tf.placeholder('int32', [], name='max_para_size') self.q = tf.placeholder('int32', [N, None], name='q') self.cq = tf.placeholder('int32', [N, None, W], name='cq') self.q_mask = tf.placeholder('bool', [N, None], name='q_mask') self.y = tf.placeholder('bool', [N, None, None], name='y') self.y2 = tf.placeholder('bool', [N, None, None], name='y2') self.wy = tf.placeholder('bool', [N, None, None], name='wy') self.is_train = tf.placeholder('bool', [], name='is_train') self.new_emb_mat = tf.placeholder('float', [None, config.word_emb_size], name='new_emb_mat') self.na = tf.placeholder('bool', [N], name='na') if config.supervise_bridge_entity: self.bridge_word_in_context_ids = tf.placeholder('int32', [N, None], name='bridge_word_in_context_ids') self.bridge_na = tf.placeholder('bool', [N], name='bridge_na') # if config.reasoning_layer == 'snmn': # self.module_prob_feed = tf.placeholder('float32', [3, N, 4], name='module_prob_feed') # Define misc self.tensor_dict = {} # Forward outputs / loss inputs self.logits = None self.yp = None self.var_list = None self.na_prob = None # Loss outputs self.loss = None self._build_forward() self._build_loss() self.var_ema = None if rep: self._build_var_ema() if config.mode == 'train': self._build_ema() self.summary = tf.summary.merge_all() self.summary = tf.summary.merge(tf.get_collection("summaries", scope=self.scope)) def _build_forward(self): config = self.config x_len = tf.reduce_sum(tf.cast(self.x_mask, 'int32'), 2) # [N, M] q_len = tf.reduce_sum(tf.cast(self.q_mask, 'int32'), 1) # [N] N, M, JX, JQ, VW, VC, d, W = \ config.batch_size, config.max_num_sents, config.max_sent_size, \ config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.hidden_size, \ config.max_word_size JX = 
tf.shape(self.x)[2] JQ = tf.shape(self.q)[1] M = tf.shape(self.x)[1] dc, dw, dco = config.char_emb_size, config.word_emb_size, config.char_out_size with tf.variable_scope("emb"): if config.use_char_emb: with tf.variable_scope("emb_var"), tf.device("/cpu:0"): char_emb_mat = tf.get_variable("char_emb_mat", shape=[VC, dc], dtype='float') with tf.variable_scope("char"): Acx = tf.nn.embedding_lookup(char_emb_mat, self.cx) # [N, M, JX, W, dc] Acq = tf.nn.embedding_lookup(char_emb_mat, self.cq) # [N, JQ, W, dc] Acx = tf.reshape(Acx, [-1, JX, W, dc]) Acq = tf.reshape(Acq, [-1, JQ, W, dc]) filter_sizes = list(map(int, config.out_channel_dims.split(','))) heights = list(map(int, config.filter_heights.split(','))) assert sum(filter_sizes) == dco, (filter_sizes, dco) with tf.variable_scope("conv"): xx = multi_conv1d(Acx, filter_sizes, heights, "VALID", self.is_train, config.keep_prob, scope="xx") if config.share_cnn_weights: tf.get_variable_scope().reuse_variables() qq = multi_conv1d(Acq, filter_sizes, heights, "VALID", self.is_train, config.keep_prob, scope="xx") else: qq = multi_conv1d(Acq, filter_sizes, heights, "VALID", self.is_train, config.keep_prob, scope="qq") xx = tf.reshape(xx, [-1, M, JX, dco]) qq = tf.reshape(qq, [-1, JQ, dco]) if config.use_word_emb: with tf.variable_scope("emb_var"), tf.device("/cpu:0"): if config.mode == 'train': word_emb_mat = tf.get_variable("word_emb_mat", dtype='float', shape=[VW, dw], initializer=get_initializer(self.emb_mat)) else: word_emb_mat = tf.get_variable("word_emb_mat", shape=[VW, dw], dtype='float') if config.use_glove_for_unk: word_emb_mat = tf.concat(axis=0, values=[word_emb_mat, self.new_emb_mat]) with tf.name_scope("word"): Ax = tf.nn.embedding_lookup(word_emb_mat, self.x) # [N, M, JX, d] Aq = tf.nn.embedding_lookup(word_emb_mat, self.q) # [N, JQ, d] self.tensor_dict['x'] = Ax self.tensor_dict['q'] = Aq if config.use_char_emb: xx = tf.concat(axis=3, values=[xx, Ax]) # [N, M, JX, di] qq = tf.concat(axis=2, values=[qq, Aq]) # [N, JQ, di] else: xx = Ax qq = Aq # highway network if config.highway: with tf.variable_scope("highway"): xx = highway_network(xx, config.highway_num_layers, True, wd=config.wd, is_train=self.is_train, input_keep_prob=config.highway_keep_prob) tf.get_variable_scope().reuse_variables() qq = highway_network(qq, config.highway_num_layers, True, wd=config.wd, is_train=self.is_train, input_keep_prob=config.highway_keep_prob) self.tensor_dict['xx'] = xx self.tensor_dict['qq'] = qq with tf.variable_scope("prepro"): with tf.variable_scope('u1'): u, _ = bi_cudnn_rnn_encoder('lstm', config.hidden_size, 1, 1-config.input_keep_prob, qq, q_len, self.is_train) if config.reasoning_layer == 'snmn': u_st = zhong_selfatt(u[:, ax, :, :], config.hidden_size*2, seq_len=q_len, transform='squeeze') if config.share_lstm_weights: with tf.variable_scope('u1', reuse=True): h, _ = bi_cudnn_rnn_encoder('lstm', config.hidden_size, 1, 1-config.input_keep_prob, tf.squeeze(xx, axis=1), tf.squeeze(x_len, axis=1), self.is_train) h = h[:, ax, :, :] else: with tf.variable_scope('h1'): h, _ = bi_cudnn_rnn_encoder('lstm', config.hidden_size, 1, 1-config.input_keep_prob, tf.squeeze(xx, axis=1), tf.squeeze(x_len, axis=1), self.is_train) h = h[:, ax, :, :] self.tensor_dict['u'] = u self.tensor_dict['h'] = h with tf.variable_scope("main"): context_dim = config.hidden_size * 2 ### Reconstruct before bidaf because otherwise we need to build a larger query tensor. 
x_mask = self.x_mask x_len_squeeze = tf.squeeze(x_len, axis=1) p0 = h ### Main model if config.reasoning_layer == 'snmn': module_names = ['_Find', '_Compare', '_Relocate', '_NoOp'] self.snmn = NMN_Model(config, u, qq, u_st, self.q_mask, q_len, p0, x_mask, x_len, module_names, \ self.is_train) self.u_weights = self.snmn.cv_list # question word distribution at each step self.module_prob_list = self.snmn.module_prob_list # module probability at each step g0 = tf.squeeze(self.snmn.att_second, axis=-1) if config.supervise_bridge_entity: self.hop0_logits = self.snmn.bridge_logits if config.self_att: with tf.variable_scope('g0'): g0, _ = bi_cudnn_rnn_encoder('lstm', config.hidden_size, 1, 1-config.input_keep_prob, tf.squeeze(g0, axis=1), x_len_squeeze, self.is_train) g0 = g0[:, ax, :, :] g0 = hotpot_biattention(config, self.is_train, g0, tf.squeeze(g0, axis=1), h_mask=x_mask, u_mask=tf.squeeze(x_mask, axis=1), scope="self_att", tensor_dict=self.tensor_dict) g0 = tf.layers.dense(g0, config.hidden_size*2) with tf.variable_scope('g1'): g1, _ = bi_cudnn_rnn_encoder('lstm', config.hidden_size, 1, 1-config.input_keep_prob, tf.squeeze(g0, axis=1), tf.squeeze(x_len, axis=1), self.is_train) g1 = g1[:, ax, :, :] logits = get_logits([g1, g0], d, True, wd=config.wd, input_keep_prob=config.input_keep_prob, mask=x_mask, is_train=self.is_train, func=config.answer_func, scope='logits1') with tf.variable_scope('g2'): a1i = softsel(tf.reshape(g1, [N, M * JX, 2 * d]), tf.reshape(logits, [N, M * JX])) a1i = tf.tile(a1i[:, ax, ax, :], [1, M, JX, 1]) g2, _ = bi_cudnn_rnn_encoder('lstm', config.hidden_size, 1, 1-config.input_keep_prob, tf.squeeze(tf.concat(axis=3, values=[g0, g1, a1i, g0 * a1i]), axis=1), x_len_squeeze, self.is_train) g2 = g2[:, ax, :, :] logits2 = get_logits([g2, g1], d, True, wd=config.wd, input_keep_prob=config.input_keep_prob, mask=x_mask, is_train=self.is_train, func=config.answer_func, scope='logits2') if config.dataset == 'hotpotqa': with tf.variable_scope('g3'): if config.nmn_qtype_class == 'mem_last': g3 = tf.concat([self.snmn.mem_last[:, ax, :], u_st[:, ax, :]], axis=-1) elif config.nmn_qtype_class == 'ctrl_st': g3 = self.snmn.c_st_list[0][:, ax, :] else: raise NotImplementedError self.predict_type = dense(g3, 2, scope='predict_type') g3_1 = self.snmn.mem_last[:, ax, :] self.predict_yesno = dense(g3_1, 2, scope='predict_yesno') flat_logits = tf.reshape(logits, [-1, M * JX]) flat_yp = tf.nn.softmax(flat_logits) # [-1, M * JX] flat_logits2 = tf.reshape(logits2, [-1, M * JX]) flat_yp2 = tf.nn.softmax(flat_logits2) yp = tf.reshape(flat_yp, [-1, M, JX]) yp2 = tf.reshape(flat_yp2, [-1, M, JX]) wyp = tf.nn.sigmoid(logits2) self.logits = flat_logits self.logits2 = flat_logits2 self.yp = yp self.yp2 = yp2 self.wyp = wyp if config.dataset == 'hotpotqa': flat_predict_type = tf.reshape(self.predict_type, [-1, 2]) flat_yp3 = tf.nn.softmax(flat_predict_type) self.yp3 = tf.reshape(flat_yp3, [-1, 1, 2]) flat_predict_yesno = tf.reshape(self.predict_yesno, [-1, 2]) flat_yp3_yesno = tf.nn.softmax(flat_predict_yesno) self.yp3_yesno = tf.reshape(flat_yp3_yesno, [-1, 1, 2]) def _build_loss(self): config = self.config M = tf.shape(self.x)[1] JX = tf.shape(self.x)[2] # loss_mask will mask out hotpotqa examples with yes/no type answer. 
loss_mask = tf.logical_and(tf.cast(tf.reduce_max(tf.cast(self.q_mask, 'float'), 1), 'bool'), tf.logical_not(self.na)) if config.supervise_bridge_entity: bridge_loss_mask = tf.cast(tf.logical_and(loss_mask, tf.logical_not(self.bridge_na)), 'float') if config.dataset == 'hotpotqa': yesno_mask = tf.cast(tf.logical_and(loss_mask, self.yes_no), 'float') loss_mask = tf.logical_and(loss_mask, tf.logical_not(self.yes_no)) loss_mask = tf.cast(loss_mask, 'float') q_loss_mask = tf.reduce_max(tf.cast(self.q_mask, 'float'), 1) losses = tf.nn.softmax_cross_entropy_with_logits( logits=self.logits, labels=tf.cast(tf.reshape(self.y, [-1, M * JX]), 'float')) losses2 = tf.nn.softmax_cross_entropy_with_logits( logits=self.logits2, labels=tf.cast(tf.reshape(self.y2, [-1, M * JX]), 'float')) if config.dataset == 'hotpotqa': losses_type = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.predict_type, labels=self.q_type_labels) ce_loss_type = tf.reduce_mean(q_loss_mask * losses_type, name='loss_q_type') tf.summary.scalar(ce_loss_type.op.name, ce_loss_type) tf.add_to_collection('ema/scalar', ce_loss_type) tf.add_to_collection("losses", ce_loss_type) losses_yesno = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.predict_yesno, labels=self.q_yesno_labels) ce_loss_yesno = tf.reduce_mean(yesno_mask * losses_yesno, name='loss_q_yesno') * config.yesno_loss_coeff tf.summary.scalar(ce_loss_yesno.op.name, ce_loss_yesno) tf.add_to_collection('ema/scalar', ce_loss_yesno) tf.add_to_collection("losses", ce_loss_yesno) ce_loss = tf.reduce_mean(loss_mask * losses) ce_loss2 = tf.reduce_mean(loss_mask * losses2) tf.add_to_collection('losses', ce_loss) tf.add_to_collection("losses", ce_loss2) self.loss = tf.add_n(tf.get_collection('losses', scope=self.scope), name='loss') tf.summary.scalar(self.loss.op.name, self.loss) tf.add_to_collection('ema/scalar', self.loss) if config.supervise_bridge_entity: bridge_word_ids = tf.squeeze(tf.slice(self.bridge_word_in_context_ids, [0, 0], [-1, 1]), axis=1) hop0_attn_losses = tf.nn.sparse_softmax_cross_entropy_with_logits( logits=self.hop0_logits, labels=bridge_word_ids) hop0_attn_loss = tf.reduce_mean(hop0_attn_losses * bridge_loss_mask, name='hop0_attn_loss') tf.summary.scalar('hop0_attn_loss', hop0_attn_loss) tf.add_to_collection('ema/scalar', hop0_attn_loss) self.loss += config.hop0_attn_loss_coeff * hop0_attn_loss tf.summary.scalar('total_loss', self.loss) def _build_ema(self): self.ema = tf.train.ExponentialMovingAverage(self.config.decay) ema = self.ema tensors = tf.get_collection("ema/scalar", scope=self.scope) + tf.get_collection("ema/vector", scope=self.scope) ema_op = ema.apply(tensors) for var in tf.get_collection("ema/scalar", scope=self.scope): ema_var = ema.average(var) tf.summary.scalar(ema_var.op.name, ema_var) for var in tf.get_collection("ema/vector", scope=self.scope): ema_var = ema.average(var) tf.summary.histogram(ema_var.op.name, ema_var) with tf.control_dependencies([ema_op]): self.loss = tf.identity(self.loss) def _build_var_ema(self): self.var_ema = tf.train.ExponentialMovingAverage(self.config.var_decay) ema = self.var_ema ema_op = ema.apply(tf.trainable_variables()) with tf.control_dependencies([ema_op]): self.loss = tf.identity(self.loss) def get_loss(self): return self.loss def get_global_step(self): return self.global_step def get_var_list(self, model_name): if model_name == 'model_network': var_list = [var for var in tf.trainable_variables() if 'reward_network' not in var.name and 'ranker' not in var.name and 'controller'
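# Minimal sketch (illustrative names, not taken from the model) of the TF1 EMA
# idiom behind _build_ema and _build_var_ema: an ExponentialMovingAverage
# update op is created for the tracked tensors and chained onto the loss with
# control_dependencies, so every evaluation of the loss also refreshes the
# shadow averages.
import tensorflow as tf  # TF1-style graph mode, as in the model above

w = tf.get_variable("w", shape=[4], initializer=tf.ones_initializer())
loss = tf.reduce_sum(tf.square(w))

ema = tf.train.ExponentialMovingAverage(decay=0.999)
ema_op = ema.apply(tf.trainable_variables())          # shadow copy of every weight
tf.summary.scalar("w_ema_norm", tf.norm(ema.average(w)))

with tf.control_dependencies([ema_op]):
    loss = tf.identity(loss)                          # fetching `loss` now also runs ema_op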
Leakage': 0.00611897, 'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781, 'Renaming Unit/Peak Dynamic': 4.56169, 'Renaming Unit/Runtime Dynamic': 0.220301, 'Renaming Unit/Subthreshold Leakage': 0.070483, 'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779, 'Runtime Dynamic': 6.09367, 'Subthreshold Leakage': 6.21877, 'Subthreshold Leakage with power gating': 2.58311}, {'Area': 32.0201, 'Execution Unit/Area': 7.68434, 'Execution Unit/Complex ALUs/Area': 0.235435, 'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646, 'Execution Unit/Complex ALUs/Peak Dynamic': 2.83407e-06, 'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202691, 'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111, 'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163, 'Execution Unit/Floating Point Units/Area': 4.6585, 'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156, 'Execution Unit/Floating Point Units/Peak Dynamic': 1.01201e-05, 'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033, 'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829, 'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061, 'Execution Unit/Gate Leakage': 0.120359, 'Execution Unit/Instruction Scheduler/Area': 1.66526, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.161674, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519, 'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913, 'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223, 'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562, 'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763, 'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.260775, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964, 'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262, 'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388, 'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608, 'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451, 'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.13163, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446, 'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.554079, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346, 'Execution Unit/Integer ALUs/Area': 0.47087, 'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291, 'Execution Unit/Integer ALUs/Peak Dynamic': 0.184908, 'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344, 'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222, 'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833, 'Execution Unit/Peak 
Dynamic': 4.1847, 'Execution Unit/Register Files/Area': 0.570804, 'Execution Unit/Register Files/Floating Point RF/Area': 0.208131, 'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788, 'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 1.91191e-06, 'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00678134, 'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698, 'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968, 'Execution Unit/Register Files/Gate Leakage': 0.000622708, 'Execution Unit/Register Files/Integer RF/Area': 0.362673, 'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992, 'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0490392, 'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0501522, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675, 'Execution Unit/Register Files/Peak Dynamic': 0.0490411, 'Execution Unit/Register Files/Runtime Dynamic': 0.0569335, 'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387, 'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643, 'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912, 'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402, 'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.103313, 'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.26523, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543, 'Execution Unit/Runtime Dynamic': 1.48431, 'Execution Unit/Subthreshold Leakage': 1.79543, 'Execution Unit/Subthreshold Leakage with power gating': 0.688821, 'Gate Leakage': 0.368936, 'Instruction Fetch Unit/Area': 5.85939, 'Instruction Fetch Unit/Branch Predictor/Area': 0.138516, 'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00232673, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00232673, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00209279, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344, 'Instruction 
Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000846362, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045, 'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838, 'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732, 'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05, 'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602, 'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00072044, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733, 'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0074667, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282, 'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954, 'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758, 'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867, 'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0199431, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357, 'Instruction Fetch Unit/Gate Leakage': 0.0589979, 'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323, 'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05, 'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827, 'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0482126, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682, 'Instruction Fetch Unit/Instruction Cache/Area': 3.14635, 'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931, 'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.06673, 'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.180483, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386, 'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799, 'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493, 'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404, 'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.163752, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104, 'Instruction Fetch Unit/Peak Dynamic': 5.43408, 'Instruction Fetch Unit/Runtime Dynamic': 0.419857, 'Instruction Fetch Unit/Subthreshold Leakage': 0.932286, 'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843, 'L2/Area': 4.53318, 'L2/Gate Leakage': 0.015464, 'L2/Peak Dynamic': 0.0375896, 
'L2/Runtime Dynamic': 0.00801335, 'L2/Subthreshold Leakage': 0.834142, 'L2/Subthreshold Leakage with power gating': 0.401066, 'Load Store Unit/Area': 8.80901, 'Load Store Unit/Data Cache/Area': 6.84535, 'Load Store Unit/Data Cache/Gate Leakage': 0.0279261, 'Load Store Unit/Data Cache/Peak Dynamic': 2.70657, 'Load Store Unit/Data Cache/Runtime Dynamic': 0.717905, 'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675, 'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085, 'Load Store Unit/Gate Leakage': 0.0350888, 'Load Store Unit/LoadQ/Area': 0.0836782, 'Load Store Unit/LoadQ/Gate Leakage': 0.00059896, 'Load Store Unit/LoadQ/Peak Dynamic': 0.0475403, 'Load Store Unit/LoadQ/Runtime Dynamic': 0.0475404, 'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961, 'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918, 'Load Store Unit/Peak Dynamic': 2.93107, 'Load Store Unit/Runtime Dynamic': 0.999899, 'Load Store Unit/StoreQ/Area': 0.322079, 'Load Store Unit/StoreQ/Gate Leakage': 0.00329971, 'Load Store Unit/StoreQ/Peak Dynamic': 0.117226, 'Load Store Unit/StoreQ/Runtime Dynamic': 0.234453, 'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621, 'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004, 'Load Store Unit/Subthreshold Leakage': 0.591321, 'Load Store Unit/Subthreshold Leakage with power gating': 0.283293, 'Memory Management Unit/Area': 0.4339, 'Memory Management Unit/Dtlb/Area': 0.0879726, 'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729, 'Memory Management Unit/Dtlb/Peak Dynamic': 0.041604, 'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0420158, 'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699, 'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485, 'Memory Management Unit/Gate Leakage': 0.00808595, 'Memory Management Unit/Itlb/Area': 0.301552, 'Memory Management Unit/Itlb/Gate Leakage': 0.00393464, 'Memory Management Unit/Itlb/Peak Dynamic': 0.190678, 'Memory Management Unit/Itlb/Runtime Dynamic': 0.0300401, 'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758, 'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842, 'Memory Management Unit/Peak Dynamic': 0.418255, 'Memory Management Unit/Runtime Dynamic': 0.0720559, 'Memory Management Unit/Subthreshold Leakage': 0.0766103, 'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333, 'Peak Dynamic': 16.5952, 'Renaming Unit/Area': 0.303608, 'Renaming Unit/FP Front End RAT/Area': 0.131045, 'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123, 'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468, 'Renaming Unit/FP Front End RAT/Runtime Dynamic': 5.40471e-06, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885, 'Renaming Unit/Free List/Area': 0.0340654, 'Renaming Unit/Free List/Gate Leakage': 2.5481e-05, 'Renaming Unit/Free List/Peak Dynamic': 0.0306032, 'Renaming Unit/Free List/Runtime Dynamic': 0.00729436, 'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144, 'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064, 'Renaming Unit/Gate Leakage': 0.00708398, 'Renaming Unit/Int Front End RAT/Area': 0.0941223, 'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242, 'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965, 'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0820171, 'Renaming Unit/Int Front End RAT/Subthreshold Leakage':
<reponame>Friz64/Vulkan-Docs #!/usr/bin/python3 -i # # Copyright 2013-2021 The Khronos Group Inc. # # SPDX-License-Identifier: Apache-2.0 """Base class for source/header/doc generators, as well as some utility functions.""" from __future__ import unicode_literals import io import os import pdb import re import shutil import sys import tempfile try: from pathlib import Path except ImportError: from pathlib2 import Path from spec_tools.util import getElemName, getElemType def write(*args, **kwargs): file = kwargs.pop('file', sys.stdout) end = kwargs.pop('end', '\n') file.write(' '.join(str(arg) for arg in args)) file.write(end) def noneStr(s): """Return string argument, or "" if argument is None. Used in converting etree Elements into text. s - string to convert""" if s: return s return "" def enquote(s): """Return string argument with surrounding quotes, for serialization into Python code.""" if s: if isinstance(s, str): return "'{}'".format(s) else: return s return None def regSortCategoryKey(feature): """Sort key for regSortFeatures. Sorts by category of the feature name string: - Core API features (those defined with a `<feature>` tag) - ARB/KHR/OES (Khronos extensions) - other (EXT/vendor extensions)""" if feature.elem.tag == 'feature': return 0 if (feature.category == 'ARB' or feature.category == 'KHR' or feature.category == 'OES'): return 1 return 2 def regSortOrderKey(feature): """Sort key for regSortFeatures - key is the sortorder attribute.""" # print("regSortOrderKey {} -> {}".format(feature.name, feature.sortorder)) return feature.sortorder def regSortFeatureVersionKey(feature): """Sort key for regSortFeatures - key is the feature version. `<extension>` elements all have version number 0.""" return float(feature.versionNumber) def regSortExtensionNumberKey(feature): """Sort key for regSortFeatures - key is the extension number. `<feature>` elements all have extension number 0.""" return int(feature.number) def regSortFeatures(featureList): """Default sort procedure for features. - Sorts by explicit sort order (default 0) relative to other features - then by feature category ('feature' or 'extension'), - then by version number (for features) - then by extension number (for extensions)""" featureList.sort(key=regSortExtensionNumberKey) featureList.sort(key=regSortFeatureVersionKey) featureList.sort(key=regSortCategoryKey) featureList.sort(key=regSortOrderKey) class GeneratorOptions: """Base class for options used during header/documentation production. These options are target language independent, and used by Registry.apiGen() and by base OutputGenerator objects.""" def __init__(self, conventions=None, filename=None, directory='.', genpath=None, apiname=None, profile=None, versions='.*', emitversions='.*', defaultExtensions=None, addExtensions=None, removeExtensions=None, emitExtensions=None, emitSpirv=None, reparentEnums=True, sortProcedure=regSortFeatures): """Constructor. Arguments: - conventions - may be mandatory for some generators: an object that implements ConventionsBase - filename - basename of file to generate, or None to write to stdout. - directory - directory in which to generate files - genpath - path to previously generated files, such as api.py - apiname - string matching `<api>` 'apiname' attribute, e.g. 'gl'. - profile - string specifying API profile , e.g. 'core', or None. - versions - regex matching API versions to process interfaces for. Normally `'.*'` or `'[0-9][.][0-9]'` to match all defined versions. 
- emitversions - regex matching API versions to actually emit interfaces for (though all requested versions are considered when deciding which interfaces to generate). For GL 4.3 glext.h, this might be `'1[.][2-5]|[2-4][.][0-9]'`. - defaultExtensions - If not None, a string which must in its entirety match the pattern in the "supported" attribute of the `<extension>`. Defaults to None. Usually the same as apiname. - addExtensions - regex matching names of additional extensions to include. Defaults to None. - removeExtensions - regex matching names of extensions to remove (after defaultExtensions and addExtensions). Defaults to None. - emitExtensions - regex matching names of extensions to actually emit interfaces for (though all requested versions are considered when deciding which interfaces to generate). to None. - emitSpirv - regex matching names of extensions and capabilities to actually emit interfaces for. - reparentEnums - move <enum> elements which extend an enumerated type from <feature> or <extension> elements to the target <enums> element. This is required for almost all purposes, but the InterfaceGenerator relies on the list of interfaces in the <feature> or <extension> being complete. Defaults to True. - sortProcedure - takes a list of FeatureInfo objects and sorts them in place to a preferred order in the generated output. Default is core API versions, ARB/KHR/OES extensions, all other extensions, by core API version number or extension number in each group. The regex patterns can be None or empty, in which case they match nothing.""" self.conventions = conventions """may be mandatory for some generators: an object that implements ConventionsBase""" self.filename = filename "basename of file to generate, or None to write to stdout." self.genpath = genpath """path to previously generated files, such as api.py""" self.directory = directory "directory in which to generate filename" self.apiname = apiname "string matching `<api>` 'apiname' attribute, e.g. 'gl'." self.profile = profile "string specifying API profile , e.g. 'core', or None." self.versions = self.emptyRegex(versions) """regex matching API versions to process interfaces for. Normally `'.*'` or `'[0-9][.][0-9]'` to match all defined versions.""" self.emitversions = self.emptyRegex(emitversions) """regex matching API versions to actually emit interfaces for (though all requested versions are considered when deciding which interfaces to generate). For GL 4.3 glext.h, this might be `'1[.][2-5]|[2-4][.][0-9]'`.""" self.defaultExtensions = defaultExtensions """If not None, a string which must in its entirety match the pattern in the "supported" attribute of the `<extension>`. Defaults to None. Usually the same as apiname.""" self.addExtensions = self.emptyRegex(addExtensions) """regex matching names of additional extensions to include. Defaults to None.""" self.removeExtensions = self.emptyRegex(removeExtensions) """regex matching names of extensions to remove (after defaultExtensions and addExtensions). 
Defaults to None.""" self.emitExtensions = self.emptyRegex(emitExtensions) """regex matching names of extensions to actually emit interfaces for (though all requested versions are considered when deciding which interfaces to generate).""" self.emitSpirv = self.emptyRegex(emitSpirv) """regex matching names of extensions and capabilities to actually emit interfaces for.""" self.reparentEnums = reparentEnums """boolean specifying whether to remove <enum> elements from <feature> or <extension> when extending an <enums> type.""" self.sortProcedure = sortProcedure """takes a list of FeatureInfo objects and sorts them in place to a preferred order in the generated output. Default is core API versions, ARB/KHR/OES extensions, all other extensions, alphabetically within each group.""" self.codeGenerator = False """True if this generator makes compilable code""" def emptyRegex(self, pat): """Substitute a regular expression which matches no version or extension names for None or the empty string.""" if not pat: return '_nomatch_^' return pat class OutputGenerator: """Generate specified API interfaces in a specific style, such as a C header. Base class for generating API interfaces. Manages basic logic, logging, and output file control. Derived classes actually generate formatted output. """ # categoryToPath - map XML 'category' to include file directory name categoryToPath = { 'bitmask': 'flags', 'enum': 'enums', 'funcpointer': 'funcpointers', 'handle': 'handles', 'define': 'defines', 'basetype': 'basetypes', } def __init__(self, errFile=sys.stderr, warnFile=sys.stderr, diagFile=sys.stdout): """Constructor - errFile, warnFile, diagFile - file handles to write errors, warnings, diagnostics to. May be None to not write.""" self.outFile = None self.errFile = errFile self.warnFile = warnFile self.diagFile = diagFile # Internal state self.featureName = None self.genOpts = None self.registry = None self.featureDictionary = {} # Used for extension enum value generation self.extBase = 1000000000 self.extBlockSize = 1000 self.madeDirs = {} # API dictionary, which may be loaded by the beginFile method of # derived generators. self.apidict = None def logMsg(self, level, *args): """Write a message of different categories to different destinations. - `level` - 'diag' (diagnostic, voluminous) - 'warn' (warning) - 'error' (fatal error - raises exception after logging) - `*args` - print()-style arguments to direct to corresponding log""" if level == 'error': strfile = io.StringIO() write('ERROR:', *args, file=strfile) if self.errFile is not None: write(strfile.getvalue(), file=self.errFile) raise UserWarning(strfile.getvalue()) elif level == 'warn': if self.warnFile is not None: write('WARNING:', *args, file=self.warnFile) elif level == 'diag': if self.diagFile is not None: write('DIAG:', *args, file=self.diagFile) else: raise UserWarning( '*** FATAL ERROR in Generator.logMsg: unknown level:' + level) def enumToValue(self, elem, needsNum, bitwidth = 32, forceSuffix = False): """Parse and convert an `<enum>` tag into a value. Returns a list: - first element - integer representation of the value, or None if needsNum is False. The value must be a legal number if needsNum is True. - second element - string representation of the value There are several possible representations of values. - A 'value' attribute simply contains the value. - A 'bitpos' attribute defines a value
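# Sketch of the numbering convention implied by extBase/extBlockSize above.
# The real logic lives in enumToValue (truncated in this excerpt); the handling
# of the 'offset' and 'dir' attributes below is an assumption for illustration.
def extension_enum_value(ext_number, offset, direction=1,
                         ext_base=1000000000, ext_block_size=1000):
    """Each extension owns a block of ext_block_size values starting at
    ext_base + (ext_number - 1) * ext_block_size; a 'dir' of "-1" in the XML
    would negate the result (assumed here, not shown in the excerpt)."""
    return direction * (ext_base + (ext_number - 1) * ext_block_size + offset)

# offset 3 of a hypothetical extension number 42:
assert extension_enum_value(42, 3) == 1000041003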
import os import pathlib import torch import numpy as np from imageio import imread from scipy import linalg from torch.nn.functional import adaptive_avg_pool2d from skimage.measure import compare_ssim from skimage.measure import compare_psnr import glob import argparse import matplotlib.pyplot as plt from tools.inception import InceptionV3 import lpips # from tools.PerceptualSimilarity.models import dist_model as dm import pandas as pd import json import imageio, cv2 from skimage.draw import circle, line_aa, polygon from tqdm import tqdm def resort(fns): #import pdb; pdb.set_trace() pairLst = "/shared/rsaas/aiyucui2/inshop/fashion_yifang/fasion-pairs-test.csv" with open(pairLst) as f: anns = f.readlines() anns = [line[:-1].split(",")[1] for line in anns[1:]] #_, sort_idx = sorted(anns) sorted_idx = [i[0] for i in sorted(enumerate(anns), key=lambda x:x[1])] new_fns = [fns[i] for i in sorted_idx] return new_fns class FID(): """docstring for FID Calculates the Frechet Inception Distance (FID) to evalulate GANs The FID metric calculates the distance between two distributions of images. Typically, we have summary statistics (mean & covariance matrix) of one of these distributions, while the 2nd distribution is given by a GAN. When run as a stand-alone program, it compares the distribution of images that are stored as PNG/JPEG at a specified location with a distribution given by summary statistics (in pickle format). The FID is calculated by assuming that X_1 and X_2 are the activations of the pool_3 layer of the inception net for generated samples and real world samples respectivly. See --help to see further details. Code apapted from https://github.com/bioinf-jku/TTUR to use PyTorch instead of Tensorflow Copyright 2018 Institute of Bioinformatics, <NAME> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ def __init__(self): self.dims = 2048 self.batch_size = 64 self.cuda = True self.verbose=False block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[self.dims] self.model = InceptionV3([block_idx]) if self.cuda: # TODO: put model into specific GPU self.model.cuda() def __call__(self, images, gt_path): """ images: list of the generated image. The values must lie between 0 and 1. gt_path: the path of the ground truth images. The values must lie between 0 and 1. 
""" if not os.path.exists(gt_path): raise RuntimeError('Invalid path: %s' % gt_path) print('calculate gt_path statistics...') m1, s1 = self.compute_statistics_of_path(gt_path, self.verbose) print('calculate generated_images statistics...') m2, s2 = self.calculate_activation_statistics(images, self.verbose) fid_value = self.calculate_frechet_distance(m1, s1, m2, s2) return fid_value def calculate_from_disk(self, imgs, gt_path): """ """ if not os.path.exists(gt_path): raise RuntimeError('Invalid path: %s' % gt_path) #if not os.path.exists(generated_path): # raise RuntimeError('Invalid path: %s' % generated_path) print('calculate gt_path statistics...') m1, s1 = self.compute_statistics_of_path(gt_path, self.verbose) # m1, s1 = self.compute_statistics_of_images(imgs_1, self.verbose) print('calculate generated_path statistics...') m2, s2 = self.compute_statistics_of_images(imgs, self.verbose) print('calculate frechet distance...') fid_value = self.calculate_frechet_distance(m1, s1, m2, s2) print('fid_distance %f' % (fid_value)) return fid_value def compute_statistics_of_images(self, imgs, verbose): # Bring images to shape (B, 3, H, W) imgs = np.array(imgs).astype(np.float32) imgs = imgs.transpose((0, 3, 1, 2)) # Rescale images to be between 0 and 1 imgs /= 255 m, s = self.calculate_activation_statistics(imgs, verbose) # np.savez(npz_file, mu=m, sigma=s) return m, s def compute_statistics_of_path(self, path, verbose): npz_file = os.path.join(path, 'statistics.npz') if os.path.exists(npz_file): f = np.load(npz_file) m, s = f['mu'][:], f['sigma'][:] f.close() else: path = pathlib.Path(path) files = list(path.glob('*.jpg')) # + list(path.glob('*.png')) imgs = np.array([cv2.resize(imread(str(fn)).astype(np.float32), (256, 256)) for fn in files]) # Bring images to shape (B, 3, H, W) imgs = imgs.transpose((0, 3, 1, 2)) # Rescale images to be between 0 and 1 imgs /= 255 m, s = self.calculate_activation_statistics(imgs, verbose) np.savez(npz_file, mu=m, sigma=s) return m, s def calculate_activation_statistics(self, images, verbose): """Calculation of the statistics used by the FID. Params: -- images : Numpy array of dimension (n_images, 3, hi, wi). The values must lie between 0 and 1. -- model : Instance of inception model -- batch_size : The images numpy array is split into batches with batch size batch_size. A reasonable batch size depends on the hardware. -- dims : Dimensionality of features returned by Inception -- cuda : If set to True, use GPU -- verbose : If set to True and parameter out_step is given, the number of calculated batches is reported. Returns: -- mu : The mean over samples of the activations of the pool_3 layer of the inception model. -- sigma : The covariance matrix of the activations of the pool_3 layer of the inception model. """ act = self.get_activations(images, verbose) mu = np.mean(act, axis=0) sigma = np.cov(act, rowvar=False) return mu, sigma def get_activations(self, images, verbose=False): """Calculates the activations of the pool_3 layer for all images. Params: -- images : Numpy array of dimension (n_images, 3, hi, wi). The values must lie between 0 and 1. -- model : Instance of inception model -- batch_size : the images numpy array is split into batches with batch size batch_size. A reasonable batch size depends on the hardware. -- dims : Dimensionality of features returned by Inception -- cuda : If set to True, use GPU -- verbose : If set to True and parameter out_step is given, the number of calculated batches is reported. 
Returns: -- A numpy array of dimension (num images, dims) that contains the activations of the given tensor when feeding inception with the query tensor. """ self.model.eval() d0 = images.shape[0] if self.batch_size > d0: print(('Warning: batch size is bigger than the data size. ' 'Setting batch size to data size')) self.batch_size = d0 n_batches = d0 // self.batch_size n_used_imgs = n_batches * self.batch_size pred_arr = np.empty((n_used_imgs, self.dims)) for i in range(n_batches): if verbose: print('\rPropagating batch %d/%d' % (i + 1, n_batches)) # end='', flush=True) start = i * self.batch_size end = start + self.batch_size batch = torch.from_numpy(images[start:end]).type(torch.FloatTensor) # batch = Variable(batch, volatile=True) if self.cuda: batch = batch.cuda() pred = self.model(batch)[0] # If model output is not scalar, apply global spatial average pooling. # This happens if you choose a dimensionality not equal 2048. if pred.shape[2] != 1 or pred.shape[3] != 1: pred = adaptive_avg_pool2d(pred, output_size=(1, 1)) pred_arr[start:end] = pred.cpu().data.numpy().reshape(self.batch_size, -1) if verbose: print(' done') return pred_arr def calculate_frechet_distance(self, mu1, sigma1, mu2, sigma2, eps=1e-6): """Numpy implementation of the Frechet Distance. The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1) and X_2 ~ N(mu_2, C_2) is d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)). Stable version by <NAME>. Params: -- mu1 : Numpy array containing the activations of a layer of the inception net (like returned by the function 'get_predictions') for generated samples. -- mu2 : The sample mean over activations, precalculated on an representive data set. -- sigma1: The covariance matrix over activations for generated samples. -- sigma2: The covariance matrix over activations, precalculated on an representive data set. Returns: -- : The Frechet Distance. """ mu1 = np.atleast_1d(mu1) mu2 = np.atleast_1d(mu2) sigma1 = np.atleast_2d(sigma1) sigma2 = np.atleast_2d(sigma2) assert mu1.shape == mu2.shape, \ 'Training and test mean vectors have different lengths' assert sigma1.shape == sigma2.shape, \ 'Training and test covariances have different dimensions' diff = mu1 - mu2 # Product might be almost singular covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False) if not np.isfinite(covmean).all(): msg = ('fid calculation produces singular product; ' 'adding %s to diagonal of cov estimates') % eps print(msg) offset = np.eye(sigma1.shape[0]) * eps covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset)) # Numerical error might give slight imaginary component if np.iscomplexobj(covmean): if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3): m = np.max(np.abs(covmean.imag)) raise ValueError('Imaginary component {}'.format(m)) covmean = covmean.real tr_covmean = np.trace(covmean) return (diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * tr_covmean) class Reconstruction_Metrics(): def __init__(self, metric_list=['ssim', 'psnr', 'l1', 'mae'], data_range=1, win_size=51, multichannel=True): self.data_range = data_range self.win_size = win_size self.multichannel = multichannel for metric in metric_list: if metric in ['ssim', 'psnr', 'l1', 'mae']: setattr(self, metric, True) else: print('unsupport reconstruction metric: %s'%metric) def __call__(self, inputs, gts): """
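# --- Illustrative, self-contained sketch of the Frechet distance formula used
# by FID.calculate_frechet_distance() above:
#     d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2))
# Only numpy/scipy are needed; the statistics here are toy random samples, not
# real Inception pool_3 activations, and the epsilon/singularity handling of
# the full implementation is omitted for brevity.
import numpy as np
from scipy import linalg

def frechet_distance(mu1, sigma1, mu2, sigma2):
    diff = mu1 - mu2
    # sqrtm of the covariance product; take the real part in case of tiny
    # imaginary numerical noise
    covmean = linalg.sqrtm(sigma1.dot(sigma2), disp=False)[0].real
    return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * np.trace(covmean)

if __name__ == '__main__':
    rng = np.random.default_rng(0)
    a = rng.normal(size=(500, 4))            # stand-in "real" activations
    b = rng.normal(loc=0.5, size=(500, 4))   # stand-in "generated" activations
    d = frechet_distance(a.mean(0), np.cov(a, rowvar=False),
                         b.mean(0), np.cov(b, rowvar=False))
    print('toy Frechet distance: %.4f' % d)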
<reponame>anybus/pythonMySQL #!/usr/bin/env python3 # -*- coding: utf-8 -*- __author__ = 'Frankie' from config import * from mysql.connector import errorcode import mysql.connector import traceback import sys import re class pythonMySQL(object): configs = {} # 设置连接参数,配置信息(字典) links = {} # 保存连接标识符(字典) NumberLink = 0 # 保存数据库连接数量/配置信息数量 current = 0 # 标识当前对应的数据库配置,可以是数字或者字符串 config = {} # 保存当前模型的数据库配置 con = None # 保存连接标识符 cur = None # 保存数据库游标 dbdebug = False # 是否开启DEBUG模式 database = '' # 记录连接的数据库 table_name = '' # 记录操作的数据表名 columns = [] # 记录表中字段名 connected = False # 是否连接成功 queryStr = '' # 保存最后执行的操作 SQLerror = {} # SQL执行报错错误信息 lastInsertId = 0 # 保存上一步插入操作产生AUTO_INCREMENT numRows = 0 # 上一步操作产生受影响的记录的条数 tmp_table = '' aliasString = '' fieldString = '' joinString = '' whereString = '' groupString = '' havingString = '' orderString = '' limitString = '' fetchSql = False whereStringArray = [] whereValueArray = [] SQL_logic = ['AND', 'OR', 'XOR'] # SQL语句支持的逻辑运算符 # 对于参数dbConfig,需为dict,包含host、port、user、password、database、charset、autocommit、DB_DEBUG、MYSQL_LOG,至少须包含user、password、database def __init__(self, dbtable, ConfigID=0, dbConfig=None): if not isinstance(ConfigID, (int, str)): self.throw_exception("第二个参数只能是数字或字符串", True) # 将类变量中的可变元素初始化 self.columns = [] # 记录表中字段名 self.whereStringArray = [] self.whereValueArray = [] self.SQLerror = {} # 如果数据库配置已被存在self::$configs中时 if ConfigID in pythonMySQL.configs: if dbConfig != None: self.throw_exception( '数据库配置编号' + (str(ConfigID) if isinstance(ConfigID, int) else "'" + ConfigID + "'") + '已被占用', True) self.init(ConfigID, dbtable) return # 以下为数据库配置还未被存在self::$configs中时 if dbConfig == None: if not isset('CONFIG'): self.throw_exception("配置文件未定义CONFIG", True) # 检查配置文件中是否有对应的配置信息 if ConfigID not in CONFIG: self.throw_exception( "配置文件中无" + (str(ConfigID) if isinstance(ConfigID, int) else "'" + ConfigID + "'") + "的配置信息", True) # 使用配置文件中对应的配置 if ConfigID == 0: dbConfig = CONFIG[0] else: dbConfig = dict(CONFIG[0]) dbConfig.update(CONFIG[ConfigID]) if 'DB_DEBUG' in dbConfig: if dbConfig['DB_DEBUG'] == True: self.dbdebug = True del dbConfig['DB_DEBUG'] if 'password' not in dbConfig: if 'password' in CONFIG[0]: dbConfig['password'] = CONFIG[0]['password'] else: self.throw_exception('数据库未设置密码') if 'host' not in dbConfig: dbConfig['host'] = '127.0.0.1' if 'user' not in dbConfig: dbConfig['user'] = 'root' if 'port' not in dbConfig: dbConfig['port'] = '3306' if 'autocommit' not in dbConfig: dbConfig['autocommit'] = True if 'dbms' not in dbConfig: dbConfig['dbms'] = 'mysql' pythonMySQL.configs[ConfigID] = dbConfig self.current = ConfigID self.config = dbConfig self.database = dbConfig['database'] del dbConfig['dbms'] try: self.con = mysql.connector.connect(**dbConfig) self.cur = self.con.cursor(dictionary=True) except mysql.connector.Error as err: if err.errno == errorcode.ER_ACCESS_DENIED_ERROR: msg = "Something is wrong with your user name or password" elif err.errno == errorcode.ER_BAD_DB_ERROR: msg = "Database does not exist" else: msg = err self.throw_exception('数据库连接错误:' + msg) # 设置 self.link, self.table_name, self.connected if self.cur: pythonMySQL.links[ConfigID] = self.con else: self.throw_exception('数据库连接错误') if self.in_db(dbtable): self.table_name = dbtable else: self.throw_exception('数据库' + dbConfig['database'] + '中不存在' + dbtable + '表') self.connected = True # 最后将数据库连接数 + 1 pythonMySQL.NumberLink += 1 # 初始化私有变量 def init(self, current, dbtable): self.current = current self.config = pythonMySQL.configs[current] self.con = pythonMySQL.links[current] self.cur = 
self.con.cursor(dictionary=True) if 'DB_DEBUG' in self.config and self.config['DB_DEBUG'] == True: self.dbdebug = True self.database = self.config['database'] if self.in_db(dbtable): self.table_name = dbtable else: self.throw_exception('数据库' + self.config['database'] + '中不存在' + dbtable + '表') self.connected = True def in_db(self, dbtable): self.cur.execute('show tables') tables = self.cur.fetchall() key = 'Tables_in_' + self.database for table in tables: if dbtable == table[key]: return True return False def set_columns(self, dbtable): self.cur.execute("SHOW COLUMNS FROM `" + dbtable + "`") columns = self.cur.fetchall() self.columns = ['', ] for column in columns: if column['Key'] == 'PRI': self.columns[0] = column['Field'] self.columns.append(column['Field']) def get_columns(self): return self.cur.column_names # 字符串查询 # where("id = 1 and nick = 'frankie'") # where("id = %d and nick = '%s'", 1, 'frankie') # where("id = %d and nick = '%s'", (1, 'frankie')) # where("id = %d and nick = '%s'", [1, 'frankie']) # 字典查询 # where({'id':1, 'nick':'frankie'}) # where({'id&nick':"1"}) # WHERE `id`='1' AND `nick`='1' # where({'id&nick':[1, 'frankie']}) = where({'id&nick':[1, 'frankie', '', 's']}) # 其中's'代表single单对应 # where({'id':[1, 2, 3, 'or', 'm']}) # WHERE `id`=1 OR `id`=2 OR `id`=3 # 其中'm'代表multi多对应 # where({'id&nick':[1, 'frankie', 'or', 'm']}) # WHERE (`id`=1 OR `id`='frankie') AND (`nick`=1 OR `nick`='frankie') # 其中'm'代表multi多对应 # 更多详见文档 def where(self, *where): param_number = len(where) if isinstance(where[0], str): if param_number == 1: whereSubString = '( ' + where[0] + ' )' elif param_number > 1: if isinstance(where[1], tuple): whereSubString = where[0] % where[1] elif isinstance(where[1], list): whereSubString = where[0] % tuple(where[1]) else: param_array = [] for i in range(1, param_number): param_array.append(where[i]) whereSubString = where[0] % tuple(param_array) whereSubString = '( ' + whereSubString + ' )' elif isinstance(where[0], dict): whereSubString = self._parseWhereArrayParam(where[0]) else: self.throw_exception("where子句的参数只支持字符串和字典") self.whereStringArray.append(whereSubString) return self def parseWhere(self): length = len(self.whereStringArray) if length == 0: return if length > 1: self.whereString = ' WHERE ( ' + self.whereStringArray[0] + ' )' for i in range(1, length): self.whereString += ' AND ( ' + self.whereStringArray[i] + ' )' else: self.whereString = ' WHERE ' + self.whereStringArray[0] # table('table_name') | table('table_name AS t') | table('database.table_name AS t1') # table({'table_name':'', 'table_name':'t', 'database.table_name':'t1'}) def table(self, table): if isinstance(table, str): self.tmp_table = table elif isinstance(table, dict): if len(table) == 0: self.throw_exception('table子句参数不能传空字典') self.tmp_table = '' for key, val in table.items(): if val != '': strpos = key.find('.') if strpos == -1: self.tmp_table += '`' + key.strip() + '` AS `' + val.strip() + '`,' else: self.tmp_table += key.strip() + ' AS `' + val.strip() + '`,' else: strpos = key.find('.') if strpos == -1: self.tmp_table += '`' + key.strip() + '`,' else: self.tmp_table += key.strip() + ',' self.tmp_table = self.tmp_table.rstrip(',') else: self.throw_exception('table子句的参数类型错误:"' + table + '"') return self def alias(self, alias): self.aliasString = ' AS `' + alias + '`' return self # field() | field('') | field('*') | field(True) | field('id,username as name, db.pass') # field({'id':'', 'username':'name', 'db.pass':''}) # field('sex,head', True) | field(('sex', 'head'), True) 过滤sex和head字段 
def field(self, field='', filter=False): if field == True: # 显示调用所有字段 self.set_columns(self.table_name if not self.tmp_table else self.tmp_table) self.fieldString += ' ' columns_array = self.columns columns_array.pop(0) for column in columns_array: self.fieldString += '`' + column + '`,' self.fieldString = self.fieldString.rstrip(',') return self if filter: # 过滤字段 if not isinstance(field, (str, set, list, tuple)): self.throw_exception("过滤字段时,field子句的参数只支持字符串或set、list、tuple") self.set_columns(self.table_name if self.tmp_table == '' else self.tmp_table) columns_list = self.columns columns_list.pop(0) columns_dict = {} for index, item in enumerate(columns_list): columns_dict[str(index)] = item explode_array = [] if isinstance(field, str): explode_array = re.split('\s{0,},\s{0,}', field.strip()) else: for single_field in field: explode_array.append(single_field.strip()) for index, item in list(columns_dict.items()): if item in explode_array: columns_dict.pop(index) for index, item in columns_dict.items(): self.fieldString += '`' + item + '`,' self.fieldString = ' ' + self.fieldString.rstrip(',') return self if field == '' or field == '*': self.fieldString = ' *' return self if isinstance(field, str): field_array = field.split(',') field_array = list(map(self._addSpecialChar, field_array)) self.fieldString = ','.join([item for item in field_array]) elif isinstance(field, dict): for key, val in field.items(): if val == '': after_process_key = self._addSpecialChar(key) self.fieldString += after_process_key + ',' else: after_process_key = self._addSpecialChar(key) after_process_val = self._addSpecialChar(val) self.fieldString += after_process_key + ' AS ' + after_process_val + ',' self.fieldString = self.fieldString.rstrip(',') else: self.throw_exception("field子句的参数只支持字符串或dict") self.fieldString = ' ' + self.fieldString return self def order(self, order): if isinstance(order, str): self.orderString = ' ORDER BY ' + order elif isinstance(order, dict): self.orderString = ' ORDER BY ' for key, val in order.items(): if val == '': self.orderString += '`' + key.strip() + '`,' else: if val.lower() != 'asc' and val.lower() != 'desc': self.throw_exception("order子句请使用asc或desc关键词指定排序,默认为asc,出现未知字符") self.orderString += '`' + key.strip() + '` ' + val + ',' self.orderString = self.orderString.rstrip(',') else: self.throw_exception("order子句的参数只支持字符串和字典") return self def limit(self, *limit): param_number = len(limit) if param_number == 1: if not isinstance(limit[0], (int, str)): self.throw_exception("limit子句的参数非法") if isinstance(limit[0], str): if not re.match('^\d+\s{0,},\s{0,}\d+$', limit[0].strip()) and not re.match('^\d+$', limit[0].strip()): self.throw_exception("limit子句的参数非法") self.limitString = ' LIMIT ' + str(limit[0]) elif param_number == 2: for i in range(2): if not is_numeric(limit[i]): self.throw_exception("limit子句的参数非法") self.limitString = ' LIMIT ' + str(limit[0]) + ',' + str(limit[1]) else: self.throw_exception("limit子句的参数数量必须为一或两个") return self def page(self, page_number, amount): if not is_numeric(page_number) or not is_numeric(amount): self.throw_exception("page方法只支持两个数字参数的写法") start = (int(page_number) - 1) * int(amount) self.limitString = ' LIMIT ' + str(start) + ',' + str(amount) return self def group(self, group): if not isinstance(group, str): self.throw_exception("group子句的参数只支持字符串") self.groupString = ' GROUP BY ' + group return self def having(self, having): if not isinstance(having, str): self.throw_exception("having子句的参数只支持字符串") self.havingString = ' HAVING BY
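# --- Illustrative, standalone sketch of the chained query-building style that
# the comments above document for pythonMySQL (where()/field()/limit()). This
# is a deliberately simplified re-implementation for clarity, NOT the class
# itself: it only assembles SQL text and never touches a database, and the
# TinyQuery name is hypothetical.
class TinyQuery:
    def __init__(self, table):
        self._table = table
        self._fields = '*'
        self._where = []
        self._limit = ''

    def field(self, fields):
        self._fields = fields
        return self

    def where(self, cond):
        # dict form: {'id': 1} -> `id`=1 ; string form is passed through
        if isinstance(cond, dict):
            cond = ' AND '.join('`%s`=%r' % (k, v) for k, v in cond.items())
        self._where.append('( %s )' % cond)
        return self

    def limit(self, start, amount=None):
        if amount is None:
            self._limit = ' LIMIT %d' % start
        else:
            self._limit = ' LIMIT %d,%d' % (start, amount)
        return self

    def sql(self):
        where = (' WHERE ' + ' AND '.join(self._where)) if self._where else ''
        return 'SELECT %s FROM `%s`%s%s' % (self._fields, self._table, where, self._limit)

if __name__ == '__main__':
    print(TinyQuery('user').field('id,nick').where({'id': 1})
          .where('age > 18').limit(0, 10).sql())
    # SELECT id,nick FROM `user` WHERE ( `id`=1 ) AND ( age > 18 ) LIMIT 0,10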
<gh_stars>1-10 #!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = ''' --- module: ce_startup short_description: Manages a system startup information on HUAWEI CloudEngine switches. description: - Manages a system startup information on HUAWEI CloudEngine switches. author: - <NAME> (@QijunPan) notes: - Recommended connection is C(network_cli). - This module also works with C(local) connections for legacy playbooks. options: cfg_file: description: - Name of the configuration file that is applied for the next startup. The value is a string of 5 to 255 characters. default: present software_file: description: - File name of the system software that is applied for the next startup. The value is a string of 5 to 255 characters. patch_file: description: - Name of the patch file that is applied for the next startup. slot: description: - Position of the device.The value is a string of 1 to 32 characters. The possible value of slot is all, slave-board, or the specific slotID. action: description: - Display the startup information. choices: ['display'] ''' EXAMPLES = ''' - name: Startup module test hosts: cloudengine connection: local gather_facts: no vars: cli: host: "{{ inventory_hostname }}" port: "{{ ansible_ssh_port }}" username: "{{ username }}" password: "{{ password }}" transport: cli tasks: - name: Display startup information community.network.ce_startup: action: display provider: "{{ cli }}" - name: Set startup patch file community.network.ce_startup: patch_file: 2.PAT slot: all provider: "{{ cli }}" - name: Set startup software file community.network.ce_startup: software_file: aa.cc slot: 1 provider: "{{ cli }}" - name: Set startup cfg file community.network.ce_startup: cfg_file: 2.cfg slot: 1 provider: "{{ cli }}" ''' RETURN = ''' changed: description: check to see if a change was made on the device returned: always type: bool sample: true proposed: description: k/v pairs of parameters passed into module returned: always type: dict sample: {"patch_file": "2.PAT", "slot": "all"} existing: description: k/v pairs of existing aaa server returned: always type: dict sample: { "configSysSoft": "flash:/CE12800-V200R002C20_issuB071.cc", "curentPatchFile": "NULL", "curentStartupFile": "NULL", "curentSysSoft": "flash:/CE12800-V200R002C20_issuB071.cc", "nextPatchFile": "flash:/1.PAT", "nextStartupFile": "flash:/1.cfg", "nextSysSoft": "flash:/CE12800-V200R002C20_issuB071.cc", "position": "5" } end_state: description: k/v pairs of aaa params after module execution returned: always type: dict sample: {"StartupInfos": null} updates: description: command sent to the device returned: always type: list sample: {"startup patch 2.PAT all"} ''' import re from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.network.plugins.module_utils.network.cloudengine.ce import 
ce_argument_spec, run_commands from ansible.module_utils.connection import exec_command class StartUp(object): """ Manages system startup information. """ def __init__(self, argument_spec): self.spec = argument_spec self.module = None self.init_module() # module input info self.cfg_file = self.module.params['cfg_file'] self.software_file = self.module.params['software_file'] self.patch_file = self.module.params['patch_file'] self.slot = self.module.params['slot'] self.action = self.module.params['action'] # state self.changed = False self.updates_cmd = list() self.results = dict() self.existing = dict() self.proposed = dict() self.end_state = dict() # system startup info self.startup_info = None def init_module(self): """ init module """ self.module = AnsibleModule( argument_spec=self.spec, supports_check_mode=True) def check_response(self, xml_str, xml_name): """Check if response message is already succeed.""" if "<ok/>" not in xml_str: self.module.fail_json(msg='Error: %s failed.' % xml_name) def get_startup_dict(self): """Retrieves the current config from the device or cache """ cmd = 'display startup' rc, out, err = exec_command(self.module, cmd) if rc != 0: self.module.fail_json(msg=err) cfg = str(out).strip() startup_info = dict() startup_info["StartupInfos"] = list() if not cfg: return startup_info else: re_find = re.findall(r'(.*)\s*' r'\s*Configured\s*startup\s*system\s*software:\s*(.*)' r'\s*Startup\s*system\s*software:\s*(.*)' r'\s*Next\s*startup\s*system\s*software:\s*(.*)' r'\s*Startup\s*saved-configuration\s*file:\s*(.*)' r'\s*Next\s*startup\s*saved-configuration\s*file:\s*(.*)' r'\s*Startup\s*paf\s*file:\s*(.*)' r'\s*Next\s*startup\s*paf\s*file:\s*(.*)' r'\s*Startup\s*patch\s*package:\s*(.*)' r'\s*Next\s*startup\s*patch\s*package:\s*(.*)', cfg) if re_find: for mem in re_find: startup_info["StartupInfos"].append( dict(nextStartupFile=mem[5], configSysSoft=mem[1], curentSysSoft=mem[2], nextSysSoft=mem[3], curentStartupFile=mem[4], curentPatchFile=mem[8], nextPatchFile=mem[9], postion=mem[0])) return startup_info return startup_info def get_cfg_filename_type(self, filename): """Gets the type of cfg filename, such as cfg, zip, dat...""" if filename is None: return None if ' ' in filename: self.module.fail_json( msg='Error: Configuration file name include spaces.') iftype = None if filename.endswith('.cfg'): iftype = 'cfg' elif filename.endswith('.zip'): iftype = 'zip' elif filename.endswith('.dat'): iftype = 'dat' else: return None return iftype.lower() def get_pat_filename_type(self, filename): """Gets the type of patch filename, such as cfg, zip, dat...""" if filename is None: return None if ' ' in filename: self.module.fail_json( msg='Error: Patch file name include spaces.') iftype = None if filename.endswith('.PAT'): iftype = 'PAT' else: return None return iftype.upper() def get_software_filename_type(self, filename): """Gets the type of software filename, such as cfg, zip, dat...""" if filename is None: return None if ' ' in filename: self.module.fail_json( msg='Error: Software file name include spaces.') iftype = None if filename.endswith('.cc'): iftype = 'cc' else: return None return iftype.lower() def startup_next_cfg_file(self): """set next cfg file""" commands = list() cmd = {'output': None, 'command': ''} if self.slot: cmd['command'] = "startup saved-configuration %s slot %s" % ( self.cfg_file, self.slot) commands.append(cmd) self.updates_cmd.append( "startup saved-configuration %s slot %s" % (self.cfg_file, self.slot)) run_commands(self.module, commands) self.changed 
= True else: cmd['command'] = "startup saved-configuration %s" % self.cfg_file commands.append(cmd) self.updates_cmd.append( "startup saved-configuration %s" % self.cfg_file) run_commands(self.module, commands) self.changed = True def startup_next_software_file(self): """set next software file""" commands = list() cmd = {'output': None, 'command': ''} if self.slot: if self.slot == "all" or self.slot == "slave-board": cmd['command'] = "startup system-software %s %s" % ( self.software_file, self.slot) commands.append(cmd) self.updates_cmd.append( "startup system-software %s %s" % (self.software_file, self.slot)) run_commands(self.module, commands) self.changed = True else: cmd['command'] = "startup system-software %s slot %s" % ( self.software_file, self.slot) commands.append(cmd) self.updates_cmd.append( "startup system-software %s slot %s" % (self.software_file, self.slot)) run_commands(self.module, commands) self.changed = True if not self.slot: cmd['command'] = "startup system-software %s" % self.software_file commands.append(cmd) self.updates_cmd.append( "startup system-software %s" % self.software_file) run_commands(self.module, commands) self.changed = True def startup_next_pat_file(self): """set next patch file""" commands = list() cmd = {'output': None, 'command': ''} if self.slot: if self.slot == "all": cmd['command'] = "startup patch %s %s" % ( self.patch_file, self.slot) commands.append(cmd) self.updates_cmd.append( "startup patch %s %s" % (self.patch_file, self.slot)) run_commands(self.module, commands) self.changed = True else: cmd['command'] = "startup patch %s slot %s" % ( self.patch_file, self.slot) commands.append(cmd) self.updates_cmd.append( "startup patch %s slot %s" % (self.patch_file, self.slot)) run_commands(self.module, commands) self.changed = True if not self.slot: cmd['command'] = "startup patch %s" % self.patch_file commands.append(cmd) self.updates_cmd.append( "startup patch %s" % self.patch_file) run_commands(self.module, commands) self.changed = True def check_params(self): """Check all input params""" # cfg_file check if self.cfg_file: if not self.get_cfg_filename_type(self.cfg_file): self.module.fail_json( msg='Error: Invalid cfg file name or cfg file name extension ( *.cfg, *.zip, *.dat ).') # software_file check if self.software_file: if not self.get_software_filename_type(self.software_file): self.module.fail_json( msg='Error: Invalid software file name or software file name extension ( *.cc).') # patch_file check if self.patch_file: if not self.get_pat_filename_type(self.patch_file): self.module.fail_json( msg='Error: Invalid patch file name or patch file name extension ( *.PAT ).') # slot check if self.slot: if self.slot.isdigit(): if int(self.slot) <= 0 or int(self.slot) > 16: self.module.fail_json( msg='Error: The number of slot is not in the range from 1 to 16.') else: if len(self.slot) <= 0 or len(self.slot) > 32: self.module.fail_json( msg='Error: The length of slot is not in the range from 1 to 32.') def get_proposed(self): """get proposed info""" if self.cfg_file: self.proposed["cfg_file"] = self.cfg_file if self.software_file: self.proposed["system_file"] = self.software_file if self.patch_file: self.proposed["patch_file"] = self.patch_file if self.slot: self.proposed["slot"] = self.slot def get_existing(self): """get existing info""" if not self.startup_info: self.existing["StartupInfos"] = None else: self.existing["StartupInfos"] = self.startup_info["StartupInfos"] def get_end_state(self): """get end state info""" if not self.startup_info: 
self.end_state["StartupInfos"] = None else: self.end_state["StartupInfos"] = self.startup_info["StartupInfos"] if self.end_state == self.existing: self.changed = False def work(self): """worker""" self.check_params() self.get_proposed() self.startup_info = self.get_startup_dict() self.get_existing() startup_info = self.startup_info["StartupInfos"][0] if self.cfg_file: if self.cfg_file != startup_info["nextStartupFile"]: self.startup_next_cfg_file() if self.software_file: if self.software_file != startup_info["nextSysSoft"]: self.startup_next_software_file() if self.patch_file: if self.patch_file != startup_info["nextPatchFile"]: self.startup_next_pat_file() if self.action == "display": self.startup_info = self.get_startup_dict() self.startup_info = self.get_startup_dict() self.get_end_state() self.results['changed'] = self.changed self.results['proposed'] = self.proposed self.results['existing'] = self.existing self.results['end_state'] = self.end_state if self.changed: self.results['updates'] = self.updates_cmd else: self.results['updates'] = list() self.module.exit_json(**self.results) def
%d', gen_state) except WindowsError as ex: if ex.winerror == 2: LOG.debug('Sysprep data not found in the registry, ' 'skipping sysprep completion check.') else: raise ex def check_service_exists(self, service_name): LOG.debug("Checking if service exists: %s", service_name) try: with self._get_service_handle(service_name): return True except pywintypes.error as ex: if ex.winerror == winerror.ERROR_SERVICE_DOES_NOT_EXIST: return False raise def get_service_status(self, service_name): LOG.debug("Getting service status for: %s", service_name) with self._get_service_handle( service_name, win32service.SERVICE_QUERY_STATUS) as hs: service_status = win32service.QueryServiceStatusEx(hs) state = service_status['CurrentState'] return self._SERVICE_STATUS_MAP.get( state, WindowsUtils.SERVICE_STATUS_UNKNOWN) def get_service_start_mode(self, service_name): LOG.debug("Getting service start mode for: %s", service_name) with self._get_service_handle( service_name, win32service.SERVICE_QUERY_CONFIG) as hs: service_config = win32service.QueryServiceConfig(hs) start_type = service_config[1] return [k for k, v in self._SERVICE_START_TYPE_MAP.items() if v == start_type][0] def set_service_start_mode(self, service_name, start_mode): # TODO(alexpilotti): Handle the "Delayed Start" case LOG.debug("Setting service start mode for: %s", service_name) start_type = self._get_win32_start_type(start_mode) with self._get_service_handle( service_name, win32service.SERVICE_CHANGE_CONFIG) as hs: win32service.ChangeServiceConfig( hs, win32service.SERVICE_NO_CHANGE, start_type, win32service.SERVICE_NO_CHANGE, None, None, False, None, None, None, None) def start_service(self, service_name): LOG.debug('Starting service %s', service_name) with self._get_service_handle( service_name, win32service.SERVICE_START) as hs: win32service.StartService(hs, service_name) def stop_service(self, service_name, wait=False): LOG.debug('Stopping service %s', service_name) with self._get_service_handle( service_name, win32service.SERVICE_STOP | win32service.SERVICE_QUERY_STATUS) as hs: win32service.ControlService(hs, win32service.SERVICE_CONTROL_STOP) if wait: while True: service_status = win32service.QueryServiceStatusEx(hs) state = service_status['CurrentState'] if state == win32service.SERVICE_STOPPED: return time.sleep(.1) @staticmethod @contextlib.contextmanager def _get_service_control_manager( scm_access=win32service.SC_MANAGER_CONNECT): hscm = win32service.OpenSCManager(None, None, scm_access) try: yield hscm finally: win32service.CloseServiceHandle(hscm) @staticmethod @contextlib.contextmanager def _get_service_handle(service_name, service_access=win32service.SERVICE_QUERY_CONFIG, scm_access=win32service.SC_MANAGER_CONNECT): with WindowsUtils._get_service_control_manager(scm_access) as hscm: hs = win32service.OpenService(hscm, service_name, service_access) try: yield hs finally: win32service.CloseServiceHandle(hs) @staticmethod def _get_win32_start_type(start_mode): start_type = WindowsUtils._SERVICE_START_TYPE_MAP.get(start_mode) if not start_type: raise exception.InvalidStateException( "Invalid service start mode: %s" % start_mode) return start_type def create_service(self, service_name, display_name, path, start_mode, username=None, password=None): LOG.debug('Creating service %s', service_name) start_type = self._get_win32_start_type(start_mode) with WindowsUtils._get_service_control_manager( scm_access=win32service.SC_MANAGER_CREATE_SERVICE) as hscm: hs = win32service.CreateService( hscm, service_name, display_name, 
win32service.SERVICE_ALL_ACCESS, win32service.SERVICE_WIN32_OWN_PROCESS, start_type, win32service.SERVICE_ERROR_NORMAL, path, None, False, None, username, password) win32service.CloseServiceHandle(hs) def delete_service(self, service_name): LOG.debug('Deleting service %s', service_name) with self._get_service_handle( service_name, win32service.SERVICE_ALL_ACCESS) as hs: win32service.DeleteService(hs) def set_service_credentials(self, service_name, username, password): LOG.debug('Setting service credentials: %s', service_name) with self._get_service_handle( service_name, win32service.SERVICE_CHANGE_CONFIG) as hs: win32service.ChangeServiceConfig( hs, win32service.SERVICE_NO_CHANGE, win32service.SERVICE_NO_CHANGE, win32service.SERVICE_NO_CHANGE, None, None, False, None, username, password, None) def get_service_username(self, service_name): LOG.debug('Getting service username: %s', service_name) with self._get_service_handle(service_name) as hs: cfg = win32service.QueryServiceConfig(hs) return cfg[7] def reset_service_password(self): """This is needed to avoid pass the hash attacks.""" if not self.check_service_exists(self._service_name): LOG.info("Service does not exist: %s", self._service_name) return None service_username = self.get_service_username(self._service_name) # Ignore builtin accounts if "\\" not in service_username: LOG.info("Skipping password reset, service running as a built-in " "account: %s", service_username) return None domain, username = service_username.split('\\') if domain != ".": LOG.info("Skipping password reset, service running as a domain " "account: %s", service_username) return None LOG.debug('Resetting password for service user: %s', service_username) maximum_length = self.get_maximum_password_length() password = self.generate_random_password(maximum_length) self.set_user_password(username, password) self.set_service_credentials( self._service_name, service_username, password) return domain, username, password def terminate(self): # Wait for the service to start. Polling the service "Started" property # is not enough time.sleep(3) self.stop_service(self._service_name) def get_default_gateway(self): default_routes = [r for r in self._get_ipv4_routing_table() if r[0] == '0.0.0.0'] if default_routes: return default_routes[0][3], default_routes[0][2] else: return None, None @staticmethod def _heap_alloc(heap, size): table_mem = kernel32.HeapAlloc(heap, 0, ctypes.c_size_t(size.value)) if not table_mem: raise exception.CloudbaseInitException( 'Unable to allocate memory for the IP forward table') return table_mem @contextlib.contextmanager def _get_forward_table(self): heap = kernel32.GetProcessHeap() forward_table_size = ctypes.sizeof(Win32_MIB_IPFORWARDTABLE) size = wintypes.ULONG(forward_table_size) table_mem = self._heap_alloc(heap, size) p_forward_table = ctypes.cast( table_mem, ctypes.POINTER(Win32_MIB_IPFORWARDTABLE)) try: err = iphlpapi.GetIpForwardTable(p_forward_table, ctypes.byref(size), 0) if err == self.ERROR_INSUFFICIENT_BUFFER: kernel32.HeapFree(heap, 0, p_forward_table) table_mem = self._heap_alloc(heap, size) p_forward_table = ctypes.cast( table_mem, ctypes.POINTER(Win32_MIB_IPFORWARDTABLE)) err = iphlpapi.GetIpForwardTable(p_forward_table, ctypes.byref(size), 0) if err and err != kernel32.ERROR_NO_DATA: raise exception.CloudbaseInitException( 'Unable to get IP forward table. 
Error: %s' % err) yield p_forward_table finally: kernel32.HeapFree(heap, 0, p_forward_table) def _get_ipv4_routing_table(self): routing_table = [] with self._get_forward_table() as p_forward_table: forward_table = p_forward_table.contents table = ctypes.cast( ctypes.addressof(forward_table.table), ctypes.POINTER(Win32_MIB_IPFORWARDROW * forward_table.dwNumEntries)).contents for row in table: destination = Ws2_32.inet_ntoa( row.dwForwardDest).decode() netmask = Ws2_32.inet_ntoa( row.dwForwardMask).decode() gateway = Ws2_32.inet_ntoa( row.dwForwardNextHop).decode() routing_table.append(( destination, netmask, gateway, row.dwForwardIfIndex, row.dwForwardMetric1)) return routing_table def check_static_route_exists(self, destination): return len([r for r in self._get_ipv4_routing_table() if r[0] == destination]) > 0 def add_static_route(self, destination, mask, next_hop, interface_index, metric): args = ['ROUTE', 'ADD', destination, 'MASK', mask, next_hop] (out, err, ret_val) = self.execute_process(args) # Cannot use the return value to determine the outcome if ret_val or err: raise exception.CloudbaseInitException( 'Unable to add route: %s' % err) def get_os_version(self): vi = Win32_OSVERSIONINFOEX_W() vi.dwOSVersionInfoSize = ctypes.sizeof(Win32_OSVERSIONINFOEX_W) ret_val = ntdll.RtlGetVersion(ctypes.byref(vi)) if ret_val: raise exception.WindowsCloudbaseInitException( "RtlGetVersion failed with error: %s" % ret_val) return {"major_version": vi.dwMajorVersion, "minor_version": vi.dwMinorVersion, "build_number": vi.dwBuildNumber, "platform_id": vi.dwPlatformId, "csd_version": vi.szCSDVersion, "service_pack_major": vi.wServicePackMajor, "service_pack_minor": vi.wServicePackMinor, "suite_mask": vi.wSuiteMask, "product_type": vi.wProductType} def is_client_os(self): return self.get_os_version()["product_type"] == self.VER_NT_WORKSTATION def check_os_version(self, major, minor, build=0): vi = Win32_OSVERSIONINFOEX_W() vi.dwOSVersionInfoSize = ctypes.sizeof(Win32_OSVERSIONINFOEX_W) vi.dwMajorVersion = major vi.dwMinorVersion = minor vi.dwBuildNumber = build mask = 0 for type_mask in [VER_MAJORVERSION, VER_MINORVERSION, VER_BUILDNUMBER]: mask = kernel32.VerSetConditionMask(mask, type_mask, VER_GREATER_EQUAL) type_mask = VER_MAJORVERSION | VER_MINORVERSION | VER_BUILDNUMBER ret_val = ntdll.RtlVerifyVersionInfo(ctypes.byref(vi), type_mask, mask) if not ret_val: return True elif ret_val == self.STATUS_REVISION_MISMATCH: return False else: raise exception.CloudbaseInitException( "RtlVerifyVersionInfo failed with error: %s" % ret_val) def get_volume_label(self, drive): max_label_size = 261 label = ctypes.create_unicode_buffer(max_label_size) ret_val = kernel32.GetVolumeInformationW(six.text_type(drive), label, max_label_size, 0, 0, 0, 0, 0) if ret_val: return label.value def get_volume_path_names_by_mount_point(self, mount_point): max_volume_name_len = 50 volume_name = ctypes.create_unicode_buffer(max_volume_name_len) if not kernel32.GetVolumeNameForVolumeMountPointW( six.text_type(mount_point), volume_name, max_volume_name_len): if kernel32.GetLastError() in [self.ERROR_INVALID_NAME, self.ERROR_PATH_NOT_FOUND]: raise exception.ItemNotFoundException( "Mount point not found: %s" % mount_point) else: raise exception.WindowsCloudbaseInitException( "Failed to get volume name for mount point: %s. 
" "Error: %%r" % mount_point) volume_path_names_len = wintypes.DWORD(100) while True: volume_path_names = ctypes.create_unicode_buffer( volume_path_names_len.value) if not kernel32.GetVolumePathNamesForVolumeNameW( volume_name, volume_path_names, volume_path_names_len, ctypes.byref(volume_path_names_len)): if kernel32.GetLastError() == self.ERROR_MORE_DATA: continue else: raise exception.WindowsCloudbaseInitException( "Failed to get path names for volume name: %s." "Error: %%r" % volume_name.value) return [n for n in volume_path_names[ :volume_path_names_len.value - 1].split('\0') if n] def generate_random_password(self, length): if length < 3: raise exception.CloudbaseInitException( "Password can not have less than 3 characters!") while True: pwd = super(WindowsUtils, self).generate_random_password(length) # Make sure that the Windows complexity requirements are met: # http://technet.microsoft.com/en-us/library/cc786468(v=ws.10).aspx valid = True for r in ["[a-z]", "[A-Z]", "[0-9]"]: if not re.search(r, pwd): valid = False if valid: return pwd def _split_str_buf_list(self, buf, buf_len): i = 0 value = '' values = [] while i < buf_len: c = buf[i] if c != '\x00': value += c else: values.append(value) value = '' i += 1 return values def get_logical_drives(self): buf_size = self.MAX_PATH buf = ctypes.create_unicode_buffer(buf_size + 1) buf_len = kernel32.GetLogicalDriveStringsW(buf_size, buf) if not buf_len: raise exception.WindowsCloudbaseInitException( "GetLogicalDriveStringsW failed: %r") return self._split_str_buf_list(buf, buf_len) def get_cdrom_drives(self): drives = self.get_logical_drives() return [d for d in drives if kernel32.GetDriveTypeW(d) == self.DRIVE_CDROM] def _is_64bit_arch(self): # interpreter's bits return struct.calcsize("P") == 8 def get_physical_disks(self): physical_disks = [] disk_guid = GUID_DEVINTERFACE_DISK handle_disks = setupapi.SetupDiGetClassDevsW( ctypes.byref(disk_guid), None, None, self.DIGCF_PRESENT | self.DIGCF_DEVICEINTERFACE) if handle_disks == self.INVALID_HANDLE_VALUE: raise exception.CloudbaseInitException( "SetupDiGetClassDevs failed") try: did = Win32_SP_DEVICE_INTERFACE_DATA() did.cbSize = ctypes.sizeof(Win32_SP_DEVICE_INTERFACE_DATA) index = 0 while setupapi.SetupDiEnumDeviceInterfaces( handle_disks, None, ctypes.byref(disk_guid), index, ctypes.byref(did)): index += 1 handle_disk = self.INVALID_HANDLE_VALUE required_size = wintypes.DWORD() if not setupapi.SetupDiGetDeviceInterfaceDetailW( handle_disks, ctypes.byref(did), None, 0, ctypes.byref(required_size), None): if (kernel32.GetLastError() != self.ERROR_INSUFFICIENT_BUFFER): raise exception.WindowsCloudbaseInitException( "SetupDiGetDeviceInterfaceDetailW failed: %r") pdidd = ctypes.cast( msvcrt.malloc(ctypes.c_size_t(required_size.value)), ctypes.POINTER(Win32_SP_DEVICE_INTERFACE_DETAIL_DATA_W)) try: pdidd.contents.cbSize = ctypes.sizeof( Win32_SP_DEVICE_INTERFACE_DETAIL_DATA_W) if not self._is_64bit_arch(): # NOTE(cpoieana): For some reason, on x86 platforms # the alignment or content of the struct # is not taken into consideration. 
pdidd.contents.cbSize = 6 if not setupapi.SetupDiGetDeviceInterfaceDetailW( handle_disks, ctypes.byref(did), pdidd, required_size, None, None): raise exception.WindowsCloudbaseInitException( "SetupDiGetDeviceInterfaceDetailW failed: %r") device_path = ctypes.cast( pdidd.contents.DevicePath, wintypes.LPWSTR).value handle_disk = kernel32.CreateFileW( device_path, 0, self.FILE_SHARE_READ, None, self.OPEN_EXISTING, 0, 0) if handle_disk == self.INVALID_HANDLE_VALUE: raise exception.CloudbaseInitException( 'CreateFileW failed') sdn = Win32_STORAGE_DEVICE_NUMBER() b = wintypes.DWORD() if not kernel32.DeviceIoControl( handle_disk, self.IOCTL_STORAGE_GET_DEVICE_NUMBER, None, 0, ctypes.byref(sdn), ctypes.sizeof(sdn), ctypes.byref(b), None): raise exception.WindowsCloudbaseInitException( 'DeviceIoControl failed: %r') physical_disks.append( r"\\.\PHYSICALDRIVE%d" % sdn.DeviceNumber) finally: msvcrt.free(pdidd) if handle_disk != self.INVALID_HANDLE_VALUE: kernel32.CloseHandle(handle_disk) finally: setupapi.SetupDiDestroyDeviceInfoList(handle_disks) return physical_disks def get_volumes(self): """Retrieve a list with all the volumes found on all disks.""" volumes = [] volume = ctypes.create_unicode_buffer(chr(0) * self.MAX_PATH) handle_volumes = kernel32.FindFirstVolumeW(volume, self.MAX_PATH) if handle_volumes == self.INVALID_HANDLE_VALUE: raise exception.WindowsCloudbaseInitException( "FindFirstVolumeW failed: %r") try: while True: volumes.append(volume.value) found = kernel32.FindNextVolumeW(handle_volumes, volume, self.MAX_PATH) if not found: errno
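# --- Illustrative sketch of the password-complexity loop in
# generate_random_password() above: keep drawing candidates until lowercase,
# uppercase and digit classes are all present. The candidate generator below
# (secrets over an assumed letters+digits alphabet) is a stand-in; in the real
# class the raw password comes from the base utils class via super().
import re
import secrets
import string

def generate_random_password(length):
    if length < 3:
        raise ValueError('Password can not have less than 3 characters!')
    alphabet = string.ascii_letters + string.digits
    while True:
        pwd = ''.join(secrets.choice(alphabet) for _ in range(length))
        # Windows complexity requirement check, mirroring the regex loop above
        if all(re.search(r, pwd) for r in ('[a-z]', '[A-Z]', '[0-9]')):
            return pwd

if __name__ == '__main__':
    print(generate_random_password(12))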
<filename>examples/hsgp.py<gh_stars>0 # Copyright Contributors to the Pyro project. # SPDX-License-Identifier: Apache-2.0 """ Example: Hilbert space approximation for Gaussian processes. ============================================================ This example replicates the model in the excellent case study by <NAME> [1] (originally written using R and Stan). The case study uses approximate Gaussian processes [2] to model the relative number of births per day in the US from 1969 to 1988. The Hilbert space approximation is way faster than the exact Gaussian processes because it circumvents the need for inverting the covariance matrix. The original case study also emphasizes the iterative process of building a Bayesian model, which is excellent as a pedagogical resource. Here, however, we replicate only the model that includes all components (long term trend, smooth year seasonality, slowly varying day of week effect, day of the year effect and special floating days effects). The different components of the model are isolated into separate functions so that they can easily be reused in different contexts. To combine the multiple components into a single birthdays model, here we make use of Numpyro's `scope` handler which modifies the site names of the components by adding a prefix to them. By doing this, we avoid duplication of site names within the model. Following this pattern, it is straightforward to construct the other models in [1] with the code provided here. There are a few minor differences in the mathematical details of our models, which we had to make for the chains to mix properly or for ease of implementation. We have commented on the places where our models are different. The periodic kernel approximation requires tensorflow-probability on a jax backend. See <https://www.tensorflow.org/probability/examples/TensorFlow_Probability_on_JAX> for installation instructions. **References:** 1. <NAME>, Simpson, et al (2020), `"Bayesian workflow book - Birthdays" <https://avehtari.github.io/casestudies/Birthdays/birthdays.html>`. 2. <NAME>, Bürkner PC, <NAME>, et al (2020), "Practical hilbert space approximate bayesian gaussian processes for probabilistic programming". .. 
image:: ../_static/img/examples/hsgp.png :align: center """ import argparse import os import matplotlib.pyplot as plt import pandas as pd import jax import jax.numpy as jnp from tensorflow_probability.substrates import jax as tfp import numpyro from numpyro import deterministic, plate, sample import numpyro.distributions as dist from numpyro.handlers import scope from numpyro.infer import MCMC, NUTS, init_to_median # --- Data processing functions def get_labour_days(dates): """ First monday of September """ is_september = dates.dt.month.eq(9) is_monday = dates.dt.weekday.eq(0) is_first_week = dates.dt.day.le(7) is_labour_day = is_september & is_monday & is_first_week is_day_after = is_labour_day.shift(fill_value=False) return is_labour_day | is_day_after def get_memorial_days(dates): """ Last monday of May """ is_may = dates.dt.month.eq(5) is_monday = dates.dt.weekday.eq(0) is_last_week = dates.dt.day.ge(25) is_memorial_day = is_may & is_monday & is_last_week is_day_after = is_memorial_day.shift(fill_value=False) return is_memorial_day | is_day_after def get_thanksgiving_days(dates): """ Third thursday of November """ is_november = dates.dt.month.eq(11) is_thursday = dates.dt.weekday.eq(3) is_third_week = dates.dt.day.between(22, 28) is_thanksgiving = is_november & is_thursday & is_third_week is_day_after = is_thanksgiving.shift(fill_value=False) return is_thanksgiving | is_day_after def get_floating_days_indicators(dates): def encode(x): return jnp.array(x.values, dtype=jnp.result_type(int)) return { "labour_days_indicator": encode(get_labour_days(dates)), "memorial_days_indicator": encode(get_memorial_days(dates)), "thanksgiving_days_indicator": encode(get_thanksgiving_days(dates)), } def load_data(): URL = "https://raw.githubusercontent.com/avehtari/casestudies/master/Birthdays/data/births_usa_1969.csv" data = pd.read_csv(URL, sep=",") day0 = pd.to_datetime("31-Dec-1968") dates = [day0 + pd.Timedelta(f"{i}d") for i in data["id"]] data["date"] = dates data["births_relative"] = data["births"] / data["births"].mean() return data def make_birthdays_data_dict(data): x = data["id"].values y = data["births_relative"].values dates = data["date"] xsd = jnp.array((x - x.mean()) / x.std()) ysd = jnp.array((y - y.mean()) / y.std()) day_of_week = jnp.array((data["day_of_week"] - 1).values) day_of_year = jnp.array((data["day_of_year"] - 1).values) floating_days = get_floating_days_indicators(dates) period = 365.25 w0 = x.std() * (jnp.pi * 2 / period) L = 1.5 * max(xsd) M1 = 10 M2 = 10 # 20 in original case study M3 = 5 return { "x": xsd, "day_of_week": day_of_week, "day_of_year": day_of_year, "w0": w0, "L": L, "M1": M1, "M2": M2, "M3": M3, **floating_days, "y": ysd, } # --- Modelling utility functions --- # def spectral_density(w, alpha, length): c = alpha * jnp.sqrt(2 * jnp.pi) * length e = jnp.exp(-0.5 * (length**2) * (w**2)) return c * e def diag_spectral_density(alpha, length, L, M): sqrt_eigenvalues = jnp.arange(1, 1 + M) * jnp.pi / 2 / L return spectral_density(sqrt_eigenvalues, alpha, length) def eigenfunctions(x, L, M): """ The first `M` eigenfunctions of the laplacian operator in `[-L, L]` evaluated at `x`. These are used for the approximation of the squared exponential kernel. 
""" m1 = (jnp.pi / (2 * L)) * jnp.tile(L + x[:, None], M) m2 = jnp.diag(jnp.linspace(1, M, num=M)) num = jnp.sin(m1 @ m2) den = jnp.sqrt(L) return num / den def modified_bessel_first_kind(v, z): v = jnp.asarray(v, dtype=float) return jnp.exp(jnp.abs(z)) * tfp.math.bessel_ive(v, z) def diag_spectral_density_periodic(alpha, length, M): """ Not actually a spectral density but these are used in the same way. These are simply the first `M` coefficients of the low rank approximation for the periodic kernel. """ a = length ** (-2) J = jnp.arange(0, M) c = jnp.where(J > 0, 2, 1) q2 = (c * alpha**2 / jnp.exp(a)) * modified_bessel_first_kind(J, a) return q2 def eigenfunctions_periodic(x, w0, M): """ Basis functions for the approximation of the periodic kernel. """ m1 = jnp.tile(w0 * x[:, None], M) m2 = jnp.diag(jnp.arange(M, dtype=jnp.float32)) mw0x = m1 @ m2 cosines = jnp.cos(mw0x) sines = jnp.sin(mw0x) return cosines, sines # --- Approximate Gaussian processes --- # def approx_se_ncp(x, alpha, length, L, M): """ Hilbert space approximation for the squared exponential kernel in the non-centered parametrisation. """ phi = eigenfunctions(x, L, M) spd = jnp.sqrt(diag_spectral_density(alpha, length, L, M)) with plate("basis", M): beta = sample("beta", dist.Normal(0, 1)) f = deterministic("f", phi @ (spd * beta)) return f def approx_periodic_gp_ncp(x, alpha, length, w0, M): """ Low rank approximation for the periodic squared exponential kernel in the non-centered parametrisation. """ q2 = diag_spectral_density_periodic(alpha, length, M) cosines, sines = eigenfunctions_periodic(x, w0, M) with plate("cos_basis", M): beta_cos = sample("beta_cos", dist.Normal(0, 1)) with plate("sin_basis", M - 1): beta_sin = sample("beta_sin", dist.Normal(0, 1)) # The first eigenfunction for the sine component # is zero, so the first parameter wouldn't contribute to the approximation. # We set it to zero to identify the model and avoid divergences. 
zero = jnp.array([0.0]) beta_sin = jnp.concatenate((zero, beta_sin)) f = deterministic("f", cosines @ (q2 * beta_cos) + sines @ (q2 * beta_sin)) return f # --- Components of the Birthdays model --- # def trend_gp(x, L, M): alpha = sample("alpha", dist.HalfNormal(1.0)) length = sample("length", dist.InverseGamma(10.0, 2.0)) f = approx_se_ncp(x, alpha, length, L, M) return f def year_gp(x, w0, M): alpha = sample("alpha", dist.HalfNormal(1.0)) length = sample("length", dist.HalfNormal(0.2)) # scale=0.1 in original f = approx_periodic_gp_ncp(x, alpha, length, w0, M) return f def weekday_effect(day_of_week): with plate("plate_day_of_week", 6): weekday = sample("_beta", dist.Normal(0, 1)) monday = jnp.array([-jnp.sum(weekday)]) # Monday = 0 in original beta = deterministic("beta", jnp.concatenate((monday, weekday))) return beta[day_of_week] def yearday_effect(day_of_year): slab_df = 50 # 100 in original case study slab_scale = 2 scale_global = 0.1 tau = sample( "tau", dist.HalfNormal(2 * scale_global) ) # Original uses half-t with 100df c_aux = sample("c_aux", dist.InverseGamma(0.5 * slab_df, 0.5 * slab_df)) c = slab_scale * jnp.sqrt(c_aux) # Jan 1st: Day 0 # Feb 29th: Day 59 # Dec 31st: Day 365 with plate("plate_day_of_year", 366): lam = sample("lam", dist.HalfCauchy(scale=1)) lam_tilde = jnp.sqrt(c) * lam / jnp.sqrt(c + (tau * lam) ** 2) beta = sample("beta", dist.Normal(loc=0, scale=tau * lam_tilde)) return beta[day_of_year] def special_effect(indicator): beta = sample("beta", dist.Normal(0, 1)) return beta * indicator # --- Model --- # def birthdays_model( x, day_of_week, day_of_year, memorial_days_indicator, labour_days_indicator, thanksgiving_days_indicator, w0, L, M1, M2, M3, y=None, ): intercept = sample("intercept", dist.Normal(0, 1)) f1 = scope(trend_gp, "trend")(x, L, M1) f2 = scope(year_gp, "year")(x, w0, M2) g3 = scope(trend_gp, "week-trend")( x, L, M3 ) # length ~ lognormal(-1, 1) in original weekday = scope(weekday_effect, "week")(day_of_week) yearday = scope(yearday_effect, "day")(day_of_year) # # --- special days memorial = scope(special_effect, "memorial")(memorial_days_indicator) labour = scope(special_effect, "labour")(labour_days_indicator) thanksgiving = scope(special_effect, "thanksgiving")(thanksgiving_days_indicator) day = yearday + memorial + labour + thanksgiving # --- Combine components f = deterministic("f", intercept + f1 + f2 + jnp.exp(g3) * weekday + day) sigma = sample("sigma", dist.HalfNormal(0.5)) with plate("obs", x.shape[0]): sample("y", dist.Normal(f, sigma), obs=y) # --- plotting function --- # DATA_STYLE = dict(marker=".", alpha=0.8, lw=0, label="data", c="lightgray") MODEL_STYLE = dict(lw=2, color="k") def plot_trend(data, samples, ax=None): y = data["births_relative"] x = data["date"] fsd = samples["intercept"][:, None] + samples["trend/f"] f = jnp.quantile(fsd * y.std() + y.mean(), 0.50, axis=0) if ax is None: ax = plt.gca() ax.plot(x, y, **DATA_STYLE) ax.plot(x, f, **MODEL_STYLE) return ax def plot_seasonality(data, samples, ax=None): y = data["births_relative"] sdev = y.std() mean = y.mean() baseline
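# --- Illustrative numpy-only check of the Hilbert-space approximation defined
# by spectral_density()/diag_spectral_density()/eigenfunctions() above: the
# implied covariance  Phi diag(S) Phi^T  should approach the exact squared
# exponential kernel  alpha * exp(-0.5 (x - x')^2 / length^2)  (with alpha
# playing the role the functions above give it) as M grows and L exceeds the
# data range. Toy values only; this is a sketch, not part of the example.
import numpy as np

def eigenfunctions(x, L, M):
    m1 = (np.pi / (2 * L)) * np.tile(L + x[:, None], M)
    m2 = np.diag(np.linspace(1, M, num=M))
    return np.sin(m1 @ m2) / np.sqrt(L)

def diag_spectral_density(alpha, length, L, M):
    sqrt_eigenvalues = np.arange(1, 1 + M) * np.pi / 2 / L
    return (alpha * np.sqrt(2 * np.pi) * length
            * np.exp(-0.5 * (length**2) * (sqrt_eigenvalues**2)))

if __name__ == '__main__':
    x = np.linspace(-1, 1, 5)
    alpha, length, L, M = 1.0, 0.5, 1.5, 30
    phi = eigenfunctions(x, L, M)
    approx = phi @ np.diag(diag_spectral_density(alpha, length, L, M)) @ phi.T
    exact = alpha * np.exp(-0.5 * (x[:, None] - x[None, :])**2 / length**2)
    print(np.max(np.abs(approx - exact)))  # small for sufficiently large M and L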
[escCharLookBehind, startTokenEsc] if self.setting('allowWhitespaceAfterDirectiveStartToken'): reParts.append('[ \t]*') reParts.append(validSecondCharsLookAhead) self.directiveStartTokenRE = cachedRegex(''.join(reParts)) self.directiveEndTokenRE = cachedRegex(escCharLookBehind + endTokenEsc) def _makePspREs(self): """Setup the regexs for PSP parsing.""" startToken = self.setting('PSPStartToken') startTokenEsc = escapeRegexChars(startToken) self.PSPStartTokenRE = cachedRegex(escCharLookBehind + startTokenEsc) endToken = self.setting('PSPEndToken') endTokenEsc = escapeRegexChars(endToken) self.PSPEndTokenRE = cachedRegex(escCharLookBehind + endTokenEsc) def _unescapeCheetahVars(self, theString): """Unescape any escaped Cheetah \$vars in the string. """ token = self.setting('cheetahVarStartToken') return theString.replace('\\' + token, token) def _unescapeDirectives(self, theString): """Unescape any escaped Cheetah directives in the string. """ token = self.setting('directiveStartToken') return theString.replace('\\' + token, token) def isLineClearToStartToken(self, pos=None): return self.isLineClearToPos(pos) def matchTopLevelToken(self): """Returns the first match found from the following methods: self.matchCommentStartToken self.matchMultiLineCommentStartToken self.matchVariablePlaceholderStart self.matchExpressionPlaceholderStart self.matchDirective self.matchPSPStartToken self.matchEOLSlurpToken Returns None if no match. """ match = None if self.peek() in self._possibleNonStrConstantChars: for matcher in self._nonStrConstMatchers: match = matcher() if match: break return match def matchPyToken(self): match = pseudoprog.match(self.src(), self.pos()) if match and match.group() in tripleQuotedStringStarts: TQSmatch = tripleQuotedStringREs[match.group()].match(self.src(), self.pos()) if TQSmatch: return TQSmatch return match def getPyToken(self): match = self.matchPyToken() if match is None: raise ParseError(self) elif match.group() in tripleQuotedStringStarts: raise ParseError(self, msg='Malformed triple-quoted string') return self.readTo(match.end()) def matchEOLSlurpToken(self): if self.EOLSlurpRE: return self.EOLSlurpRE.match(self.src(), self.pos()) def getEOLSlurpToken(self): match = self.matchEOLSlurpToken() if not match: raise ParseError(self, msg='Invalid EOL slurp token') return self.readTo(match.end()) def matchCommentStartToken(self): return self.commentStartTokenRE.match(self.src(), self.pos()) def getCommentStartToken(self): match = self.matchCommentStartToken() if not match: raise ParseError(self, msg='Invalid single-line comment start token') return self.readTo(match.end()) def matchMultiLineCommentStartToken(self): return self.multiLineCommentTokenStartRE.match(self.src(), self.pos()) def getMultiLineCommentStartToken(self): match = self.matchMultiLineCommentStartToken() if not match: raise ParseError(self, msg='Invalid multi-line comment start token') return self.readTo(match.end()) def matchMultiLineCommentEndToken(self): return self.multiLineCommentEndTokenRE.match(self.src(), self.pos()) def getMultiLineCommentEndToken(self): match = self.matchMultiLineCommentEndToken() if not match: raise ParseError(self, msg='Invalid multi-line comment end token') return self.readTo(match.end()) def getCommaSeparatedSymbols(self): """ Loosely based on getDottedName to pull out comma separated named chunks """ srcLen = len(self) pieces = [] nameChunks = [] if not self.peek() in identchars: raise ParseError(self) while self.pos() < srcLen: c = self.peek() if c in namechars: nameChunk = 
self.getIdentifier() nameChunks.append(nameChunk) elif c == '.': if self.pos()+1 <srcLen and self.peek(1) in identchars: nameChunks.append(self.getc()) else: break elif c == ',': self.getc() pieces.append(''.join(nameChunks)) nameChunks = [] elif c in (' ', '\t'): self.getc() else: break if nameChunks: pieces.append(''.join(nameChunks)) return pieces def getDottedName(self): srcLen = len(self) nameChunks = [] if not self.peek() in identchars: raise ParseError(self) while self.pos() < srcLen: c = self.peek() if c in namechars: nameChunk = self.getIdentifier() nameChunks.append(nameChunk) elif c == '.': if self.pos()+1 <srcLen and self.peek(1) in identchars: nameChunks.append(self.getc()) else: break else: break return ''.join(nameChunks) def matchIdentifier(self): return identRE.match(self.src(), self.pos()) def getIdentifier(self): match = self.matchIdentifier() if not match: raise ParseError(self, msg='Invalid identifier') return self.readTo(match.end()) def matchOperator(self): match = self.matchPyToken() if match and match.group() not in operators: match = None return match def getOperator(self): match = self.matchOperator() if not match: raise ParseError(self, msg='Expected operator') return self.readTo( match.end() ) def matchAssignmentOperator(self): match = self.matchPyToken() if match and match.group() not in assignmentOps: match = None return match def getAssignmentOperator(self): match = self.matchAssignmentOperator() if not match: raise ParseError(self, msg='Expected assignment operator') return self.readTo( match.end() ) def matchDirective(self): """Returns False or the name of the directive matched. """ startPos = self.pos() if not self.matchDirectiveStartToken(): return False self.getDirectiveStartToken() directiveName = self.matchDirectiveName() self.setPos(startPos) return directiveName def matchDirectiveName(self, directiveNameChars=identchars+'0123456789-@'): startPos = self.pos() possibleMatches = self._directiveNamesAndParsers.keys() name = '' match = None while not self.atEnd(): c = self.getc() if not c in directiveNameChars: break name += c if name == '@': if not self.atEnd() and self.peek() in identchars: match = '@' break possibleMatches = [dn for dn in possibleMatches if dn.startswith(name)] if not possibleMatches: break elif (name in possibleMatches and (self.atEnd() or self.peek() not in directiveNameChars)): match = name break self.setPos(startPos) return match def matchDirectiveStartToken(self): return self.directiveStartTokenRE.match(self.src(), self.pos()) def getDirectiveStartToken(self): match = self.matchDirectiveStartToken() if not match: raise ParseError(self, msg='Invalid directive start token') return self.readTo(match.end()) def matchDirectiveEndToken(self): return self.directiveEndTokenRE.match(self.src(), self.pos()) def getDirectiveEndToken(self): match = self.matchDirectiveEndToken() if not match: raise ParseError(self, msg='Invalid directive end token') return self.readTo(match.end()) def matchColonForSingleLineShortFormDirective(self): if not self.atEnd() and self.peek()==':': restOfLine = self[self.pos()+1:self.findEOL()] restOfLine = restOfLine.strip() if not restOfLine: return False elif self.commentStartTokenRE.match(restOfLine): return False else: # non-whitespace, non-commment chars found return True return False def matchPSPStartToken(self): return self.PSPStartTokenRE.match(self.src(), self.pos()) def matchPSPEndToken(self): return self.PSPEndTokenRE.match(self.src(), self.pos()) def getPSPStartToken(self): match = 
self.matchPSPStartToken() if not match: raise ParseError(self, msg='Invalid psp start token') return self.readTo(match.end()) def getPSPEndToken(self): match = self.matchPSPEndToken() if not match: raise ParseError(self, msg='Invalid psp end token') return self.readTo(match.end()) def matchCheetahVarStart(self): """includes the enclosure and cache token""" return self.cheetahVarStartRE.match(self.src(), self.pos()) def matchCheetahVarStartToken(self): """includes the enclosure and cache token""" return self.cheetahVarStartTokenRE.match(self.src(), self.pos()) def matchCheetahVarInExpressionStartToken(self): """no enclosures or cache tokens allowed""" return self.cheetahVarInExpressionStartTokenRE.match(self.src(), self.pos()) def matchVariablePlaceholderStart(self): """includes the enclosure and cache token""" return self.cheetahVarStartRE.match(self.src(), self.pos()) def matchExpressionPlaceholderStart(self): """includes the enclosure and cache token""" return self.expressionPlaceholderStartRE.match(self.src(), self.pos()) def getCheetahVarStartToken(self): """just the start token, not the enclosure or cache token""" match = self.matchCheetahVarStartToken() if not match: raise ParseError(self, msg='Expected Cheetah $var start token') return self.readTo( match.end() ) def getCacheToken(self): try: token = self.cacheTokenRE.match(self.src(), self.pos()) self.setPos( token.end() ) return token.group() except: raise ParseError(self, msg='Expected cache token') def getSilentPlaceholderToken(self): try: token = self.silentPlaceholderTokenRE.match(self.src(), self.pos()) self.setPos( token.end() ) return token.group() except: raise ParseError(self, msg='Expected silent placeholder token') def getTargetVarsList(self): varnames = [] while not self.atEnd(): if self.peek() in ' \t\f': self.getWhiteSpace() elif self.peek() in '\r\n': break elif self.startswith(','): self.advance() elif self.startswith('in ') or self.startswith('in\t'): break #elif self.matchCheetahVarStart(): elif self.matchCheetahVarInExpressionStartToken(): self.getCheetahVarStartToken() self.getSilentPlaceholderToken() self.getCacheToken() varnames.append( self.getDottedName() ) elif self.matchIdentifier(): varnames.append( self.getDottedName() ) else: break return varnames def getCheetahVar(self, plain=False, skipStartToken=False): """This is called when parsing inside expressions. Cache tokens are only valid in placeholders so this method discards any cache tokens found. """ if not skipStartToken: self.getCheetahVarStartToken() self.getSilentPlaceholderToken() self.getCacheToken() return self.getCheetahVarBody(plain=plain) def getCheetahVarBody(self, plain=False): # @@TR: this should be in the compiler return self._compiler.genCheetahVar(self.getCheetahVarNameChunks(), plain=plain) def getCheetahVarNameChunks(self): """ nameChunks = list of Cheetah $var subcomponents represented as tuples [ (namemapperPart,autoCall,restOfName), ] where: namemapperPart = the dottedName base autocall = where NameMapper should use autocalling on namemapperPart restOfName = any arglist, index, or slice If restOfName contains a call arglist (e.g. '(1234)') then autocall is False, otherwise it defaults to True. 
EXAMPLE ------------------------------------------------------------------------ if the raw CheetahVar is $a.b.c[1].d().x.y.z nameChunks is the list [ ('a.b.c',True,'[1]'), ('d',False,'()'), ('x.y.z',True,''), ] """ chunks = [] while self.pos() < len(self): rest = '' autoCall = True if not self.peek() in identchars + '.': break elif self.peek() == '.': if self.pos()+1 < len(self) and self.peek(1) in identchars: self.advance() # discard the period as it isn't needed with NameMapper else: break dottedName = self.getDottedName() if not self.atEnd() and self.peek() in '([': if self.peek() == '(': rest = self.getCallArgString() else: rest = self.getExpression(enclosed=True) period = max(dottedName.rfind('.'), 0) if period: chunks.append( (dottedName[:period], autoCall, '') ) dottedName = dottedName[period+1:] if rest and rest[0]=='(': autoCall = False chunks.append( (dottedName, autoCall, rest) ) return chunks def getCallArgString(self, enclosures=[], # list of tuples (char, pos), where char is ({ or [ useNameMapper=Unspecified): """ Get a method/function call argument string. This method understands *arg, and **kw """ # @@TR: this settings mangling should be removed if useNameMapper is not Unspecified: useNameMapper_orig = self.setting('useNameMapper') self.setSetting('useNameMapper', useNameMapper) if enclosures: pass else: if not self.peek() == '(': raise ParseError(self, msg="Expected '('") startPos = self.pos() self.getc() enclosures = [('(', startPos), ] argStringBits = ['('] addBit = argStringBits.append while True: if self.atEnd(): open = enclosures[-1][0] close = closurePairsRev[open] self.setPos(enclosures[-1][1]) raise ParseError( self, msg="EOF was reached before a matching '" + close + "' was found for the '" + open + "'") c = self.peek() if c in ")}]": # get the ending enclosure and break if not enclosures: raise ParseError(self) c = self.getc() open = closurePairs[c] if enclosures[-1][0] == open: enclosures.pop() addBit(')') break else: raise ParseError(self) elif c in " \t\f\r\n": addBit(self.getc()) elif self.matchCheetahVarInExpressionStartToken(): startPos = self.pos()
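The parser methods above come in matchX()/getX() pairs: matchX() performs a non-consuming regex match at the current cursor position (returning None on failure), while getX() raises ParseError when nothing matches and otherwise consumes the token up to match.end(). The sketch below restates that idiom in isolation; MiniReader is a hypothetical class, not Cheetah's actual API.

import re


class ParseError(Exception):
    pass


class MiniReader:
    """Toy cursor-based reader following the matchX()/getX() idiom above."""

    ident_re = re.compile(r"[A-Za-z_][A-Za-z0-9_]*")

    def __init__(self, src):
        self._src = src
        self._pos = 0

    def matchIdentifier(self):
        # Non-consuming: report a match at the current position, or None.
        return self.ident_re.match(self._src, self._pos)

    def getIdentifier(self):
        # Consuming: fail loudly if nothing matches, otherwise advance the cursor.
        match = self.matchIdentifier()
        if not match:
            raise ParseError("Invalid identifier at position %d" % self._pos)
        token = self._src[self._pos:match.end()]
        self._pos = match.end()
        return token


reader = MiniReader("foo.bar")
print(reader.getIdentifier())  # foo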
= payment_terms_id body['phone_number'] = phone_number body['shipment_method_id'] = shipment_method_id body['tax_area_display_name'] = tax_area_display_name body['tax_area_id'] = tax_area_id body['tax_liable'] = tax_liable body['tax_registration_number'] = tax_registration_number body['type'] = type_ body['website'] = website body['currency'] = currency body['payment_method'] = payment_method body['payment_term'] = payment_term body['picture'] = picture body['shipment_method'] = shipment_method return client.update_customers(company_id=company_id, customer_id=customer_id, body=body) def financials_financial_company_update_customer_payment(client, company_id, customer_payment_id, id_=None, amount=None, applies_to_invoice_id=None, applies_to_invoice_number=None, comment=None, contact_id=None, customer_id=None, customer_number=None, description=None, document_number=None, external_document_number=None, journal_display_name=None, last_modified_date_time=None, line_number=None, posting_date=None, microsoft_graph_entity_id=None, address=None, blocked=None, currency_code=None, currency_id=None, display_name=None, email=None, microsoft_graph_customer_last_modified_date_time_last_modified_date_time=None, number=None, payment_method_id=None, payment_terms_id=None, phone_number=None, shipment_method_id=None, tax_area_display_name=None, tax_area_id=None, tax_liable=None, tax_registration_number=None, type_=None, website=None, currency=None, payment_method=None, payment_term=None, picture=None, shipment_method=None): body = {} body['id'] = id_ body['amount'] = amount body['applies_to_invoice_id'] = applies_to_invoice_id body['applies_to_invoice_number'] = applies_to_invoice_number body['comment'] = comment body['contact_id'] = contact_id body['customer_id'] = customer_id body['customer_number'] = customer_number body['description'] = description body['document_number'] = document_number body['external_document_number'] = external_document_number body['journal_display_name'] = journal_display_name body['last_modified_date_time'] = last_modified_date_time body['line_number'] = line_number body['posting_date'] = posting_date body['customer'] = {} body['customer']['id'] = microsoft_graph_entity_id body['customer']['address'] = address body['customer']['blocked'] = blocked body['customer']['currency_code'] = currency_code body['customer']['currency_id'] = currency_id body['customer']['display_name'] = display_name body['customer']['email'] = email body['customer']['last_modified_date_time'] = microsoft_graph_customer_last_modified_date_time_last_modified_date_time body['customer']['number'] = number body['customer']['payment_method_id'] = payment_method_id body['customer']['payment_terms_id'] = payment_terms_id body['customer']['phone_number'] = phone_number body['customer']['shipment_method_id'] = shipment_method_id body['customer']['tax_area_display_name'] = tax_area_display_name body['customer']['tax_area_id'] = tax_area_id body['customer']['tax_liable'] = tax_liable body['customer']['tax_registration_number'] = tax_registration_number body['customer']['type'] = type_ body['customer']['website'] = website body['customer']['currency'] = currency body['customer']['payment_method'] = payment_method body['customer']['payment_term'] = payment_term body['customer']['picture'] = picture body['customer']['shipment_method'] = shipment_method return client.update_customer_payments(company_id=company_id, customer_payment_id=customer_payment_id, body=body) def 
financials_financial_company_update_customer_payment_journal(client, company_id, customer_payment_journal_id, id_=None, balancing_account_id=None, balancing_account_number=None, code=None, display_name=None, last_modified_date_time=None, account=None, customer_payments=None): body = {} body['id'] = id_ body['balancing_account_id'] = balancing_account_id body['balancing_account_number'] = balancing_account_number body['code'] = code body['display_name'] = display_name body['last_modified_date_time'] = last_modified_date_time body['account'] = account body['customer_payments'] = customer_payments return client.update_customer_payment_journals(company_id=company_id, customer_payment_journal_id=customer_payment_journal_id, body=body) def financials_financial_company_update_dimension(client, company_id, dimension_id, id_=None, code=None, display_name=None, last_modified_date_time=None, dimension_values=None): body = {} body['id'] = id_ body['code'] = code body['display_name'] = display_name body['last_modified_date_time'] = last_modified_date_time body['dimension_values'] = dimension_values return client.update_dimensions(company_id=company_id, dimension_id=dimension_id, body=body) def financials_financial_company_update_dimension_value(client, company_id, dimension_value_id, id_=None, code=None, display_name=None, last_modified_date_time=None): body = {} body['id'] = id_ body['code'] = code body['display_name'] = display_name body['last_modified_date_time'] = last_modified_date_time return client.update_dimension_values(company_id=company_id, dimension_value_id=dimension_value_id, body=body) def financials_financial_company_update_employee(client, company_id, employee_id, id_=None, address=None, birth_date=None, display_name=None, email=None, employment_date=None, given_name=None, job_title=None, last_modified_date_time=None, middle_name=None, mobile_phone=None, number=None, personal_email=None, phone_number=None, statistics_group_code=None, status=None, surname=None, termination_date=None, picture=None): body = {} body['id'] = id_ body['address'] = address body['birth_date'] = birth_date body['display_name'] = display_name body['email'] = email body['employment_date'] = employment_date body['given_name'] = given_name body['job_title'] = job_title body['last_modified_date_time'] = last_modified_date_time body['middle_name'] = middle_name body['mobile_phone'] = mobile_phone body['number'] = number body['personal_email'] = personal_email body['phone_number'] = phone_number body['statistics_group_code'] = statistics_group_code body['status'] = status body['surname'] = surname body['termination_date'] = termination_date body['picture'] = picture return client.update_employees(company_id=company_id, employee_id=employee_id, body=body) def financials_financial_company_update_general_ledger_entry(client, company_id, general_ledger_entry_id, id_=None, account_id=None, account_number=None, credit_amount=None, debit_amount=None, description=None, document_number=None, document_type=None, last_modified_date_time=None, posting_date=None, account=None): body = {} body['id'] = id_ body['account_id'] = account_id body['account_number'] = account_number body['credit_amount'] = credit_amount body['debit_amount'] = debit_amount body['description'] = description body['document_number'] = document_number body['document_type'] = document_type body['last_modified_date_time'] = last_modified_date_time body['posting_date'] = posting_date body['account'] = account return 
client.update_general_ledger_entries(company_id=company_id, general_ledger_entry_id=general_ledger_entry_id, body=body) def financials_financial_company_update_item(client, company_id, item_id, id_=None, base_unit_of_measure_id=None, blocked=None, display_name=None, gtin=None, inventory=None, item_category_code=None, item_category_id=None, last_modified_date_time=None, number=None, price_includes_tax=None, tax_group_code=None, tax_group_id=None, type_=None, unit_cost=None, unit_price=None, item_category=None, picture=None): body = {} body['id'] = id_ body['base_unit_of_measure_id'] = base_unit_of_measure_id body['blocked'] = blocked body['display_name'] = display_name body['gtin'] = gtin body['inventory'] = inventory body['item_category_code'] = item_category_code body['item_category_id'] = item_category_id body['last_modified_date_time'] = last_modified_date_time body['number'] = number body['price_includes_tax'] = price_includes_tax body['tax_group_code'] = tax_group_code body['tax_group_id'] = tax_group_id body['type'] = type_ body['unit_cost'] = unit_cost body['unit_price'] = unit_price body['item_category'] = item_category body['picture'] = picture return client.update_items(company_id=company_id, item_id=item_id, body=body) def financials_financial_company_update_item_category(client, company_id, item_category_id, id_=None, code=None, display_name=None, last_modified_date_time=None): body = {} body['id'] = id_ body['code'] = code body['display_name'] = display_name body['last_modified_date_time'] = last_modified_date_time return client.update_item_categories(company_id=company_id, item_category_id=item_category_id, body=body) def financials_financial_company_update_journal(client, company_id, journal_id, id_=None, balancing_account_id=None, balancing_account_number=None, code=None, display_name=None, last_modified_date_time=None, account=None, journal_lines=None): body = {} body['id'] = id_ body['balancing_account_id'] = balancing_account_id body['balancing_account_number'] = balancing_account_number body['code'] = code body['display_name'] = display_name body['last_modified_date_time'] = last_modified_date_time body['account'] = account body['journal_lines'] = journal_lines return client.update_journals(company_id=company_id, journal_id=journal_id, body=body) def financials_financial_company_update_journal_line(client, company_id, journal_line_id, id_=None, account_id=None, account_number=None, amount=None, comment=None, description=None, document_number=None, external_document_number=None, journal_display_name=None, last_modified_date_time=None, line_number=None, posting_date=None, account=None): body = {} body['id'] = id_ body['account_id'] = account_id body['account_number'] = account_number body['amount'] = amount body['comment'] = comment body['description'] = description body['document_number'] = document_number body['external_document_number'] = external_document_number body['journal_display_name'] = journal_display_name body['last_modified_date_time'] = last_modified_date_time body['line_number'] = line_number body['posting_date'] = posting_date body['account'] = account return client.update_journal_lines(company_id=company_id, journal_line_id=journal_line_id, body=body) def financials_financial_company_update_payment_method(client, company_id, payment_method_id, id_=None, code=None, display_name=None, last_modified_date_time=None): body = {} body['id'] = id_ body['code'] = code body['display_name'] = display_name body['last_modified_date_time'] = last_modified_date_time 
return client.update_payment_methods(company_id=company_id, payment_method_id=payment_method_id, body=body) def financials_financial_company_update_payment_term(client, company_id, payment_term_id, id_=None, calculate_discount_on_credit_memos=None, code=None, discount_date_calculation=None, discount_percent=None, display_name=None, due_date_calculation=None, last_modified_date_time=None): body = {} body['id'] = id_ body['calculate_discount_on_credit_memos'] = calculate_discount_on_credit_memos body['code'] = code body['discount_date_calculation'] = discount_date_calculation body['discount_percent'] = discount_percent body['display_name'] = display_name body['due_date_calculation'] = due_date_calculation body['last_modified_date_time'] = last_modified_date_time return client.update_payment_terms(company_id=company_id, payment_term_id=payment_term_id, body=body) def financials_financial_company_update_picture(client, company_id, picture_id, content_type, id_=None, content=None, height=None, width=None): body = {} body['id'] = id_ body['content'] = content body['content_type'] = content_type body['height'] = height body['width'] = width return client.update_picture(company_id=company_id, picture_id=picture_id, body=body) def financials_financial_company_update_purchase_invoice(client, company_id, purchase_invoice_id, id_=None, buy_from_address=None, currency_code=None, currency_id=None, discount_amount=None, discount_applied_before_tax=None, due_date=None, invoice_date=None, last_modified_date_time=None, number=None, pay_to_address=None, pay_to_contact=None, pay_to_name=None, pay_to_vendor_id=None, pay_to_vendor_number=None, prices_include_tax=None, ship_to_address=None, ship_to_contact=None, ship_to_name=None, status=None, total_amount_excluding_tax=None, total_amount_including_tax=None, total_tax_amount=None, vendor_id=None, vendor_invoice_number=None, vendor_name=None, vendor_number=None, currency=None, purchase_invoice_lines=None, microsoft_graph_entity_id=None, address=None, balance=None, blocked=None, microsoft_graph_vendor_currency_code=None, microsoft_graph_vendor_currency_id=None, display_name=None, email=None, microsoft_graph_vendor_last_modified_date_time_last_modified_date_time=None, microsoft_graph_vendor_number=None, payment_method_id=None, payment_terms_id=None, phone_number=None, tax_liable=None, tax_registration_number=None, website=None, microsoft_graph_currency=None, payment_method=None, payment_term=None, picture=None): body = {} body['id'] = id_ body['buy_from_address'] = buy_from_address body['currency_code'] = currency_code body['currency_id'] = currency_id body['discount_amount'] = discount_amount body['discount_applied_before_tax'] = discount_applied_before_tax body['due_date'] = due_date body['invoice_date'] = invoice_date body['last_modified_date_time'] = last_modified_date_time body['number'] = number body['pay_to_address'] = pay_to_address body['pay_to_contact'] = pay_to_contact body['pay_to_name'] = pay_to_name body['pay_to_vendor_id'] = pay_to_vendor_id body['pay_to_vendor_number'] = pay_to_vendor_number body['prices_include_tax'] = prices_include_tax body['ship_to_address'] = ship_to_address body['ship_to_contact'] = ship_to_contact body['ship_to_name'] = ship_to_name body['status'] = status body['total_amount_excluding_tax'] = total_amount_excluding_tax body['total_amount_including_tax'] = total_amount_including_tax body['total_tax_amount'] = total_tax_amount body['vendor_id'] = vendor_id body['vendor_invoice_number'] = vendor_invoice_number 
body['vendor_name'] = vendor_name body['vendor_number'] = vendor_number body['currency'] = currency body['purchase_invoice_lines'] = purchase_invoice_lines body['vendor'] = {} body['vendor']['id'] = microsoft_graph_entity_id body['vendor']['address'] = address body['vendor']['balance'] = balance body['vendor']['blocked'] = blocked body['vendor']['currency_code'] = microsoft_graph_vendor_currency_code body['vendor']['currency_id'] = microsoft_graph_vendor_currency_id body['vendor']['display_name'] = display_name body['vendor']['email'] = email body['vendor']['last_modified_date_time'] = microsoft_graph_vendor_last_modified_date_time_last_modified_date_time body['vendor']['number'] = microsoft_graph_vendor_number body['vendor']['payment_method_id'] = payment_method_id body['vendor']['payment_terms_id'] = payment_terms_id body['vendor']['phone_number'] = phone_number body['vendor']['tax_liable'] = tax_liable body['vendor']['tax_registration_number'] = tax_registration_number body['vendor']['website'] = website body['vendor']['currency'] = microsoft_graph_currency body['vendor']['payment_method'] = payment_method body['vendor']['payment_term'] = payment_term body['vendor']['picture'] = picture return client.update_purchase_invoices(company_id=company_id, purchase_invoice_id=purchase_invoice_id, body=body) def financials_financial_company_update_purchase_invoice_line(client, company_id, purchase_invoice_line_id, id_=None, account_id=None, amount_excluding_tax=None, amount_including_tax=None, description=None, discount_amount=None, discount_applied_before_tax=None, discount_percent=None, document_id=None, expected_receipt_date=None, invoice_discount_allocation=None, item_id=None, line_type=None, net_amount=None, net_amount_including_tax=None, net_tax_amount=None, quantity=None, sequence=None, tax_code=None, tax_percent=None, total_tax_amount=None, unit_cost=None, account=None, microsoft_graph_entity_id=None, base_unit_of_measure_id=None, blocked=None, display_name=None, gtin=None, inventory=None, item_category_code=None, item_category_id=None, last_modified_date_time=None, number=None, price_includes_tax=None, tax_group_code=None, tax_group_id=None, type_=None, number_unit_cost=None, unit_price=None, item_category=None, picture=None): body = {} body['id'] = id_ body['account_id'] = account_id body['amount_excluding_tax'] = amount_excluding_tax body['amount_including_tax'] = amount_including_tax body['description'] = description body['discount_amount'] = discount_amount body['discount_applied_before_tax'] = discount_applied_before_tax body['discount_percent'] = discount_percent body['document_id'] = document_id body['expected_receipt_date'] = expected_receipt_date body['invoice_discount_allocation'] = invoice_discount_allocation body['item_id'] = item_id body['line_type'] = line_type body['net_amount'] = net_amount body['net_amount_including_tax'] = net_amount_including_tax body['net_tax_amount'] = net_tax_amount body['quantity'] = quantity body['sequence'] = sequence body['tax_code'] = tax_code body['tax_percent'] = tax_percent body['total_tax_amount'] = total_tax_amount body['unit_cost'] = unit_cost body['account'] = account body['item'] = {} body['item']['id'] = microsoft_graph_entity_id body['item']['base_unit_of_measure_id'] = base_unit_of_measure_id body['item']['blocked'] = blocked body['item']['display_name'] = display_name body['item']['gtin'] = gtin body['item']['inventory'] = inventory body['item']['item_category_code'] = item_category_code body['item']['item_category_id'] = 
item_category_id body['item']['last_modified_date_time'] = last_modified_date_time body['item']['number'] = number body['item']['price_includes_tax'] = price_includes_tax body['item']['tax_group_code'] = tax_group_code body['item']['tax_group_id'] = tax_group_id body['item']['type'] = type_ body['item']['unit_cost'] = number_unit_cost body['item']['unit_price'] = unit_price body['item']['item_category'] = item_category body['item']['picture'] = picture return client.update_purchase_invoice_lines(company_id=company_id, purchase_invoice_line_id=purchase_invoice_line_id, body=body) def financials_financial_company_update_sale_credit_memo(client, company_id, sales_credit_memo_id, id_=None, billing_postal_address=None, bill_to_customer_id=None, bill_to_customer_number=None, bill_to_name=None, credit_memo_date=None, currency_code=None, currency_id=None, customer_id=None, customer_name=None, customer_number=None, discount_amount=None, discount_applied_before_tax=None, due_date=None, email=None, external_document_number=None, invoice_id=None, invoice_number=None, last_modified_date_time=None, number=None, payment_terms_id=None, phone_number=None, prices_include_tax=None, salesperson=None, selling_postal_address=None, status=None, total_amount_excluding_tax=None, total_amount_including_tax=None, total_tax_amount=None, currency=None, payment_term=None, sales_credit_memo_lines=None, microsoft_graph_entity_id=None, address=None, blocked=None, microsoft_graph_customer_currency_code=None, microsoft_graph_customer_currency_id=None, display_name=None, microsoft_graph_customer_email=None, microsoft_graph_customer_last_modified_date_time_last_modified_date_time=None, microsoft_graph_customer_number=None, payment_method_id=None, microsoft_graph_customer_payment_terms_id_payment_terms_id=None, microsoft_graph_customer_phone_number=None, shipment_method_id=None, tax_area_display_name=None, tax_area_id=None, tax_liable=None, tax_registration_number=None, type_=None, website=None, microsoft_graph_currency=None, payment_method=None, microsoft_graph_payment_term=None, picture=None, shipment_method=None): body = {} body['id'] = id_ body['billing_postal_address'] = billing_postal_address body['bill_to_customer_id'] = bill_to_customer_id body['bill_to_customer_number'] = bill_to_customer_number body['bill_to_name'] = bill_to_name body['credit_memo_date'] = credit_memo_date body['currency_code'] = currency_code body['currency_id'] = currency_id body['customer_id'] = customer_id body['customer_name'] = customer_name body['customer_number'] = customer_number body['discount_amount'] = discount_amount body['discount_applied_before_tax'] = discount_applied_before_tax body['due_date'] = due_date body['email'] = email body['external_document_number'] = external_document_number body['invoice_id'] = invoice_id body['invoice_number'] = invoice_number body['last_modified_date_time'] = last_modified_date_time body['number'] = number body['payment_terms_id'] = payment_terms_id body['phone_number'] = phone_number body['prices_include_tax'] = prices_include_tax body['salesperson']
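The sales-credit-memo updater above is cut off mid-assignment and continues in the same style as the other updaters. All of these commands build the request body as a plain dict straight from keyword arguments, including keys whose value is None. A common variant is to prune unset fields before sending the update; the build_body helper below is hypothetical and not part of the generated client code, shown only to illustrate that alternative.

def build_body(**fields):
    """Illustrative helper (not part of the generated commands above):
    keep only the fields the caller actually supplied."""
    return {key: value for key, value in fields.items() if value is not None}


# Hypothetical usage with a few of the parameter names seen above.
body = build_body(display_name="Contoso", phone_number=None, tax_liable=True)
print(body)  # {'display_name': 'Contoso', 'tax_liable': True}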
""" Transforms raw CORD-19 data into an articles.sqlite SQLite database. """ import csv import hashlib import json import os.path import re import sqlite3 from collections import Counter from multiprocessing import Pool from dateutil import parser from nltk.tokenize import sent_tokenize from .grammar import Grammar from .metadata import Metadata from .schema import Schema # Global helper for multi-processing support # pylint: disable=W0603 GRAMMAR = None def getGrammar(): """ Multiprocessing helper method. Gets (or first creates then gets) a global grammar object to be accessed in a new subprocess. Returns: Grammar """ global GRAMMAR if not GRAMMAR: GRAMMAR = Grammar() return GRAMMAR class Execute(object): """ Transforms raw csv and json files into an articles.sqlite SQLite database. """ # SQL statements CREATE_TABLE = "CREATE TABLE IF NOT EXISTS {table} ({fields})" INSERT_ROW = "INSERT INTO {table} ({columns}) VALUES ({values})" @staticmethod def init(output): """ Connects initializes a new output SQLite database. Args: output: output directory, if None uses default path Returns: connection to new SQLite database """ # Default path if not provided if not output: output = os.path.join(os.path.expanduser("~"), ".cord19", "models") # Create if output path doesn't exist os.makedirs(output, exist_ok=True) # Output database file dbfile = os.path.join(output, "articles.sqlite") # Delete existing file if os.path.exists(dbfile): os.remove(dbfile) # Create output database db = sqlite3.connect(dbfile) # Create articles table Execute.create(db, Schema.ARTICLES, "articles") # Create sections table Execute.create(db, Schema.SECTIONS, "sections") # Create stats table Execute.create(db, Schema.STATS, "stats") # Create citations table Execute.create(db, Schema.CITATIONS, "citations") return db @staticmethod def create(db, table, name): """ Creates a SQLite table. Args: db: database connection table: table schema name: table name """ columns = ['{0} {1}'.format(name, ctype) for name, ctype in table.items()] create = Execute.CREATE_TABLE.format(table=name, fields=", ".join(columns)) # pylint: disable=W0703 try: db.execute(create) except Exception as e: print(create) print("Failed to create table: " + e) @staticmethod def insert(db, table, name, row): """ Builds and inserts a row. Args: db: database connection table: table object name: table name row: row to insert """ # Build insert prepared statement columns = [name for name, _ in table.items()] insert = Execute.INSERT_ROW.format(table=name, columns=", ".join(columns), values=("?, " * len(columns))[:-2]) try: # Execute insert statement db.execute(insert, Execute.values(table, row, columns)) # pylint: disable=W0703 except Exception as ex: print("Error inserting row: {}".format(row), ex) @staticmethod def values(table, row, columns): """ Formats and converts row into database types based on table schema. Args: table: table schema row: row tuple columns: column names Returns: Database schema formatted row tuple """ values = [] for x, column in enumerate(columns): # Get value value = row[x] if table[column].startswith("INTEGER"): values.append(int(value) if value else 0) elif table[column] == "BOOLEAN": values.append(1 if value == "TRUE" else 0) elif table[column] == "TEXT": # Clean empty text and replace with None values.append(value if value and len(value.strip()) > 0 else None) else: values.append(value) return values @staticmethod def getId(row): """ Gets id for this row. Builds one from the title if no body content is available. 
Args: row: input row Returns: sha1 hash id """ # Use sha1 provided, if available uid = row["sha"].split("; ")[0] if row["sha"] else None if not uid: # Fallback to sha1 of title uid = hashlib.sha1(row["title"].encode("utf-8")).hexdigest() return uid @staticmethod def getDate(row): """ Parses the publish date from the input row. Args: row: input row Returns: publish date """ date = row["publish_time"] if date: try: if date.isdigit() and len(date) == 4: # Default entries with just year to Jan 1 date += "-01-01" return parser.parse(date) # pylint: disable=W0702 except: # Skip parsing errors return None return None @staticmethod def getTags(sections): """ Searches input sections for matching keywords. If found, returns the keyword tag. Args: sections: list of text sections Returns: tags """ # Keyword patterns to search for keywords = [r"2019[\-\s]?n[\-\s]?cov", "2019 novel coronavirus", "coronavirus 2019", r"coronavirus disease (?:20)?19", r"covid(?:[\-\s]?19)?", r"n\s?cov[\-\s]?2019", r"sars-cov-?2", r"wuhan (?:coronavirus|cov|pneumonia)"] # Build regular expression for each keyword. Wrap term in word boundaries regex = "|".join(["\\b%s\\b" % keyword.lower() for keyword in keywords]) tags = None for _, text in sections: # Look for at least one keyword match if re.findall(regex, text.lower()): tags = "COVID-19" return tags @staticmethod def filtered(sections, citations): """ Returns a filtered list of text sections and citations. Duplicate and boilerplate text strings are removed. Args: sections: input sections citations: input citations Returns: filtered list of sections, citations """ # Use list to preserve insertion order unique = [] keys = set() # Boilerplate text to ignore boilerplate = ["COVID-19 resource centre", "permission to make all its COVID", "WHO COVID database"] for name, text in sections: # Add unique text that isn't boilerplate text if not text in keys and not any([x in text for x in boilerplate]): unique.append((name, text)) keys.add(text) return unique, list(set(citations)) @staticmethod def files(row, uids): """ Build a list of json file locations and names to parse. Args: row: input row uids: list of sha1 ids Returns: list of (directory, file name) """ # Parse both PMC and PDF json if available, sections will be de-duplicated paths = [("pdf_json", uid + ".json") for uid in uids] if row["has_pmc_xml_parse"].lower() == "true": paths.append(("pmc_json", row["pmcid"] + ".xml.json")) return paths @staticmethod def getSections(row, directory): """ Reads title, abstract and body text for a given row. Text is returned as a list of sections. Args: row: input row directory: input directory Returns: list of text sections """ sections = [] citations = [] # Get ids and subset uids = row["sha"].split("; ") if row["sha"] else None subset = row["full_text_file"] # Add title and abstract sections for name in ["title", "abstract"]: text = row[name] if text: # Remove leading and trailing [] text = re.sub(r"^\[", "", text) text = re.sub(r"\]$", "", text) sections.extend([(name.upper(), x) for x in sent_tokenize(text)]) if uids and subset: for location, filename in Execute.files(row, uids): # Build article path. Path has subset directory twice. 
article = os.path.join(directory, subset, subset, location, filename) try: with open(article) as jfile: data = json.load(jfile) # Extract text from each section for section in data["body_text"]: # Section name and text name = section["section"].upper() if len(section["section"].strip()) > 0 else None text = section["text"].replace("\n", " ") # Split text into sentences and add to sections sections.extend([(name, x) for x in sent_tokenize(text)]) # Extract text from each citation citations.extend([entry["title"] for entry in data["bib_entries"].values()]) # pylint: disable=W0703 except Exception as ex: print("Error processing text file: {}".format(article), ex) # Filter sections and return return Execute.filtered(sections, citations) @staticmethod def stream(directory): """ Generator that yields rows from a metadata.csv file. The directory is also included. Args: directory """ with open(os.path.join(directory, "metadata.csv"), mode="r") as csvfile: for row in csv.DictReader(csvfile): yield (row, directory) @staticmethod def process(params): """ Processes a single row Args: params: (row, directory) Returns: (id, article, sections) """ # Get grammar handle grammar = getGrammar() # Unpack parameters row, directory = params # Get uid uid = Execute.getId(row) # Published date date = Execute.getDate(row) # Get text sections sections, citations = Execute.getSections(row, directory) # Get tags tags = Execute.getTags(sections) if tags: # Build NLP tokens for sections tokenslist = grammar.parse([text for _, text in sections]) # Join NLP tokens with sections sections = [(name, text, tokenslist[x]) for x, (name, text) in enumerate(sections)] # Derive metadata fields design, keywords, size, sample, method, labels, risks = Metadata.parse(sections) # Add additional fields to each section sections = [(name, text, labels[x] if labels[x] else grammar.label(tokens), risks[x]) for x, (name, text, tokens) in enumerate(sections)] else: # Untagged section, create None default placeholders design, keywords, size, sample, method = None, None, None, None, None # Extend sections with empty columns sections = [(name, text, None, []) for name, text in sections] # Clear citations when not a tagged entry citations = None # Article row - id, source, published, publication, authors, title, tags, design, keywords, sample size # sample section, samplemethod, reference article = (uid, row["source_x"], date, row["journal"], row["authors"], row["title"], tags, design, keywords, size, sample, method, row["url"]) return (uid, article, sections, tags, design, citations) @staticmethod def run(directory, output): """ Main execution method. Args: directory: input directory output: output directory path """ print("Building articles.sqlite from {}".format(directory)) #
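        # (The body of run() is truncated above. What follows is a hedged sketch of the
        # usual wiring of stream() + process() with a multiprocessing Pool, based only on
        # the helpers visible in this file -- the repository's actual code may differ.)

        # Create the output database
        db = Execute.init(output)

        # Fan rows out to worker processes; each worker reuses a per-process Grammar via
        # getGrammar() and returns (uid, article, sections, tags, design, citations).
        with Pool() as pool:
            for uid, article, sections, tags, design, citations in pool.imap(
                Execute.process, Execute.stream(directory), chunksize=100
            ):
                # Store the article metadata row; sections, stats and citations rows
                # would be inserted the same way against their own schemas.
                Execute.insert(db, Schema.ARTICLES, "articles", article)

        db.commit()
        db.close()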
import os import json from Bio.Seq import Seq from django.test import TestCase import edge.recombine from edge.recombine import find_swap_region, recombine, remove_overhangs from edge.blastdb import build_all_genome_dbs, fragment_fasta_fn from edge.models import Genome, Fragment, Genome_Fragment, Operation class RemoveOverhangsTest(TestCase): def test_removes_front_overhang(self): self.assertEquals(remove_overhangs('(atg/)aa'), 'aa') def test_removes_back_overhang(self): self.assertEquals(remove_overhangs('aa(atg/)'), 'aa') def test_removes_front_and_back(self): self.assertEquals(remove_overhangs('(atg/)aa(atg/)'), 'aa') def test_does_not_remove_internal_overhang(self): self.assertEquals(remove_overhangs('(atg/)a(atg/)a(atg/)'), 'a(atg/)a') def test_does_not_remove_unclosed_overhang(self): self.assertEquals(remove_overhangs('(atg/aa'), '(atg/aa') self.assertEquals(remove_overhangs('atg/aa)'), 'atg/aa)') def test_works_with_single_char_input(self): self.assertEquals(remove_overhangs(')'), ')') self.assertEquals(remove_overhangs('('), '(') class GenomeRecombinationTest(TestCase): def setUp(self): self.old_check_junction_lu = edge.recombine.CHECK_JUNCTION_LEFT_UP self.old_check_junction_ld = edge.recombine.CHECK_JUNCTION_LEFT_DN self.old_check_junction_ru = edge.recombine.CHECK_JUNCTION_RIGHT_UP self.old_check_junction_rd = edge.recombine.CHECK_JUNCTION_RIGHT_DN edge.recombine.CHECK_JUNCTION_LEFT_UP = 10 edge.recombine.CHECK_JUNCTION_LEFT_DN = 40 edge.recombine.CHECK_JUNCTION_RIGHT_UP = 40 edge.recombine.CHECK_JUNCTION_RIGHT_DN = 10 self.old_single_cross_over_gap_max = edge.recombine.SINGLE_CROSSOVER_MAX_GAP edge.recombine.SINGLE_CROSSOVER_MAX_GAP = 10 def tearDown(self): edge.recombine.CHECK_JUNCTION_LEFT_UP = self.old_check_junction_lu edge.recombine.CHECK_JUNCTION_LEFT_DN = self.old_check_junction_ld edge.recombine.CHECK_JUNCTION_RIGHT_UP = self.old_check_junction_ru edge.recombine.CHECK_JUNCTION_RIGHT_DN = self.old_check_junction_rd edge.recombine.SINGLE_CROSSOVER_MAX_GAP = self.old_single_cross_over_gap_max def build_genome(self, circular, *templates): g = Genome(name='Foo') g.save() for seq in templates: f = Fragment.create_with_sequence('Bar', seq, circular=circular) Genome_Fragment(genome=g, fragment=f, inherited=False).save() try: os.unlink(fragment_fasta_fn(f)) except: pass build_all_genome_dbs(refresh=True) return Genome.objects.get(pk=g.id) def test_finds_correct_region_for_swapping(self): upstream = "gagattgtccgcgtttt" front_bs = "catagcgcacaggacgcggag" middle = "cggcacctgtgagccg" back_bs = "taatgaccccgaagcagg" downstream = "gttaaggcgcgaacat" replaced = "aaaaaaaaaaaaaaaaaaa" template = ''.join([upstream, front_bs, middle, back_bs, downstream]) cassette = ''.join([front_bs, replaced, back_bs]) arm_len = min(len(front_bs), len(back_bs)) g = self.build_genome(False, template) r = find_swap_region(g, cassette, arm_len) self.assertEquals(len(r), 1) self.assertEquals(r[0].fragment_id, g.fragments.all()[0].id) self.assertEquals(r[0].fragment_name, g.fragments.all()[0].name) self.assertEquals(r[0].start, len(upstream) + 1) self.assertEquals(r[0].end, len(template) - len(downstream)) self.assertEquals(r[0].sequence, ''.join([front_bs, middle, back_bs])) self.assertEquals(r[0].cassette_reversed, False) self.assertEquals(r[0].front_arm, front_bs[0:arm_len]) self.assertEquals(r[0].back_arm, back_bs[-arm_len:]) def test_finding_swap_region_across_circular_boundary(self): upstream = "gagattgtccgcgtttt" front_bs = "catagcgcacaggacgcggag" middle = "cggcacctgtgagccg" back_bs = 
"taatgaccccgaagcagg" downstream = "gttaaggcgcgaacat" replaced = "aaaaaaaaaaaaaaaaaaa" cassette = ''.join([front_bs, replaced, back_bs]) arm_len = min(len(front_bs), len(back_bs)) template = ''.join([middle[8:], back_bs, downstream, upstream, front_bs, middle[0:8]]) g = self.build_genome(True, template) r = find_swap_region(g, cassette, arm_len) self.assertEquals(len(r), 1) self.assertEquals(r[0].fragment_id, g.fragments.all()[0].id) self.assertEquals(r[0].fragment_name, g.fragments.all()[0].name) self.assertEquals(r[0].start, len(template) - 8 - len(front_bs) + 1) self.assertEquals(r[0].end, len(middle) - 8 + len(back_bs)) self.assertEquals(r[0].sequence, ''.join([front_bs, middle, back_bs])) self.assertEquals(r[0].cassette_reversed, False) self.assertEquals(r[0].front_arm, front_bs[0:arm_len]) self.assertEquals(r[0].back_arm, back_bs[-arm_len:]) def test_finding_swap_region_when_front_arm_is_across_circular_boundary(self): upstream = "gagattgtccgcgtttt" front_bs = "catagcgcacaggacgcggag" middle = "cggcacctgtgagccg" back_bs = "taatgaccccgaagcagg" downstream = "gttaaggcgcgaacat" replaced = "aaaaaaaaaaaaaaaaaaa" cassette = ''.join([front_bs, replaced, back_bs]) arm_len = min(len(front_bs), len(back_bs)) template = ''.join([front_bs[8:], middle, back_bs, downstream, upstream, front_bs[0:8]]) g = self.build_genome(True, template) r = find_swap_region(g, cassette, arm_len) self.assertEquals(len(r), 1) self.assertEquals(r[0].fragment_id, g.fragments.all()[0].id) self.assertEquals(r[0].fragment_name, g.fragments.all()[0].name) self.assertEquals(r[0].start, len(template) - 8 + 1) self.assertEquals(r[0].end, len(front_bs + middle + back_bs) - 8) self.assertEquals(r[0].sequence, ''.join([front_bs, middle, back_bs])) self.assertEquals(r[0].cassette_reversed, False) self.assertEquals(r[0].front_arm, front_bs[0:arm_len]) self.assertEquals(r[0].back_arm, back_bs[-arm_len:]) def test_finding_swap_region_when_back_arm_is_across_circular_boundary(self): upstream = "gagattgtccgcgtttt" front_bs = "catagcgcacaggacgcggag" middle = "cggcacctgtgagccg" back_bs = "taatgaccccgaagcagg" downstream = "gttaaggcgcgaacat" replaced = "aaaaaaaaaaaaaaaaaaa" cassette = ''.join([front_bs, replaced, back_bs]) arm_len = min(len(front_bs), len(back_bs)) template = ''.join([back_bs[8:], downstream, upstream, front_bs, middle, back_bs[0:8]]) g = self.build_genome(True, template) r = find_swap_region(g, cassette, arm_len) self.assertEquals(len(r), 1) self.assertEquals(r[0].fragment_id, g.fragments.all()[0].id) self.assertEquals(r[0].fragment_name, g.fragments.all()[0].name) self.assertEquals(r[0].start, len(back_bs) - 8 + len(downstream + upstream) + 1) self.assertEquals(r[0].end, len(back_bs) - 8) self.assertEquals(r[0].sequence, ''.join([front_bs, middle, back_bs])) self.assertEquals(r[0].cassette_reversed, False) self.assertEquals(r[0].front_arm, front_bs[0:arm_len]) self.assertEquals(r[0].back_arm, back_bs[-arm_len:]) def test_finds_correct_region_for_swapping_with_reverse_complement_cassette(self): upstream = "gagattgtccgcgtttt" front_bs = "catagcgcacaggacgcggag" middle = "cggcacctgtgagccg" back_bs = "taatgaccccgaagcagg" downstream = "gttaaggcgcgaacat" replaced = "aaaaaaaaaaaaaaaaaaa" cassette = str(Seq(''.join([front_bs, replaced, back_bs])).reverse_complement()) arm_len = min(len(front_bs), len(back_bs)) template = ''.join([upstream, front_bs, middle, back_bs, downstream]) g = self.build_genome(False, template) r = find_swap_region(g, cassette, arm_len) self.assertEquals(len(r), 1) self.assertEquals(r[0].fragment_id, 
g.fragments.all()[0].id) self.assertEquals(r[0].fragment_name, g.fragments.all()[0].name) self.assertEquals(r[0].start, len(upstream) + 1) self.assertEquals(r[0].end, len(template) - len(downstream)) self.assertEquals(r[0].sequence, ''.join([front_bs, middle, back_bs])) self.assertEquals(r[0].cassette_reversed, True) self.assertEquals(r[0].front_arm, str(Seq(back_bs[-arm_len:]).reverse_complement())) self.assertEquals(r[0].back_arm, str(Seq(front_bs[0:arm_len]).reverse_complement())) def test_finding_reverse_complement_region_across_circular_boundary(self): upstream = "gagattgtccgcgtttt" front_bs = "catagcgcacaggacgcggag" middle = "cggcacctgtgagccg" back_bs = "taatgaccccgaagcagg" downstream = "gttaaggcgcgaacat" replaced = "aaaaaaaaaaaaaaaaaaa" cassette = str(Seq(''.join([front_bs, replaced, back_bs])).reverse_complement()) arm_len = min(len(front_bs), len(back_bs)) template = ''.join([middle[8:], back_bs, downstream, upstream, front_bs, middle[0:8]]) g = self.build_genome(True, template) r = find_swap_region(g, cassette, arm_len) self.assertEquals(len(r), 1) self.assertEquals(r[0].fragment_id, g.fragments.all()[0].id) self.assertEquals(r[0].fragment_name, g.fragments.all()[0].name) self.assertEquals(r[0].start, len(template) - 8 - len(front_bs) + 1) self.assertEquals(r[0].end, len(middle) - 8 + len(back_bs)) self.assertEquals(r[0].sequence, ''.join([front_bs, middle, back_bs])) self.assertEquals(r[0].cassette_reversed, True) self.assertEquals(r[0].front_arm, str(Seq(back_bs[-arm_len:]).reverse_complement())) self.assertEquals(r[0].back_arm, str(Seq(front_bs[0:arm_len]).reverse_complement())) def test_finding_reverse_complement_region_when_front_arm_is_across_circular_boundary(self): upstream = "gagattgtccgcgtttt" front_bs = "catagcgcacaggacgcggag" middle = "cggcacctgtgagccg" back_bs = "taatgaccccgaagcagg" downstream = "gttaaggcgcgaacat" replaced = "aaaaaaaaaaaaaaaaaaa" cassette = str(Seq(''.join([front_bs, replaced, back_bs])).reverse_complement()) arm_len = min(len(front_bs), len(back_bs)) template = ''.join([front_bs[8:], middle, back_bs, downstream, upstream, front_bs[0:8]]) g = self.build_genome(True, template) r = find_swap_region(g, cassette, arm_len) self.assertEquals(len(r), 1) self.assertEquals(r[0].fragment_id, g.fragments.all()[0].id) self.assertEquals(r[0].fragment_name, g.fragments.all()[0].name) self.assertEquals(r[0].start, len(template) - 8 + 1) self.assertEquals(r[0].end, len(front_bs + middle + back_bs) - 8) self.assertEquals(r[0].sequence, ''.join([front_bs, middle, back_bs])) self.assertEquals(r[0].cassette_reversed, True) self.assertEquals(r[0].front_arm, str(Seq(back_bs[-arm_len:]).reverse_complement())) self.assertEquals(r[0].back_arm, str(Seq(front_bs[0:arm_len]).reverse_complement())) def test_finding_reverse_complement_region_when_back_arm_is_across_circular_boundary(self): upstream = "gagattgtccgcgtttt" front_bs = "catagcgcacaggacgcggag" middle = "cggcacctgtgagccg" back_bs = "taatgaccccgaagcagg" downstream = "gttaaggcgcgaacat" replaced = "aaaaaaaaaaaaaaaaaaa" cassette = str(Seq(''.join([front_bs, replaced, back_bs])).reverse_complement()) arm_len = min(len(front_bs), len(back_bs)) template = ''.join([back_bs[8:], downstream, upstream, front_bs, middle, back_bs[0:8]]) g = self.build_genome(True, template) r = find_swap_region(g, cassette, arm_len) self.assertEquals(len(r), 1) self.assertEquals(r[0].fragment_id, g.fragments.all()[0].id) self.assertEquals(r[0].fragment_name, g.fragments.all()[0].name) self.assertEquals(r[0].start, len(back_bs) - 8 + len(downstream + 
upstream) + 1) self.assertEquals(r[0].end, len(back_bs) - 8) self.assertEquals(r[0].sequence, ''.join([front_bs, middle, back_bs])) self.assertEquals(r[0].cassette_reversed, True) self.assertEquals(r[0].front_arm, str(Seq(back_bs[-arm_len:]).reverse_complement())) self.assertEquals(r[0].back_arm, str(Seq(front_bs[0:arm_len]).reverse_complement())) def test_recombines_correctly(self): upstream = "gagattgtccgcgtttt" front_bs = "catagcgcacaggacgcggag" middle = "cggcacctgtgagccg" back_bs = "taatgaccccgaagcagg" downstream = "gttaaggcgcgaacat" replaced = "a" * 100 template = ''.join([upstream, front_bs, middle, back_bs, downstream]) cassette = ''.join([front_bs, replaced, back_bs]) arm_len = min(len(front_bs), len(back_bs)) g = self.build_genome(False, template) c = recombine(g, cassette, arm_len) self.assertNotEqual(g.id, c.id) self.assertEquals(c.fragments.all()[0].indexed_fragment().sequence, ''.join([upstream, cassette, downstream])) def test_recombines_ignoring_extra_bases_upstream_and_downstream_of_cassette(self): upstream = "gagattgtccgcgtttt" front_bs = "catagcgcacaggacgcggagcgacgtagtctgcatctgatgcatgcactac" middle = "cggcacctgtgagccg" back_bs = "taatgaccccgaagcagggcatcgtactactgatgcatgcacactgacgta" downstream = "gttaaggcgcgaacat" replaced = "a" * 100 template = ''.join([upstream, front_bs, middle, back_bs, downstream]) cassette = ''.join(['c' * 6 + front_bs, replaced, back_bs + 'c' * 6]) arm_len = min(len(front_bs), len(back_bs)) / 2 g = self.build_genome(False, template) c = recombine(g, cassette, arm_len) self.assertNotEqual(g.id, c.id) self.assertEquals(c.fragments.all()[0].indexed_fragment().sequence, ''.join([upstream, front_bs, replaced, back_bs, downstream])) def test_creates_operation(self): upstream = "gagattgtccgcgtttt" front_bs = "catagcgcacaggacgcggag" middle = "cggcacctgtgagccg" back_bs = "taatgaccccgaagcagg" downstream = "gttaaggcgcgaacat" replaced = "aaaaaaaaaaaaaaaaaaa" template = ''.join([upstream, front_bs, middle, back_bs, downstream]) cassette = ''.join([front_bs, replaced, back_bs]) arm_len = min(len(front_bs), len(back_bs)) g = self.build_genome(False, template) self.assertEquals(Operation.objects.count(), 0) c = recombine(g, cassette, arm_len) self.assertEquals(Operation.objects.count(), 1) self.assertEquals(c.operation_set.all()[0].type, Operation.RECOMBINATION[0]) self.assertEquals(c.operation_set.all()[0].params, json.dumps(dict(cassette=cassette, homology_arm_length=arm_len))) def test_annotates_cassette(self): upstream = "gagattgtccgcgtttt" front_bs = "catagcgcacaggacgcggag" middle = "cggcacctgtgagccg" back_bs = "taatgaccccgaagcagg" downstream = "gttaaggcgcgaacat" replaced = "aaaaaaaaaaaaaaaaaaa" template = ''.join([upstream, front_bs, middle, back_bs, downstream]) cassette = ''.join([front_bs, replaced, back_bs]) arm_len = min(len(front_bs), len(back_bs)) g = self.build_genome(False, template) a = g.fragments.all()[0].indexed_fragment().annotations() self.assertEquals(len(a), 0) c = recombine(g, cassette, arm_len) a = c.fragments.all()[0].indexed_fragment().annotations() self.assertEquals(len(a), 1) self.assertEquals(a[0].base_first, len(upstream) + 1) self.assertEquals(a[0].base_last, len(upstream + cassette)) self.assertEquals(a[0].feature_base_first, 1) self.assertEquals(a[0].feature_base_last, len(cassette)) self.assertEquals(a[0].feature.strand, 1) self.assertEquals(a[0].feature.operation.type, Operation.RECOMBINATION[0]) self.assertEquals(a[0].feature.operation.genome, c) def test_annotates_reversed_cassette(self): upstream = "gagattgtccgcgtttt" front_bs 
= "catagcgcacaggacgcggag" middle = "cggcacctgtgagccg" back_bs = "taatgaccccgaagcagg" downstream = "gttaaggcgcgaacat" replaced = "aaaaaaaaaaaaaaaaaaa" template = ''.join([upstream, front_bs, middle, back_bs, downstream]) cassette = str(Seq(''.join([front_bs, replaced, back_bs])).reverse_complement()) arm_len = min(len(front_bs), len(back_bs)) g = self.build_genome(False, template) a = g.fragments.all()[0].indexed_fragment().annotations() self.assertEquals(len(a), 0) c = recombine(g, cassette, arm_len) a = c.fragments.all()[0].indexed_fragment().annotations() self.assertEquals(len(a), 1) self.assertEquals(a[0].base_first, len(upstream) + 1) self.assertEquals(a[0].base_last, len(upstream + cassette)) self.assertEquals(a[0].feature_base_first, 1) self.assertEquals(a[0].feature_base_last, len(cassette)) # on reverse strand self.assertEquals(a[0].feature.strand, -1) self.assertEquals(a[0].feature.operation.type, Operation.RECOMBINATION[0]) self.assertEquals(a[0].feature.operation.genome, c) def test_integrates_and_annotates_cassette_across_circular_boundary(self): upstream = "gagattgtccgcgtttt" front_bs = "catagcgcacaggacgcggag" middle = "cggcacctgtgagccg" back_bs = "taatgaccccgaagcagg" downstream = "gttaaggcgcgaacat" replaced = "aaaaaaaaaaaaaaaaaaa" cassette = ''.join([front_bs, replaced, back_bs]) arm_len = min(len(front_bs), len(back_bs)) template = ''.join([middle[8:], back_bs, downstream, upstream, front_bs, middle[0:8]]) g = self.build_genome(True, template) c = recombine(g, cassette, arm_len) self.assertNotEqual(g.id, c.id) self.assertEquals(c.fragments.all()[0].indexed_fragment().sequence, ''.join([downstream, upstream, cassette])) a = c.fragments.all()[0].indexed_fragment().annotations() self.assertEquals(len(a), 1) self.assertEquals(a[0].base_first, len(downstream + upstream) + 1) self.assertEquals(a[0].base_last, len(downstream + upstream + cassette)) self.assertEquals(a[0].feature_base_first, 1) self.assertEquals(a[0].feature_base_last, len(cassette)) self.assertEquals(a[0].feature.strand, 1) self.assertEquals(a[0].feature.operation.type, Operation.RECOMBINATION[0]) self.assertEquals(a[0].feature.operation.genome, c) def test_recombine_when_front_arm_is_across_circular_boundary(self): upstream = "gagattgtccgcgtttt" front_bs = "catagcgcacaggacgcggag" middle = "cggcacctgtgagccg" back_bs = "taatgaccccgaagcagg" downstream = "gttaaggcgcgaacat" replaced = "aaaaaaaaaaaaaaaaaaa" cassette = ''.join([front_bs, replaced, back_bs]) arm_len = min(len(front_bs), len(back_bs)) template = ''.join([front_bs[8:], middle, back_bs, downstream, upstream, front_bs[0:8]]) g = self.build_genome(True, template) c = recombine(g, cassette, arm_len) self.assertNotEqual(g.id, c.id) self.assertEquals(c.fragments.all()[0].indexed_fragment().sequence, ''.join([downstream, upstream, cassette])) def test_recombine_when_back_arm_is_across_circular_boundary(self): upstream = "gagattgtccgcgtttt" front_bs = "catagcgcacaggacgcggag" middle = "cggcacctgtgagccg" back_bs = "taatgaccccgaagcagg" downstream = "gttaaggcgcgaacat" replaced = "aaaaaaaaaaaaaaaaaaa" cassette = ''.join([front_bs, replaced, back_bs]) arm_len = min(len(front_bs), len(back_bs)) template = ''.join([back_bs[8:], downstream, upstream, front_bs, middle, back_bs[0:8]]) g = self.build_genome(True, template) c = recombine(g, cassette, arm_len) self.assertNotEqual(g.id, c.id) self.assertEquals(c.fragments.all()[0].indexed_fragment().sequence, ''.join([downstream, upstream, cassette])) def test_recombines_with_reverse_complement_cassette_correctly(self): upstream 
= "gagattgtccgcgtttt" front_bs = "catagcgcacaggacgcggag" middle = "cggcacctgtgagccg" back_bs = "taatgaccccgaagcagg" downstream = "gttaaggcgcgaacat" replaced = "aaaaaaaaaaaaaaaaaaa" template = ''.join([upstream, front_bs, middle, back_bs, downstream]) cassette = str(Seq(''.join([front_bs, replaced, back_bs])).reverse_complement()) arm_len = min(len(front_bs), len(back_bs)) g = self.build_genome(False, template) c = recombine(g, cassette, arm_len) self.assertNotEqual(g.id, c.id) self.assertEquals(c.fragments.all()[0].indexed_fragment().sequence, ''.join([upstream, front_bs,
<gh_stars>0 from torch import nn from .layer import BertOnlyMLMHead from collections import defaultdict from torch.nn import functional as F import torch import numpy as np from .do_calculus import Matcher, BoxCoder, BalancedPositiveNegativeSampler, FastRCNNLossComputation from horovod import torch as hvd from .ot import optimal_transport_dist from .pretrain import RegionFeatureRegression, RegionFeatureRegression, RegionClassification from .do_calculus import FPNPredictor, CausalPredictor_1, CausalPredictor_2, CausalPredictor_3 from .united_do_calculus import Causal_v, Causal_t from .layer import GELU, BertOnlyMLMHead, BertImagePredictionHead from adapter.src.transformers.adapters.model_mixin import ModelWithHeadsAdaptersMixin from .adapter_uniter import UniterConfig, UniterPreTrainedModel, UniterModel class UniterForPretraining(UniterPreTrainedModel): """ UNITER pretraining """ def __init__(self, config, img_dim, img_label_dim): super().__init__(config) self.uniter = UniterModel(config, img_dim) self.cls = BertOnlyMLMHead( config, self.uniter.embeddings.word_embeddings.weight) ### pretrain by vc feat self.feat_regress_vc = RegionFeatureRegression( config.hidden_size, 1024, self.uniter.img_embeddings.vc_img_linear.weight) ### self.feat_regress = RegionFeatureRegression( config.hidden_size, img_dim, self.uniter.img_embeddings.img_linear.weight) self.region_classifier = RegionClassification( config.hidden_size, img_label_dim) self.itm_output = nn.Linear(config.hidden_size, 2) self.apply(self.init_weights) ### use 'do-calculus' in UNITER pretrain 2: make method ''' self.predictor = FPNPredictor(config, img_dim) # use 'do-calculus' in UNITER pretrain 2 self.causal_predictor_1 = CausalPredictor_1(config, img_dim) # use 'do-calculus' in UNITER pretrain 2 self.causal_predictor_2 = CausalPredictor_2(config, config.hidden_size) self.causal_predictor_3 = CausalPredictor_3(config, img_dim) self.Wx = nn.Linear(config.hidden_size, config.hidden_size) nn.init.normal_(self.Wx.weight, std=0.02) nn.init.constant_(self.Wx.bias, 0) self.causal_score = nn.Linear(2*config.hidden_size, 1601) nn.init.normal_(self.causal_score.weight, std=0.01) nn.init.constant_(self.causal_score.bias, 0) ### ### use 'do-calculus' in UNITER pretrain embedder (version 3) ''' self.causal_v = Causal_v() self.causal_predictor_v = BertImagePredictionHead(config, 2048) self.causal_t = Causal_t() self.base_model_prefix = 'uniter' def forward(self, batch, task, compute_loss=True): batch = defaultdict(lambda: None, batch) input_ids = batch['input_ids'] position_ids = batch['position_ids'] img_feat = batch['img_feat'] img_pos_feat = batch['img_pos_feat'] attention_mask = batch['attn_masks'] gather_index = batch['gather_index'] if task == 'mlm': txt_labels = batch['txt_labels'] return self.forward_mlm(input_ids, position_ids, img_feat, img_pos_feat, attention_mask, gather_index, txt_labels, compute_loss) elif task == 'mrfr': img_mask_tgt = batch['img_mask_tgt'] img_masks = batch['img_masks'] mrfr_feat_target = batch['feat_targets'] return self.forward_mrfr(input_ids, position_ids, img_feat, img_pos_feat, attention_mask, gather_index, img_masks, img_mask_tgt, mrfr_feat_target, compute_loss) elif task == 'itm': targets = batch['targets'] ot_inputs = batch['ot_inputs'] return self.forward_itm(input_ids, position_ids, img_feat, img_pos_feat, attention_mask, gather_index, targets, ot_inputs, compute_loss) elif task.startswith('mrc'): img_mask_tgt = batch['img_mask_tgt'] img_masks = batch['img_masks'] mrc_label_target = batch['label_targets'] 
return self.forward_mrc(input_ids, position_ids, img_feat, img_pos_feat, attention_mask, gather_index, img_masks, img_mask_tgt, mrc_label_target, task, compute_loss) else: raise ValueError('invalid task') def forward_mlm(self, input_ids, position_ids, img_feat, img_pos_feat, attention_mask, gather_index, txt_labels, compute_loss=True): sequence_output = self.uniter(input_ids, position_ids, img_feat, img_pos_feat, attention_mask, gather_index, output_all_encoded_layers=False) # get only the text part sequence_output = sequence_output[:, :input_ids.size(1), :] # only compute masked tokens for better efficiency masked_output = self._compute_masked_hidden(sequence_output, txt_labels != -1) prediction_scores = self.cls(masked_output) if compute_loss: masked_lm_loss = F.cross_entropy(prediction_scores, txt_labels[txt_labels != -1], reduction='none') return masked_lm_loss else: return prediction_scores def _compute_masked_hidden(self, hidden, mask): """ get only the masked region (don't compute unnecessary hiddens) """ mask = mask.unsqueeze(-1).expand_as(hidden) hidden_masked = hidden[mask].contiguous().view(-1, hidden.size(-1)) return hidden_masked def forward_mrfr(self, input_ids, position_ids, img_feat, img_pos_feat, attention_mask, gather_index, img_masks, img_mask_tgt, feat_targets, compute_loss=True): sequence_output = self.uniter(input_ids, position_ids, img_feat, img_pos_feat, attention_mask, gather_index, output_all_encoded_layers=False, img_masks=img_masks) # only compute masked tokens for better efficiency masked_output = self._compute_masked_hidden(sequence_output, img_mask_tgt) prediction_feat = self.feat_regress(masked_output) if compute_loss: mrfr_loss = F.mse_loss(prediction_feat, feat_targets, reduction='none') return mrfr_loss else: return prediction_feat def forward_itm(self, input_ids, position_ids, img_feat, img_pos_feat, attention_mask, gather_index, targets, ot_inputs, compute_loss=True): sequence_output = self.uniter(input_ids, position_ids, img_feat, img_pos_feat, attention_mask, gather_index, output_all_encoded_layers=False) pooled_output = self.uniter.pooler(sequence_output) itm_scores = self.itm_output(pooled_output) # OT loss if ot_inputs is not None: ot_scatter = ot_inputs['ot_scatter'] b = sequence_output.size(0) tl = input_ids.size(1) il = img_feat.size(1) max_l = max(ot_inputs['scatter_max'] + 1, tl+il) ot_scatter = ot_scatter.unsqueeze(-1).expand_as(sequence_output) ctx_emb = torch.zeros(b, max_l, self.config.hidden_size, dtype=sequence_output.dtype, device=sequence_output.device ).scatter_(dim=1, index=ot_scatter, src=sequence_output) txt_emb = ctx_emb[:, :tl, :] img_emb = ctx_emb[:, tl:tl+il, :] txt_pad = ot_inputs['txt_pad'] img_pad = ot_inputs['img_pad'] # NOTE: run in fp32 for stability ot_dist = optimal_transport_dist(txt_emb.float(), img_emb.float(), txt_pad, img_pad).to(txt_emb) ot_pos_dist = ot_dist.masked_select(targets == 1) ot_neg_dist = ot_dist.masked_select(targets == 0) ot_loss = (ot_pos_dist, ot_neg_dist) else: ot_loss = None if compute_loss: itm_loss = F.cross_entropy(itm_scores, targets, reduction='none') return itm_loss, ot_loss else: return itm_scores, ot_loss def forward_mrc(self, input_ids, position_ids, img_feat, img_pos_feat, attention_mask, gather_index, img_masks, img_mask_tgt, label_targets, task, compute_loss=True): sequence_output = self.uniter(input_ids, position_ids, img_feat, img_pos_feat, attention_mask, gather_index, output_all_encoded_layers=False, img_masks=img_masks) # only compute masked regions for better efficiency 
masked_output = self._compute_masked_hidden(sequence_output, img_mask_tgt) prediction_soft_label = self.region_classifier(masked_output) if compute_loss: if "kl" in task: prediction_soft_label = F.log_softmax( prediction_soft_label, dim=-1) mrc_loss = F.kl_div( prediction_soft_label, label_targets, reduction='none') else: # background class should not be the target label_targets = torch.max(label_targets[:, 1:], dim=-1)[1] + 1 mrc_loss = F.cross_entropy( prediction_soft_label, label_targets, ignore_index=0, reduction='none') return mrc_loss else: return prediction_soft_label class UniterAdapterForPretrainingForVCR(ModelWithHeadsAdaptersMixin, UniterForPretraining): """ 2nd Stage Pretrain UNITER for VCR """ def init_type_embedding(self): new_emb = nn.Embedding(4, self.uniter.config.hidden_size) new_emb.apply(self.init_weights) for i in [0, 1]: emb = self.uniter.embeddings.token_type_embeddings.weight.data[i, :] new_emb.weight.data[i, :].copy_(emb) emb = self.uniter.embeddings.token_type_embeddings.weight.data[0, :] new_emb.weight.data[2, :].copy_(emb) new_emb.weight.data[3, :].copy_(emb) self.uniter.embeddings.token_type_embeddings = new_emb def init_word_embedding(self, num_special_tokens): orig_word_num = self.uniter.embeddings.word_embeddings.weight.size(0) new_emb = nn.Embedding( orig_word_num + num_special_tokens, self.uniter.config.hidden_size) new_emb.apply(self.init_weights) emb = self.uniter.embeddings.word_embeddings.weight.data new_emb.weight.data[:orig_word_num, :].copy_(emb) self.uniter.embeddings.word_embeddings = new_emb self.cls = BertOnlyMLMHead( self.uniter.config, self.uniter.embeddings.word_embeddings.weight) self.causal_predictor_t = BertOnlyMLMHead(self.uniter.config, self.uniter.embeddings.word_embeddings.weight) def forward(self, batch, task, compute_loss=True): batch = defaultdict(lambda: None, batch) input_ids = batch['input_ids'] position_ids = batch['position_ids'] img_feat = batch['img_feat'] img_pos_feat = batch['img_pos_feat'] attention_mask = batch['attn_masks'] gather_index = batch['gather_index'] txt_type_ids = batch['txt_type_ids'] txt_lens = batch['txt_lens'] num_bbs = batch['num_bbs'] img_soft_labels = batch['img_soft_labels'] if task == 'mlm': txt_labels = batch['txt_labels'] causal_labels = batch['causal_labels'] ''' return self.forward_mlm(input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, txt_labels, txt_lens, num_bbs, img_soft_labels, compute_loss) return self.forward_mlm_dc(input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, txt_labels, causal_labels, txt_lens, num_bbs, img_soft_labels, compute_loss) return self.forward_mlm_dc_all(input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, txt_labels, causal_labels, txt_lens, num_bbs, img_soft_labels, compute_loss) ''' return self.forward_mlm_dc_unmasked(input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, txt_labels, causal_labels, txt_lens, num_bbs, img_soft_labels, compute_loss) ''' return self.forward_mlm_dc_unmasked_orthogonal(input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, txt_labels, causal_labels, txt_lens, num_bbs, img_soft_labels, compute_loss) ''' elif task == 'mrfr': img_mask_tgt = batch['img_mask_tgt'] img_masks = batch['img_masks'] mrfr_feat_target = batch['feat_targets'] ### pretrain by vc_feat vc_feat = batch['vc_feat'] mrfr_vc_feat_target = batch['vc_feat_targets'] ''' return 
self.forward_mrfr(input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, img_masks, img_mask_tgt, mrfr_feat_target, vc_feat, mrfr_vc_feat_target, txt_lens, num_bbs, img_soft_labels, compute_loss) ''' return self.forward_mrfr_vc(input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, img_masks, img_mask_tgt, mrfr_feat_target, vc_feat, mrfr_vc_feat_target, txt_lens, num_bbs, img_soft_labels, compute_loss) ''' return self.forward_mrfr_vc_orthogonal(input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, img_masks, img_mask_tgt, mrfr_feat_target, vc_feat, mrfr_vc_feat_target, txt_lens, num_bbs, img_soft_labels, compute_loss) ''' elif task.startswith('mrc'): img_mask_tgt = batch['img_mask_tgt'] img_masks = batch['img_masks'] mrc_label_target = batch['label_targets'] ### make label for unmasked object token (for 1_2) img_unmask_tgt = batch['img_unmask_tgt'] mrc_label_target_unmasked = batch['label_targets_unmasked'] ### ''' return self.forward_mrc(input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, img_masks, img_mask_tgt, mrc_label_target, img_unmask_tgt, mrc_label_target_unmasked, txt_lens, num_bbs, img_soft_labels, task, compute_loss) return self.forward_mrc_dc(input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, img_masks, img_mask_tgt, mrc_label_target, img_unmask_tgt, mrc_label_target_unmasked, txt_lens, num_bbs, img_soft_labels, task, compute_loss) return self.forward_mrc_dc_all(input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, img_masks, img_mask_tgt, mrc_label_target, img_unmask_tgt, mrc_label_target_unmasked, txt_lens, num_bbs, img_soft_labels, task, compute_loss) ''' return self.forward_mrc_dc_unmasked(input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, img_masks, img_mask_tgt, mrc_label_target, img_unmask_tgt, mrc_label_target_unmasked, txt_lens, num_bbs, img_soft_labels, task, compute_loss) ''' return self.forward_mrc_dc_unmasked_orthogonal(input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, img_masks, img_mask_tgt, mrc_label_target, img_unmask_tgt, mrc_label_target_unmasked, txt_lens, num_bbs, img_soft_labels, task, compute_loss) ''' elif task.startswith('ortho'): img_mask_tgt = batch['img_mask_tgt'] img_masks = batch['img_masks'] mrc_label_target = batch['label_targets'] ### make label for unmasked object token (for 1_2) img_unmask_tgt = batch['img_unmask_tgt'] mrc_label_target_unmasked = batch['label_targets_unmasked'] ''' return self.forward_orthogonal(input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, img_masks, img_mask_tgt, mrc_label_target, img_unmask_tgt, mrc_label_target_unmasked, txt_lens, num_bbs, img_soft_labels, task, compute_loss) ''' return self.forward_orthogonal_causal_confounder(input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, img_masks, img_mask_tgt, mrc_label_target, img_unmask_tgt, mrc_label_target_unmasked, txt_lens, num_bbs, img_soft_labels, task, compute_loss) elif task.startswith('dc'): img_mask_tgt = batch['img_mask_tgt'] img_masks = batch['img_masks'] mrc_label_target = batch['label_targets'] ''' return self.forward_dc_1(input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, img_masks, img_mask_tgt, mrc_label_target, 
txt_lens, num_bbs, img_soft_labels,
                                     task, compute_loss)
            return self.forward_dc_2(input_ids, position_ids, txt_type_ids,
                                     img_feat, img_pos_feat,
                                     attention_mask, gather_index,
                                     img_masks, img_mask_tgt, mrc_label_target,
                                     txt_lens, num_bbs, img_soft_labels,
                                     task, compute_loss)
            return self.forward_dc_3(input_ids, position_ids, txt_type_ids,
                                     img_feat, img_pos_feat,
                                     attention_mask, gather_index,
                                     img_masks, img_mask_tgt, mrc_label_target,
                                     txt_lens, num_bbs, img_soft_labels,
                                     task, compute_loss)
            return self.forward_dc_4(input_ids, position_ids, txt_type_ids,
                                     img_feat, img_pos_feat,
                                     attention_mask, gather_index,
                                     img_masks, img_mask_tgt, mrc_label_target,
                                     txt_lens, num_bbs, img_soft_labels,
                                     task, compute_loss)
            return self.forward_dc_5(input_ids, position_ids, txt_type_ids,
                                     img_feat, img_pos_feat,
                                     attention_mask, gather_index,
                                     img_masks, img_mask_tgt, mrc_label_target,
                                     txt_lens, num_bbs, img_soft_labels,
                                     task, compute_loss)
            '''
            raise ValueError('dc is invalid task now')
        else:
            raise ValueError('invalid task')

    ### use 'do-calculus' in UNITER pretrain : make method
    def do_calculus_1(self, sequence_output, img_feats, proposals, txt_lens, num_bbs):
        """
        Arguments:
        - sequence_output : "img + txt" output
        Returns:
        """
        image_uniter_outputs = []
        img_feat_list = []
        i = 0
        for (output, txt_len) in zip(sequence_output, txt_lens):
            image_uniter_output = output[txt_len:txt_len + num_bbs[i]]
            image_uniter_outputs.append(image_uniter_output)
            i += 1
        for (img_feat, num_bb) in zip(img_feats, num_bbs):
            real_img_feat = img_feat[:num_bb]
            img_feat_list.append(real_img_feat)
        assert len(image_uniter_outputs) == len(num_bbs)
        assert len(image_uniter_outputs) ==
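# Standalone sketch of the slicing pattern used in do_calculus_1 above: image
# region states are cut out of the packed [text ; image] transformer output per
# sample, using the per-sample text length and region count. The tensor shapes
# below are made up for the illustration; this is not the project's code.
import torch

batch, max_len, hidden = 2, 10, 4
sequence_output = torch.randn(batch, max_len, hidden)
txt_lens = [3, 5]   # number of text tokens per sample
num_bbs = [4, 2]    # number of image regions (bounding boxes) per sample

image_uniter_outputs = [
    output[txt_len:txt_len + n_bb]          # (n_bb, hidden) slice per sample
    for output, txt_len, n_bb in zip(sequence_output, txt_lens, num_bbs)
]

assert len(image_uniter_outputs) == len(num_bbs)
assert all(o.shape == (n, hidden) for o, n in zip(image_uniter_outputs, num_bbs))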
max(loc.rfind(slash) for slash in _SLASHES) autotrim_prefix = loc[0:slash_pos] if slash_pos > -1 else None elif len(records) > 1: common_prefix = records[0]["Location"].strip() for record in records[1:]: for (char_pos, char) in enumerate(record["Location"].strip()): if char_pos >= len(common_prefix): break if char != common_prefix[char_pos]: common_prefix = common_prefix[0:char_pos] break if not common_prefix: break if common_prefix: autotrim_prefix = common_prefix.upper() if autotrim_prefix and not any( p.startswith(autotrim_prefix.strip().upper()) for p in prefixes ): prefixes.append(autotrim_prefix) self._path_prefixes_upper = prefixes or None # Clear the untrimmed records cached by get_records() above. self._cached_records = None def init_default_line_number_1(self): """ Some SARIF records lack a line number. If this method is called, the default line number "1" is substituted in that case in the records returned by get_records(). Otherwise, None is returned. """ self._default_line_number = "1" self._cached_records = None def init_blame_filter( self, filter_description, include_substrings, include_regexes, exclude_substrings, exclude_regexes, ): """ Set up blame filtering. This is applied to the author_mail field added to the "blame" property bag in each SARIF file. Raises an error if any of the SARIF files don't contain blame information. If only inclusion criteria are provided, only issues matching the inclusion criteria are considered. If only exclusion criteria are provided, only issues not matching the exclusion criteria are considered. If both are provided, only issues matching the inclusion criteria and not matching the exclusion criteria are considered. include_substrings = substrings of author_mail to filter issues for inclusion. include_regexes = regular expressions for author_mail to filter issues for inclusion. exclude_substrings = substrings of author_mail to filter issues for exclusion. exclude_regexes = regular expressions for author_mail to filter issues for exclusion. """ self._filter.init_blame_filter( filter_description, include_substrings, include_regexes, exclude_substrings, exclude_regexes, ) # Clear the unfiltered records cached by get_records() above. self._cached_records = None def get_tool_name(self) -> str: """ Get the tool name from this run. """ return self.run_data["tool"]["driver"]["name"] def get_conversion_tool_name(self) -> Optional[str]: """ Get the conversion tool name from this run, if any. """ if "conversion" in self.run_data: return ( self.run_data["conversion"]["tool"].get("driver", {}).get("name", None) ) return None def get_results(self) -> List[Dict]: """ Get the results from this run. These are the Result objects as defined in the SARIF standard section 3.27. The results are filtered if a filter has ben configured. https://docs.oasis-open.org/sarif/sarif/v2.1.0/os/sarif-v2.1.0-os.html#_Toc34317638 """ return self._filter.filter_results(self.run_data["results"]) def get_records(self) -> List[Dict]: """ Get simplified records derived from the results of this run. The records have the keys defined in `RECORD_ATTRIBUTES`. """ if not self._cached_records: results = self.get_results() self._cached_records = [self.result_to_record(result) for result in results] return self._cached_records def get_records_grouped_by_severity(self) -> Dict[str, List[Dict]]: """ Get the records, grouped by severity. 
""" return _group_records_by_severity(self.get_records()) def result_to_record(self, result): """ Convert a SARIF result object to a simple record with fields "Tool", "Location", "Line", "Severity" and "Code". See definition of result object here: https://docs.oasis-open.org/sarif/sarif/v2.1.0/os/sarif-v2.1.0-os.html#_Toc34317638 """ error_id = result["ruleId"] tool_name = self.get_tool_name() (file_path, line_number) = _read_result_location(result) if not file_path: raise ValueError(f"No location in {error_id} output from {tool_name}") if not line_number: line_number = "1" if self._path_prefixes_upper: file_path_upper = file_path.upper() for prefix in self._path_prefixes_upper: if file_path_upper.startswith(prefix): prefixlen = len(prefix) if len(file_path) > prefixlen and file_path[prefixlen] in _SLASHES: # Strip off trailing path separator file_path = file_path[prefixlen + 1 :] else: file_path = file_path[prefixlen:] break # Get the error severity, if included, and code severity = result.get( "level", "warning" ) # If an error has no specified level then by default it is a warning message = result["message"]["text"] # Create a dict representing this result record = { "Tool": tool_name, "Location": file_path, "Line": line_number, "Severity": severity, "Code": f"{error_id} {message}", } return record def get_result_count(self) -> int: """ Return the total number of results. """ return len(self.get_results()) def get_result_count_by_severity(self) -> Dict[str, int]: """ Return a dict from SARIF severity to number of records. """ records = self.get_records() return { severity: sum(1 for record in records if severity in record["Severity"]) for severity in SARIF_SEVERITIES } def get_issue_code_histogram(self, severity) -> List[Tuple]: """ Return a list of pairs (code, count) of the records with the specified severities. """ return _count_records_by_issue_code(self.get_records(), severity) def get_filter_stats(self) -> Optional[FilterStats]: """ Get the number of records that were included or excluded by the filter. """ return self._filter.get_filter_stats() class SarifFile: """ Class to hold SARIF data parsed from a file and provide accesssors to the data. """ def __init__(self, file_path, data): self.abs_file_path = os.path.abspath(file_path) self.data = data self.runs = [ SarifRun(self, run_index, run_data) for (run_index, run_data) in enumerate(self.data.get("runs", [])) ] def __bool__(self): """ True if non-empty. """ return bool(self.runs) def init_path_prefix_stripping(self, autotrim=False, path_prefixes=None): """ Set up path prefix stripping. When records are subsequently obtained, the start of the path is stripped. If no path_prefixes are specified, the default behaviour is to strip the common prefix from each run. If path prefixes are specified, the specified prefixes are stripped. """ for run in self.runs: run.init_path_prefix_stripping(autotrim, path_prefixes) def init_default_line_number_1(self): """ Some SARIF records lack a line number. If this method is called, the default line number "1" is substituted in that case in the records returned by get_records(). Otherwise, None is returned. """ for run in self.runs: run.init_default_line_number_1() def init_blame_filter( self, filter_description, include_substrings, include_regexes, exclude_substrings, exclude_regexes, ): """ Set up blame filtering. This is applied to the author_mail field added to the "blame" property bag in each SARIF file. Raises an error if any of the SARIF files don't contain blame information. 
If only inclusion criteria are provided, only issues matching the inclusion criteria are considered. If only exclusion criteria are provided, only issues not matching the exclusion criteria are considered. If both are provided, only issues matching the inclusion criteria and not matching the exclusion criteria are considered. include_substrings = substrings of author_mail to filter issues for inclusion. include_regexes = regular expressions for author_mail to filter issues for inclusion. exclude_substrings = substrings of author_mail to filter issues for exclusion. exclude_regexes = regular expressions for author_mail to filter issues for exclusion. """ for run in self.runs: run.init_blame_filter( filter_description, include_substrings, include_regexes, exclude_substrings, exclude_regexes, ) def get_abs_file_path(self) -> str: """ Get the absolute file path from which this SARIF data was loaded. """ return self.abs_file_path def get_file_name(self) -> str: """ Get the file name from which this SARIF data was loaded. """ return os.path.basename(self.abs_file_path) def get_file_name_without_extension(self) -> str: """ Get the file name from which this SARIF data was loaded, without extension. """ file_name = self.get_file_name() return file_name[0 : file_name.index(".")] if "." in file_name else file_name def get_file_name_extension(self) -> str: """ Get the extension of the file name from which this SARIF data was loaded. Initial "." exlcuded. """ file_name = self.get_file_name() return file_name[file_name.index(".") + 1 :] if "." in file_name else "" def get_filename_timestamp(self) -> str: """ Extract the timestamp from the filename and return the date-time string extracted. """ parsed_date = re.findall(DATETIME_REGEX, self.get_file_name()) return parsed_date if len(parsed_date) == 1 else None def get_distinct_tool_names(self): """ Return a list of tool names that feature in the runs in this file. The list is deduplicated and sorted into alphabetical order. """ return sorted(list(set(run.get_tool_name() for run in self.runs))) def get_results(self) -> List[Dict]: """ Get the results from all runs in this file. These are the Result objects as defined in the SARIF standard section 3.27. https://docs.oasis-open.org/sarif/sarif/v2.1.0/os/sarif-v2.1.0-os.html#_Toc34317638 """ ret = [] for run in self.runs: ret += run.get_results() return ret def get_records(self) -> List[Dict]: """ Get simplified records derived from the results of all runs. The records have the keys defined in `RECORD_ATTRIBUTES`. """ ret = [] for run in self.runs: ret += run.get_records() return ret def get_records_grouped_by_severity(self) -> Dict[str, List[Dict]]: """ Get the records, grouped by severity. """ return _group_records_by_severity(self.get_records()) def get_result_count(self) -> int: """ Return the total number of results. """ return sum(run.get_result_count() for run in self.runs) def get_result_count_by_severity(self) -> Dict[str, int]: """ Return a dict from SARIF severity to number of records. """ get_result_count_by_severity_per_run = [ run.get_result_count_by_severity() for run in self.runs ] return { severity: sum( rc.get(severity, 0) for rc in get_result_count_by_severity_per_run ) for severity in SARIF_SEVERITIES } def get_issue_code_histogram(self, severity) -> List[Tuple]: """
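# Sketch of the severity aggregation performed by get_result_count_by_severity()
# over the simplified records built by result_to_record() above. The record keys
# come from that method; the severity list used here is an assumption and is not
# necessarily the module's SARIF_SEVERITIES constant.
from typing import Dict, List

ASSUMED_SEVERITIES = ["error", "warning", "note"]

def count_by_severity(records: List[Dict]) -> Dict[str, int]:
    return {
        severity: sum(1 for record in records if severity in record["Severity"])
        for severity in ASSUMED_SEVERITIES
    }

example_records = [
    {"Tool": "linter", "Location": "a.py", "Line": "1", "Severity": "error", "Code": "E1 message"},
    {"Tool": "linter", "Location": "b.py", "Line": "7", "Severity": "warning", "Code": "W2 message"},
]
assert count_by_severity(example_records) == {"error": 1, "warning": 1, "note": 0}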
for c in self.gtr.alphabet if c not in [self.gtr.ambiguous, '-']} total = np.sum(list(character_counts.values())) additional_columns = [(c,int(np.round(self.additional_constant_sites*n/total))) for c, n in character_counts.items()] columns_left = self.additional_constant_sites pi = len(positions) for c,n in additional_columns: if c==additional_columns[-1][0]: # make sure all additions add up to the correct number to avoid rounding n = columns_left str_pat = c*len(self.aln) pos_list = list(range(pi, pi+n)) if str_pat in alignment_patterns: alignment_patterns[str_pat][1].extend(pos_list) else: alignment_patterns[str_pat] = (len(tmp_reduced_aln), pos_list) tmp_reduced_aln.append(np.array(list(str_pat))) pi += n columns_left -= n # count how many times each column is repeated in the real alignment self.multiplicity = np.zeros(len(alignment_patterns)) for p, pos in alignment_patterns.values(): self.multiplicity[p]=len(pos) # create the reduced alignment as np array self.reduced_alignment = np.array(tmp_reduced_aln).T # create map to compress a sequence for p, pos in alignment_patterns.values(): self.full_to_reduced_sequence_map[np.array(pos)]=p # create a map to reconstruct full sequence from the reduced (compressed) sequence for p, val in alignment_patterns.items(): self.reduced_to_full_sequence_map[val[0]]=np.array(val[1], dtype=int) # assign compressed sequences to all nodes of the tree, which have sequence assigned # for dict we cannot assume this is in the same order, as it does below! # so do it explicitly # # sequences are overwritten during reconstruction and # ambiguous sites change. Keep orgininals for reference if self.is_vcf: seq_reduce_align = {n:self.reduced_alignment[i] for i, n in enumerate(seqNames)} for n in self.tree.find_clades(): if hasattr(n, 'sequence'): n.original_cseq = seq_reduce_align[n.name] n.cseq = np.copy(n.original_cseq) else: # NOTE the order of tree traversal must be the same as above to catch the # index in the reduced alignment correctly seq_count = 0 for n in self.tree.find_clades(): if hasattr(n, 'sequence'): n.original_cseq = self.reduced_alignment[seq_count] n.cseq = np.copy(n.original_cseq) seq_count+=1 else: n.original_cseq = None n.cseq = None self.logger("TreeAnc: constructed reduced alignment...", 1) return ttconf.SUCCESS def process_alignment_dict(self): """ prepare the dictionary specifying differences from a reference sequence to construct the reduced alignment with variable sites only. 
NOTE: - sites can be constant but different from the reference - sites can be constant plus a ambiguous sites assigns ------- - self.nonref_positions: at least one sequence is different from ref Returns ------- reduced_alignment_const reduced alignment accounting for non-variable postitions alignment_patterns_const dict pattern -> (pos in reduced alignment, list of pos in full alignment) ariable_positions list of variable positions needed to construct remaining """ # number of sequences in alignment nseq = len(self.aln) inv_map = defaultdict(list) for k,v in self.aln.items(): for pos, bs in v.items(): inv_map[pos].append(bs) self.nonref_positions = np.sort(list(inv_map.keys())) self.inferred_const_sites = [] ambiguous_char = self.gtr.ambiguous nonref_const = [] nonref_alleles = [] ambiguous_const = [] variable_pos = [] for pos, bs in inv_map.items(): #loop over positions and patterns bases = "".join(np.unique(bs)) if len(bs) == nseq: if (len(bases)<=2 and ambiguous_char in bases) or len(bases)==1: # all sequences different from reference, but only one state # (other than ambiguous_char) in column nonref_const.append(pos) nonref_alleles.append(bases.replace(ambiguous_char, '')) if ambiguous_char in bases: #keep track of sites 'made constant' self.inferred_const_sites.append(pos) else: # at least two non-reference alleles variable_pos.append(pos) else: # not every sequence different from reference if bases==ambiguous_char: ambiguous_const.append(pos) self.inferred_const_sites.append(pos) #keep track of sites 'made constant' else: # at least one non ambiguous non-reference allele not in # every sequence variable_pos.append(pos) refMod = np.array(list(self.ref)) # place constant non reference positions by their respective allele refMod[nonref_const] = nonref_alleles # mask variable positions states = self.gtr.alphabet # maybe states = np.unique(refMod) refMod[variable_pos] = '.' # for each base in the gtr, make constant alignment pattern and # assign it to all const positions in the modified reference sequence reduced_alignment_const = [] alignment_patterns_const = {} for base in states: p = base*nseq pos = list(np.where(refMod==base)[0]) #if the alignment doesn't have a const site of this base, don't add! (ex: no '----' site!) if len(pos): alignment_patterns_const[p] = [len(reduced_alignment_const), pos] reduced_alignment_const.append(list(p)) return reduced_alignment_const, alignment_patterns_const, variable_pos def prepare_tree(self): """ Set link to parent and calculate distance to root for all tree nodes. Should be run once the tree is read and after every rerooting, topology change or branch length optimizations. """ self.tree.root.branch_length = 0.001 self.tree.root.mutation_length = self.tree.root.branch_length self.tree.root.mutations = [] self.tree.ladderize() self._prepare_nodes() self._leaves_lookup = {node.name:node for node in self.tree.get_terminals()} def _prepare_nodes(self): """ Set auxilliary parameters to every node of the tree. 
""" self.tree.root.up = None self.tree.root.bad_branch=self.tree.root.bad_branch if hasattr(self.tree.root, 'bad_branch') else False name_set = set([n.name for n in self.tree.find_clades() if n.name]) internal_node_count = 0 for clade in self.tree.get_nonterminals(order='preorder'): # parents first if clade.name is None: tmp = "NODE_" + format(internal_node_count, '07d') while tmp in name_set: internal_node_count += 1 tmp = "NODE_" + format(internal_node_count, '07d') clade.name = tmp name_set.add(clade.name) internal_node_count+=1 for c in clade.clades: if c.is_terminal(): c.bad_branch = c.bad_branch if hasattr(c, 'bad_branch') else False c.up = clade for clade in self.tree.get_nonterminals(order='postorder'): # parents first clade.bad_branch = all([c.bad_branch for c in clade]) self._calc_dist2root() self._internal_node_count = max(internal_node_count, self._internal_node_count) def _calc_dist2root(self): """ For each node in the tree, set its root-to-node distance as dist2root attribute """ self.tree.root.dist2root = 0.0 for clade in self.tree.get_nonterminals(order='preorder'): # parents first for c in clade.clades: if not hasattr(c, 'mutation_length'): c.mutation_length=c.branch_length c.dist2root = c.up.dist2root + c.mutation_length #################################################################### ## END SET-UP #################################################################### def infer_gtr(self, marginal=False, site_specific=False, normalized_rate=True, fixed_pi=None, pc=5.0, **kwargs): """ Calculates a GTR model given the multiple sequence alignment and the tree. It performs ancestral sequence inferrence (joint or marginal), followed by the branch lengths optimization. Then, the numbers of mutations are counted in the optimal tree and related to the time within the mutation happened. From these statistics, the relative state transition probabilities are inferred, and the transition matrix is computed. The result is used to construct the new GTR model of type 'custom'. The model is assigned to the TreeAnc and is used in subsequent analysis. Parameters ----------- print_raw : bool If True, print the inferred GTR model marginal : bool If True, use marginal sequence reconstruction normalized_rate : bool If True, sets the mutation rate prefactor to 1.0. fixed_pi : np.array Provide the equilibrium character concentrations. If None is passed, the concentrations will be inferred from the alignment. 
pc: float Number of pseudo counts to use in gtr inference Returns ------- gtr : GTR The inferred GTR model """ # if ancestral sequences are not in place, reconstruct them if marginal and (not hasattr(self.tree.root,'marginal_profile')): self._ml_anc_marginal(final=True, **kwargs) elif (not hasattr(self.tree.root,'cseq')) or self.tree.root.cseq is None: self._ml_anc_joint(final=True, **kwargs) if (self.tree is None) or (self.aln is None): self.logger("TreeAnc.infer_gtr: ERROR, alignment or tree are missing", 0) return ttconf.ERROR alpha = list(self.gtr.alphabet) n=len(alpha) L = len(self.tree.root.cseq) # matrix of mutations n_{ij}: i = derived state, j=ancestral state n_ija = np.zeros((n,n,L)) T_ia = np.zeros((n,L)) self.logger("TreeAnc.infer_gtr: counting mutations...", 2) for node in self.tree.get_nonterminals(): for c in node: if marginal: mut_stack = np.transpose(self.get_branch_mutation_matrix(c, full_sequence=False), (1,2,0)) T_ia += 0.5*self._branch_length_to_gtr(c) * mut_stack.sum(axis=0) * self.multiplicity T_ia += 0.5*self._branch_length_to_gtr(c) * mut_stack.sum(axis=1) * self.multiplicity n_ija += mut_stack * self.multiplicity elif hasattr(c,'mutations'): for a,pos, d in c.mutations: i,j = alpha.index(d), alpha.index(a) cpos = self.full_to_reduced_sequence_map[pos] n_ija[i,j,cpos]+=1 T_ia[j,cpos] += 0.5*self._branch_length_to_gtr(c) T_ia[i,cpos] -= 0.5*self._branch_length_to_gtr(c) for pos,nuc in enumerate(c.cseq): i = alpha.index(nuc) T_ia[i,pos] += self._branch_length_to_gtr(c)*self.multiplicity[pos] self.logger("TreeAnc.infer_gtr: counting mutations...done", 3) if site_specific: if marginal: root_state = self.tree.root.marginal_profile.T else: root_state = seq2prof(self.tree.root.cseq, self.gtr.profile_map).T self._gtr = GTR_site_specific.infer(n_ija, T_ia, pc=pc, root_state=root_state, logger=self.logger, alphabet=self.gtr.alphabet, prof_map=self.gtr.profile_map) else: root_state = np.array([np.sum((self.tree.root.cseq==nuc)*self.multiplicity) for nuc in alpha]) n_ij = n_ija.sum(axis=-1) self._gtr = GTR.infer(n_ij, T_ia.sum(axis=-1), root_state, fixed_pi=fixed_pi, pc=pc, alphabet=self.gtr.alphabet, logger=self.logger, prof_map = self.gtr.profile_map) if normalized_rate: self.logger("TreeAnc.infer_gtr: setting overall rate to 1.0...", 2) if site_specific: self._gtr.mu /= self._gtr.average_rate().mean() else: self._gtr.mu=1.0 return self._gtr ################################################################### ### ancestral reconstruction ################################################################### def reconstruct_anc(self,*args, **kwargs): """Shortcut for :py:meth:`treetime.TreeAnc.infer_ancestral_sequences` """ return self.infer_ancestral_sequences(*args,**kwargs) def infer_ancestral_sequences(self, method='probabilistic', infer_gtr=False, marginal=False, **kwargs): """Reconstruct ancestral sequences Parameters ---------- method : str Method to use. Supported values are "fitch" and "ml" infer_gtr : bool Infer a GTR model before reconstructing the sequences marginal : bool Assign sequences that are most likely after averaging over all other nodes instead of the jointly most likely sequences. **kwargs additional keyword arguments that are passed down to :py:meth:`TreeAnc.infer_gtr` and :py:meth:`TreeAnc._ml_anc` Returns ------- N_diff : int Number of nucleotides different from the previous reconstruction. 
If there were no pre-set sequences, returns N*L
        """
        self.logger("TreeAnc.infer_ancestral_sequences with method: %s, %s"
                    % (method, 'marginal' if marginal else 'joint'), 1)
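# Toy version of the mutation bookkeeping behind infer_gtr() above: n_ij[i, j]
# counts substitutions from ancestral state j to derived state i, while T_i
# accumulates the branch length attributed to each state. The branches and
# their lengths are invented for the example; this is not TreeAnc's code.
import numpy as np

alphabet = ['A', 'C', 'G', 'T']
n = len(alphabet)
n_ij = np.zeros((n, n))
T_i = np.zeros(n)

# (ancestral, derived, branch_length) triples observed on imaginary branches
observed_mutations = [('A', 'G', 0.10), ('A', 'G', 0.20), ('C', 'T', 0.05)]
for anc, der, branch_len in observed_mutations:
    i, j = alphabet.index(der), alphabet.index(anc)
    n_ij[i, j] += 1
    # split the branch length evenly between the ancestral and derived states
    T_i[j] += 0.5 * branch_len
    T_i[i] += 0.5 * branch_len

assert n_ij[alphabet.index('G'), alphabet.index('A')] == 2
assert np.isclose(T_i.sum(), sum(bl for _, _, bl in observed_mutations))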
<reponame>princenyeche/jiraone<filename>jiraone/management.py #!/usr/bin/env python # -*- coding: utf-8 -*- """ Get access to Atlassian User management REST API. You can use the same API key for the organizations REST API and the user management REST API. Create an API key from this URL https://confluence.atlassian.com/x/jPnJOQ This API provides all the access to the User management REST API. """ import typing as t import requests import threading import re from jiraone.exceptions import JiraOneErrors from collections import deque # Define APIs class UserManagement: """ The UserManagement API is used to access organization profiles on Atlassian sites. The alias to this class is called ``manage`` It comes with the below attributes and methods. .. code-block:: python token = "<PASSWORD>" manage.api_token(token) manage.LINK # attribute manage.AUTH # attribute """ # Define constants LINK = "https://api.atlassian.com" AUTH = {"Accept": "application/json"} def __init__(self) -> None: """ A Constructor which also helps with property initialization. """ # Property entry point. self._org_id_ = None self._org_ids_ = None self._domain_id_ = None self._policy_id_ = None self._event_id_ = None def get_user_permission(self, account_id: str, query: list = None) -> t.Any: """Returns the set of permissions you have for managing the specified Atlassian account. :param account_id: A user string value for Atlassian accounts :param query: A query parameter of Array<string> *Valid options* Valid values: profile, profile.write, profile.read, email.set, lifecycle.enablement, apiToken.read, apiToken.delete :return: Any """ if "Authorization" not in self.AUTH: raise JiraOneErrors("login", "You need to authenticate to use this resource") url = f"{self.LINK}/users/{account_id}/manage" if query is None else \ f"{self.LINK}/users/{account_id}/manage?{query}" return requests.get(url, headers=self.AUTH) def manage_profile(self, account_id: str, method: str = "GET", **kwargs: t.Any) -> t.Any: """Returns information about a single Atlassian account by ID by using a "GET" request. :request PATCH: Updates fields in a user account. The profile.write privilege details which fields you can change. :request PUT: Sets the specified user's email address. Before using this endpoint, you must verify the target domain :param account_id: A user string value for Atlassian accounts :param method: A response method condition *Available options* :request GET: Get the return request :request PATCH: Updates a given set of data :body parameter: Any or all user object this is value e.g. {"name": "<NAME>", "nickname": "marshmallow"} :request PUT: Change the email account of the user :body parameter: email - string e.g. {"email": "<EMAIL>"} :param kwargs: - Contains other options passed to the requests.<patch> .. 
code-block:: python # previous expression # json=<variable_name> payload = {"email": "<EMAIL>"} manage.manage_profile("account_id", "<method>", json=payload) :return: Any """ if "Authorization" not in self.AUTH: raise JiraOneErrors("login", "You need to authenticate to use this resource") url = f"{self.LINK}/users/{account_id}/manage/profile" if method.lower() == "get" or method.lower() == "patch" \ else f"{self.LINK}/users/{account_id}/manage/email" if method.lower() == "get": return requests.get(url, headers=self.AUTH) if method.lower() == "patch": return requests.patch(url, **kwargs, headers=self.AUTH) if method.lower() == "put": return requests.put(url, **kwargs, headers=self.AUTH) else: raise JiraOneErrors("wrong", "The method you posted is not available for this operation.") def api_token(self, account_id: str, method: str = "GET", token_id: str = None) -> t.Any: """Gets the API tokens owned by the specified user or Deletes a specified API token by ID. :param account_id: A user string value for Atlassian accounts :param method: A response method condition :param token_id: A user token id to be deleted. :return: Any """ if "Authorization" not in self.AUTH: raise JiraOneErrors("login", "You need to authenticate to use this resource") url = f"{self.LINK}/users/{account_id}/manage/api-tokens" if token_id is None else \ f"{self.LINK}/users/{account_id}/manage/api-tokens/{token_id}" if method.lower() == "get": return requests.get(url, headers=self.AUTH) elif method.lower() == "delete": return requests.delete(url, headers=self.AUTH) else: raise JiraOneErrors("wrong", "Unexpected method received. Only \"GET\" or \"DELETE\" methods allowed") def manage_user(self, account_id: str, disable: bool = True, **kwargs) -> t.Any: """Disables the specified user account. The permission to make use of this resource is exposed by the lifecycle.enablement privilege. OR Enables the specified user account. The permission to make use of this resource is exposed by the lifecycle.enablement privilege. :param account_id: A user string value for Atlassian accounts :param disable: A bool option, if True this API url is set to disabled :param kwargs: Additional keyword argument to pass body data *Options available when disable is False* .. code-block:: python # previous expression payload = {"message": "On 6-month suspension"} manage.manage_user("account_id", json=payload) :return: Any """ if "Authorization" not in self.AUTH: raise JiraOneErrors("login", "You need to authenticate to use this resource") url = f"{self.LINK}/users/{account_id}/manage/lifecycle/disable" if disable is True else \ f"{self.LINK}/users/{account_id}/manage/lifecycle/enable" return requests.post(url, **kwargs, headers=self.AUTH) def get_organization(self, org_id: t.Optional[str] = None, filter_by: t.Optional[str] = None, domain_id: t.Optional[str] = None, event_id: t.Optional[str] = None, action: t.Optional[bool] = True, policy_id: t.Optional[str] = None, **kwargs: t.Any) -> t.Any: """GET request for the organization API. Returns a list of your organizations (based on your API key). Returns information about a single organization by ID. Returns a list of users in an organization. Returns a list of domains in an organization one page at a time. Returns information about a single verified domain by ID. Returns information about a single event by ID. 
Returns information about org policies Returns information about a single policy by ID :param org_id: Retrieve the organization id from the API key :param domain_id: Retrieve domain details :param filter_by: Use to determine the endpoint to return *Valid options* * users - return the users in an organization * domains - list of domains in an organization * events - list of events in an audit log * policies - get the policy of the organization :param event_id: Use to determine the events in the audit log :param action: Additional positional argument for events. True sets events-actions * action - Sets the event actions, true to enable by default set to true. e.g action=True :param policy_id: An id of the policy :param kwargs: Optional arguments *Valid options* Any response argument e.g json=payload data=payload :return: Any """ if "Authorization" not in self.AUTH: raise JiraOneErrors("login", "You need to authenticate to use this resource") org_id = self._org_id_ if org_id is None else org_id if filter_by is None: if org_id is None and domain_id is None: url = f"{self.LINK}/admin/v1/orgs" resp = requests.get(url, headers=self.AUTH, **kwargs) self._parse_data_obj(resp, types="org") return resp elif org_id is not None and domain_id is None: url = f"{self.LINK}/admin/v1/orgs/{org_id}" return requests.get(url, headers=self.AUTH, **kwargs) else: if filter_by == "users": if org_id is not None and domain_id is None: url = f"{self.LINK}/admin/v1/orgs/{org_id}/users" return requests.get(url, headers=self.AUTH, **kwargs) elif filter_by == "domains": if org_id is not None and domain_id is None: url = f"{self.LINK}/admin/v1/orgs/{org_id}/domains" resp = requests.get(url, headers=self.AUTH, **kwargs) self._parse_data_obj(resp, types="domain") return resp elif org_id is not None and domain_id is not None: url = f"{self.LINK}/admin/v1/orgs/{org_id}/domains/{domain_id}" return requests.get(url, headers=self.AUTH, **kwargs) elif filter_by == "events": if org_id is not None: if action is False and event_id is None: url = f"{self.LINK}/admin/v1/orgs/{org_id}/events" resp = requests.get(url, headers=self.AUTH, **kwargs) self._parse_data_obj(resp, types="event") return resp elif action is False and event_id is not None: url = f"{self.LINK}/admin/v1/orgs/{org_id}/events/{event_id}" return requests.get(url, headers=self.AUTH, **kwargs) elif action is True and event_id is None or event_id is not None: url = f"{self.LINK}/admin/v1/orgs/{org_id}/event-actions" return requests.get(url, headers=self.AUTH, **kwargs) elif filter_by == "policies": if org_id is not None: if policy_id is None: url = f"{self.LINK}/admin/v1/orgs/{org_id}/policies" resp = requests.get(url, headers=self.AUTH, **kwargs) self._parse_data_obj(resp, types="policy") return resp elif policy_id is not None: url = f"{self.LINK}/admin/v1/orgs/{org_id}/policies/{policy_id}" return requests.get(url, headers=self.AUTH, **kwargs) else: raise JiraOneErrors("wrong", "Unexpected error - unable to determine parameter value") def manage_organization(self, org_id: str, method: str = "POST", policy_id: t.Optional[str] = None, resource_id: t.Optional[str] = None, **kwargs: t.Any) -> t.Any: """Create, put and delete organization data Create a policy for an org Send a post request by using method="post" as keyword args Update a policy for an org. Send a put request by using method="put" as keyword args You will need to send a payload for the body using the example shown below .. 
code-block:: json

            {
              "id": "<string>",
              "type": "policy",
              "attributes": {
                "type": "ip-allowlist",
                "name": "<string>",
                "status": "enabled",
                "rule": {},
                "resources": [
                  {
                    "id": "<string>",
                    "meta": {
                      "scheduledDate": "<string>",
                      "migrationStartDateTime": "<string>",
                      "migrationEndDataTime": "<string>",
                      "atlassianAccountId": "<string>"
                    },
                    "links": {
                      "ticket": "<string>"
                    }
                  }
                ]
              }
            }

        Delete a policy for an
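# Hedged usage sketch: building the ip-allowlist policy body documented above
# and sending it through manage_organization(). It assumes the documented
# "manage" alias is importable, that the Authorization header has already been
# configured on manage.AUTH, and that a POST without policy_id targets the
# /admin/v1/orgs/{org_id}/policies endpoint as the docstring describes. The org
# id, policy name and rule contents are placeholders.
from jiraone import manage  # documented alias of UserManagement

payload = {
    "type": "policy",
    "attributes": {
        "type": "ip-allowlist",
        "name": "Office network only",   # placeholder name
        "status": "enabled",
        "rule": {},                      # rule schema left empty in this sketch
        "resources": [],
    },
}

response = manage.manage_organization("<org_id>", method="POST", json=payload)
print(response.status_code)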
= self.instance_tmpdir.getpath('adjustments.sqlite') writer = SQLiteAdjustmentWriter( dbpath, MockDailyBarReader(), self.trading_calendar.all_sessions, ) splits = mergers = create_empty_splits_mergers_frame() dividends = pd.DataFrame({ 'sid': np.array([1], dtype=np.uint32), 'amount': np.array([10.00], dtype=np.float64), 'declared_date': np.array([events[-3].dt], dtype='datetime64[ns]'), 'ex_date': np.array([events[-2].dt], dtype='datetime64[ns]'), 'record_date': np.array([events[0].dt], dtype='datetime64[ns]'), 'pay_date': np.array( [self.trading_calendar.next_session_label( self.trading_calendar.minute_to_session_label( events[-1].dt ) )], dtype='datetime64[ns]'), }) writer.write(splits, mergers, dividends) adjustment_reader = SQLiteAdjustmentReader(dbpath) # Set the last day to be the last event sim_params = create_simulation_parameters( num_days=6, capital_base=10e3, start=self.sim_params.start_session, end=self.sim_params.end_session ) sim_params = sim_params.create_new( sim_params.start_session, events[-1].dt ) data_portal = create_data_portal_from_trade_history( self.env.asset_finder, self.trading_calendar, self.instance_tmpdir, sim_params, {1: events}, ) data_portal._adjustment_reader = adjustment_reader # Simulate a transaction being filled prior to the ex_date. txns = [create_txn(self.asset1, events[0].dt, 10.0, 100)] results = calculate_results( sim_params, self.asset_finder, data_portal, txns=txns, ) self.assertEqual(len(results), 6) cumulative_returns = \ [event['cumulative_perf']['returns'] for event in results] self.assertEqual(cumulative_returns, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) daily_returns = [event['daily_perf']['returns'] for event in results] self.assertEqual(daily_returns, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) cash_flows = [event['daily_perf']['capital_used'] for event in results] self.assertEqual(cash_flows, [-1000, 0, 0, 0, 0, 0]) cumulative_cash_flows = \ [event['cumulative_perf']['capital_used'] for event in results] self.assertEqual(cumulative_cash_flows, [-1000, -1000, -1000, -1000, -1000, -1000]) class TestDividendPerformanceHolidayStyle(TestDividendPerformance): # The holiday tests begins the simulation on the day # before Thanksgiving, so that the next trading day is # two days ahead. Any tests that hard code events # to be start + oneday will fail, since those events will # be skipped by the simulation. 
START_DATE = pd.Timestamp('2003-11-30', tz='utc') END_DATE = pd.Timestamp('2003-12-08', tz='utc') class TestPositionPerformance(WithInstanceTmpDir, WithTradingEnvironment, ZiplineTestCase): def create_environment_stuff(self, num_days=4, sids=[1, 2], futures_sids=[3]): start = pd.Timestamp('2006-01-01', tz='utc') end = start + timedelta(days=num_days * 2) equities = make_simple_equity_info(sids, start, end) futures = pd.DataFrame.from_dict( { sid: { 'start_date': start, 'end_date': end, 'multiplier': 100, 'exchange': "TEST", } for sid in futures_sids }, orient='index', ) self.env = self.enter_instance_context(tmp_trading_env( equities=equities, futures=futures, load=self.make_load_function(), )) self.sim_params = create_simulation_parameters( start=start, num_days=num_days, ) self.finder = self.env.asset_finder self.asset1 = self.env.asset_finder.retrieve_asset(1) self.asset2 = self.env.asset_finder.retrieve_asset(2) self.asset3 = self.env.asset_finder.retrieve_asset(3) def test_long_short_positions(self): """ start with $1000 buy 100 stock1 shares at $10 sell short 100 stock2 shares at $10 stock1 then goes down to $9 stock2 goes to $11 """ self.create_environment_stuff() trades_1 = factory.create_trade_history( self.asset1, [10, 10, 10, 9], [100, 100, 100, 100], oneday, self.sim_params, trading_calendar=self.trading_calendar, ) trades_2 = factory.create_trade_history( self.asset2, [10, 10, 10, 11], [100, 100, 100, 100], oneday, self.sim_params, trading_calendar=self.trading_calendar, ) data_portal = create_data_portal_from_trade_history( self.env.asset_finder, self.trading_calendar, self.instance_tmpdir, self.sim_params, {1: trades_1, 2: trades_2} ) txn1 = create_txn(self.asset1, trades_1[0].dt, 10.0, 100) txn2 = create_txn(self.asset2, trades_1[0].dt, 10.0, -100) pt = perf.PositionTracker(self.sim_params.data_frequency) pp = perf.PerformancePeriod(1000.0, self.sim_params.data_frequency) pp.position_tracker = pt pt.execute_transaction(txn1) pp.handle_execution(txn1) pt.execute_transaction(txn2) pp.handle_execution(txn2) dt = trades_1[-2].dt pt.sync_last_sale_prices(dt, False, data_portal) pp.calculate_performance() check_perf_period( pp, gross_leverage=2.0, net_leverage=0.0, long_exposure=1000.0, longs_count=1, short_exposure=-1000.0, shorts_count=1) # Validate that the account attributes were updated. account = pp.as_account() check_account(account, settled_cash=1000.0, equity_with_loan=1000.0, total_positions_value=0.0, total_positions_exposure=0.0, regt_equity=1000.0, available_funds=1000.0, excess_liquidity=1000.0, cushion=1.0, leverage=2.0, net_leverage=0.0, net_liquidation=1000.0) dt = trades_1[-1].dt pt.sync_last_sale_prices(dt, False, data_portal) pp.calculate_performance() # Validate that the account attributes were updated. account = pp.as_account() check_perf_period( pp, gross_leverage=2.5, net_leverage=-0.25, long_exposure=900.0, longs_count=1, short_exposure=-1100.0, shorts_count=1) check_account(account, settled_cash=1000.0, equity_with_loan=800.0, total_positions_value=-200.0, total_positions_exposure=-200.0, regt_equity=1000.0, available_funds=1000.0, excess_liquidity=1000.0, cushion=1.25, leverage=2.5, net_leverage=-0.25, net_liquidation=800.0) def test_levered_long_position(self): """ start with $1,000, then buy 1000 shares at $10. 
price goes to $11 """ # post some trades in the market self.create_environment_stuff() trades = factory.create_trade_history( self.asset1, [10, 10, 10, 11], [100, 100, 100, 100], oneday, self.sim_params, trading_calendar=self.trading_calendar, ) data_portal = create_data_portal_from_trade_history( self.env.asset_finder, self.trading_calendar, self.instance_tmpdir, self.sim_params, {1: trades}) txn = create_txn(self.asset1, trades[1].dt, 10.0, 1000) pt = perf.PositionTracker(self.sim_params.data_frequency) pp = perf.PerformancePeriod(1000.0, self.sim_params.data_frequency) pp.position_tracker = pt pt.execute_transaction(txn) pp.handle_execution(txn) pp.calculate_performance() check_perf_period( pp, gross_leverage=10.0, net_leverage=10.0, long_exposure=10000.0, longs_count=1, short_exposure=0.0, shorts_count=0) # Validate that the account attributes were updated. pt.sync_last_sale_prices(trades[-2].dt, False, data_portal) # Validate that the account attributes were updated. account = pp.as_account() check_account(account, settled_cash=-9000.0, equity_with_loan=1000.0, total_positions_value=10000.0, total_positions_exposure=10000.0, regt_equity=-9000.0, available_funds=-9000.0, excess_liquidity=-9000.0, cushion=-9.0, leverage=10.0, net_leverage=10.0, net_liquidation=1000.0) # now simulate a price jump to $11 pt.sync_last_sale_prices(trades[-1].dt, False, data_portal) pp.calculate_performance() check_perf_period( pp, gross_leverage=5.5, net_leverage=5.5, long_exposure=11000.0, longs_count=1, short_exposure=0.0, shorts_count=0) # Validate that the account attributes were updated. account = pp.as_account() check_account(account, settled_cash=-9000.0, equity_with_loan=2000.0, total_positions_value=11000.0, total_positions_exposure=11000.0, regt_equity=-9000.0, available_funds=-9000.0, excess_liquidity=-9000.0, cushion=-4.5, leverage=5.5, net_leverage=5.5, net_liquidation=2000.0) def test_long_position(self): """ verify that the performance period calculates properly for a single buy transaction """ self.create_environment_stuff() # post some trades in the market trades = factory.create_trade_history( self.asset1, [10, 10, 10, 11], [100, 100, 100, 100], oneday, self.sim_params, trading_calendar=self.trading_calendar, ) data_portal = create_data_portal_from_trade_history( self.env.asset_finder, self.trading_calendar, self.instance_tmpdir, self.sim_params, {1: trades}) txn = create_txn(self.asset1, trades[1].dt, 10.0, 100) pt = perf.PositionTracker(self.sim_params.data_frequency) pp = perf.PerformancePeriod(1000.0, self.sim_params.data_frequency, period_open=self.sim_params.start_session, period_close=self.sim_params.end_session) pp.position_tracker = pt pt.execute_transaction(txn) pp.handle_execution(txn) # This verifies that the last sale price is being correctly # set in the positions. If this is not the case then returns can # incorrectly show as sharply dipping if a transaction arrives # before a trade. This is caused by returns being based on holding # stocks with a last sale price of 0. 
self.assertEqual(pp.positions[1].last_sale_price, 10.0) pt.sync_last_sale_prices(trades[-1].dt, False, data_portal) pp.calculate_performance() self.assertEqual( pp.cash_flow, -1 * txn.price * txn.amount, "capital used should be equal to the opposite of the transaction \ cost of sole txn in test" ) self.assertEqual( len(pp.positions), 1, "should be just one position") self.assertEqual( pp.positions[1].asset, txn.asset, "position should be in security with id 1") self.assertEqual( pp.positions[1].amount, txn.amount, "should have a position of {sharecount} shares".format( sharecount=txn.amount ) ) self.assertEqual( pp.positions[1].cost_basis, txn.price, "should have a cost basis of 10" ) self.assertEqual( pp.positions[1].last_sale_price, trades[-1].price, "last sale should be same as last trade. \ expected {exp} actual {act}".format( exp=trades[-1].price, act=pp.positions[1].last_sale_price) ) self.assertEqual( pp.ending_value, 1100, "ending value should be price of last trade times number of \ shares in position" ) self.assertEqual(pp.pnl, 100, "gain of 1 on 100 shares should be 100") check_perf_period( pp, gross_leverage=1.0, net_leverage=1.0, long_exposure=1100.0, longs_count=1, short_exposure=0.0, shorts_count=0) # Validate that the account attributes were updated. account = pp.as_account() check_account(account, settled_cash=0.0, equity_with_loan=1100.0, total_positions_value=1100.0, total_positions_exposure=1100.0, regt_equity=0.0, available_funds=0.0, excess_liquidity=0.0, cushion=0.0, leverage=1.0, net_leverage=1.0, net_liquidation=1100.0) def test_short_position(self): """verify that the performance period calculates properly for a \ single short-sale transaction""" self.create_environment_stuff(num_days=6) trades = factory.create_trade_history( self.asset1, [10, 10, 10, 11, 10, 9], [100, 100, 100, 100, 100, 100], oneday, self.sim_params, trading_calendar=self.trading_calendar, ) trades_1 = trades[:-2] data_portal = create_data_portal_from_trade_history( self.env.asset_finder, self.trading_calendar, self.instance_tmpdir, self.sim_params, {1: trades}) txn = create_txn(self.asset1, trades[1].dt, 10.0, -100) pt = perf.PositionTracker(self.sim_params.data_frequency) pp = perf.PerformancePeriod(1000.0, self.sim_params.data_frequency) pp.position_tracker = pt pt.execute_transaction(txn) pp.handle_execution(txn) pt.sync_last_sale_prices(trades_1[-1].dt, False, data_portal) pp.calculate_performance() self.assertEqual( pp.cash_flow, -1 * txn.price * txn.amount, "capital used should be equal to the opposite of the transaction\ cost of sole txn in test" ) self.assertEqual( len(pp.positions), 1, "should be just one position") self.assertEqual( pp.positions[1].asset, txn.asset, "position should be in security from the transaction" ) self.assertEqual( pp.positions[1].amount, -100, "should have a position of -100 shares" ) self.assertEqual( pp.positions[1].cost_basis, txn.price, "should have a cost basis of 10" ) self.assertEqual( pp.positions[1].last_sale_price, trades_1[-1].price, "last sale should be price of last trade" ) self.assertEqual( pp.ending_value, -1100, "ending value should be price of last trade times number of \ shares in position" ) self.assertEqual(pp.pnl, -100, "gain of 1 on 100 shares should be 100") # simulate additional trades, and ensure that the position value # reflects the new price trades_2 = trades[-2:] # simulate a rollover to a new period pp.rollover() pt.sync_last_sale_prices(trades[-1].dt, False, data_portal) pp.calculate_performance() self.assertEqual( pp.cash_flow, 
0, "capital used should be zero, there were no transactions in \ performance period" ) self.assertEqual( len(pp.positions), 1, "should be just one position" ) self.assertEqual( pp.positions[1].asset, txn.asset, "position should be in security from the transaction" ) self.assertEqual( pp.positions[1].amount, -100, "should have a position of -100 shares" ) self.assertEqual( pp.positions[1].cost_basis, txn.price, "should have a cost basis of 10" ) self.assertEqual( pp.positions[1].last_sale_price, trades_2[-1].price, "last sale should be price of last trade" ) self.assertEqual( pp.ending_value, -900, "ending value should be price of last trade times number of \ shares in position") self.assertEqual( pp.pnl, 200, "drop of 2 on -100 shares should be 200" ) # now run a performance period encompassing the entire trade sample. ptTotal = perf.PositionTracker(self.sim_params.data_frequency) ppTotal = perf.PerformancePeriod( 1000.0, self.sim_params.data_frequency ) ppTotal.position_tracker = pt ptTotal.execute_transaction(txn) ppTotal.handle_execution(txn) ptTotal.sync_last_sale_prices(trades[-1].dt, False, data_portal) ppTotal.calculate_performance() self.assertEqual( ppTotal.cash_flow, -1 * txn.price * txn.amount, "capital used should be equal to the opposite of the transaction \ cost of sole txn in test" ) self.assertEqual( len(ppTotal.positions), 1, "should be
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Mon Nov 5 13:51:51 2018 @author: ivar """ import os from execute import run_shell_command # Splitting NIFTI files along time axis and moving to destination for correction def extract_first_temporary_window_and_save(output_directory, \ blip_file, \ blip_file_name): # blip_file is the relative path from script root + file name # blip_file_name is the file name only process_msg_prefix = "PID %i: " % os.getpid() output_base_name = output_directory + "/" + blip_file_name[:-len(".nii")] + "_" pre_command = 'FSLOUTPUTTYPE=NIFTI' command = 'fslroi ' + '"' + blip_file + '"' + ' ' + '"' + \ output_base_name + '0000"' + ' 0 1' full_command = pre_command + ' && ' + command run_shell_command(full_command) print(process_msg_prefix + "extract_first_temporary_window_and_save: Successfully extracted " + \ " first temporary window of " + \ blip_file + " into directory " + \ output_directory + \ " using fslroi in subprocess shell call") def split_along_temporary_axis_and_save(output_directory, \ blip_file, \ blip_file_name): # blip_file is the relative path from script root + file name # blip_file_name is the file name only process_msg_prefix = "PID %i: " % os.getpid() output_base_name = output_directory + "/" + blip_file_name[:-len(".nii")] + "_" pre_command = 'FSLOUTPUTTYPE=NIFTI' command = 'fslsplit ' + '"' + blip_file + '"' + ' ' + '"' + \ output_base_name + '"' + ' -t' full_command = pre_command + ' && ' + command run_shell_command(full_command) print(process_msg_prefix + "extract_first_temporary_window_and_save: Successfully split " + \ blip_file + " into directory " + output_directory + \ " using fslsplit in subprocess shell call") def merge_blip_down_blip_up_first_temporary_window(blip_down_blip_up_temporary_window_file, \ blip_down_temporary_window_file, \ blip_up_temporary_window_file): # Merge nii files of different phase # Assuming that fslsplit correctly appended # 0000 for the first temporary window # at the end of the file name # Assuming that corresponding data # for both blip directions already # exist in output_directory process_msg_prefix = "PID %i: " % os.getpid() pre_command = 'FSLOUTPUTTYPE=NIFTI' command = 'fslmerge -t ' + '"' + blip_down_blip_up_temporary_window_file + \ '"' + ' ' + '"' + blip_down_temporary_window_file + \ '"' + ' ' + '"' + blip_up_temporary_window_file + '"' full_command = pre_command + ' && ' + command run_shell_command(full_command) print(process_msg_prefix + "Successfully merged " + \ blip_down_temporary_window_file + " with " + \ blip_up_temporary_window_file + " into the file " + \ blip_down_blip_up_temporary_window_file + \ " using fslmerge in subprocess shell call") def topup_compute(merged_image_for_topup_compute_file, \ datain, config): process_msg_prefix = "PID %i: " % os.getpid() output_base_name = merged_image_for_topup_compute_file[:-len(".nii")] out_name = output_base_name + "_generic_out" fout_name = output_base_name + "_field" iout_name = output_base_name + "_corrected" pre_command = 'FSLOUTPUTTYPE=NIFTI' command = 'topup --imain=' + '"' + merged_image_for_topup_compute_file + \ '"' + ' ' + '--datain=' + '"' + datain + \ '"' + ' ' + '--config=' + '"' + config + \ '"' + ' ' + '--out=' + '"' + out_name + \ '"' + ' ' + '--fout=' + '"' + fout_name + \ '"' + ' ' + '--iout=' + '"' + iout_name + '"' full_command = pre_command + ' && ' + command run_shell_command(full_command) print(process_msg_prefix + "Successfully computed off-resonance field " + \ fout_name + " based on " + \ 
merged_image_for_topup_compute_file + " and used it to correct " + \ merged_image_for_topup_compute_file + " into " + \ iout_name) return iout_name + ".nii", out_name, fout_name + ".nii" def topup_apply(prepared_4D_file, datain, topup_out_base_name_file): process_msg_prefix = "PID %i: " % os.getpid() output_base_name = prepared_4D_file[:-len(".nii")] out_name = output_base_name + "_applytopup" pre_command = 'FSLOUTPUTTYPE=NIFTI' command = 'applytopup --imain=' + '"' + prepared_4D_file[:-len(".nii")] + \ '"' + ' ' + '--inindex=1' + \ ' ' + '--datain=' + '"' + datain + \ '"' + ' ' + '--topup=' + '"' + topup_out_base_name_file + \ '"' + ' ' + '--out=' + '"' + out_name + '" --method=jac' full_command = pre_command + ' && ' + command run_shell_command(full_command) print(process_msg_prefix + "Successfully ran applytopup on " + \ prepared_4D_file + " based on topup output" + \ topup_out_base_name_file + "*") return out_name + ".nii" def add_duplicate_slices(output_directory, file_name): process_msg_prefix = "PID %i: " % os.getpid() output_base = file_name[:-len(".nii")] output_zmin = output_base + '_zmin' output_zmax = output_base + '_zmax' output_prep = output_base + '_prep_topup' output_prep_file = output_directory + "/" + output_prep + ".nii" pre_command = 'cd ' + '"' + output_directory + '"' + \ ' && FSLOUTPUTTYPE=NIFTI && xdim=$(fslval ' + \ '"' + file_name + '"' + ' dim1) && ydim=$(fslval ' + \ '"' + file_name + '"' + ' dim2) && zdim=$(fslval ' + \ '"' + file_name + '"' + ' dim3)' # This command will extract the 2D lowest slice along z axis # to the file output_zmin.nii output_zmin_command = 'fslroi ' + '"' + file_name + '"' + \ ' ' + '"' + output_zmin + '"' + \ ' ' + '0 $xdim' + \ ' ' + '0 $ydim' + \ ' ' + '0 1' # This command will extract the 2D highest slice along z axis # to the file output_zmax.nii output_zmax_command = 'fslroi ' + '"' + file_name + '"' + \ ' ' + '"' + output_zmax + '"' + \ ' ' + '0 $xdim' + \ ' ' + '0 $ydim' + \ ' ' + '$((zdim-1)) 1' # This command will merge the two extracted 2D slices # output_zmin.nii and output_zmax.nii to file , # to the file output_prep.nii output_prep_command = 'fslmerge -z ' + '"' + output_prep + '"' + \ ' ' + '"' + output_zmin + '"' + \ ' ' + '"' + file_name + '"' + \ ' ' + '"' + output_zmax + '"' # Concatenate all the commands into a one-liner command (a large string string to be evaluated in a shell environment) full_command = pre_command + ' && ' + output_zmin_command + ' && ' + output_zmax_command + ' && ' + output_prep_command run_shell_command(full_command) print(process_msg_prefix + "Successfully merged " + \ "duplicate zmin and zmax slices to " + \ output_directory + "/" + file_name + \ ", thereby creating " + \ output_prep_file + \ " for FSL topup") return output_prep_file def remove_first_and_last_slices_and_save(output_directory, file_name): process_msg_prefix = "PID %i: " % os.getpid() output_base = file_name[:-len(".nii")] output_file_name = output_base + '_postp' output_file = output_directory + "/" + output_file_name + ".nii" pre_command = 'cd ' + '"' + output_directory + '"' + \ ' && FSLOUTPUTTYPE=NIFTI && xdim=$(fslval ' + \ '"' + file_name + '"' + ' dim1) && ydim=$(fslval ' + \ '"' + file_name + '"' + ' dim2) && zdim=$(fslval ' + \ '"' + file_name + '"' + ' dim3)' # This command will extract the 2D lowest slice along z axis # to the file output_zmin.nii command = 'fslroi ' + '"' + file_name + '"' + \ ' ' + '"' + output_file_name + '"' + \ ' ' + '0 $xdim' + \ ' ' + '0 $ydim' + \ ' ' + '1 $((zdim-2))' # Concatenate all the 
    # commands into a one-liner (a single string to be evaluated in a shell environment)
    full_command = pre_command + ' && ' + command

    run_shell_command(full_command)

    print(process_msg_prefix + "Successfully removed " + \
          "first and last z slice from " + \
          output_directory + "/" + file_name + \
          ", and saved to " + \
          output_file)

    return output_file


def copy_header(source_nii_file, dest_nii_file):
    # Make the NIFTI header of dest_nii_file
    # equal to the NIFTI header of source_nii_file
    process_msg_prefix = "PID %i: " % os.getpid()

    pre_command = 'FSLOUTPUTTYPE=NIFTI'

    # This command copies the NIFTI geometry/header information
    # from source_nii_file onto the destination file
    command = 'fslcpgeom ' + \
              '"' + source_nii_file
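
# Hedged sketch (not from the original file, whose copy_header() is truncated
# above): one plausible completion, assuming the same quoting/run pattern as
# the other helpers in this module and the standard "fslcpgeom <source> <dest>"
# invocation.  It reuses os and run_shell_command imported at the top of the
# file.
def copy_header_sketch(source_nii_file, dest_nii_file):
    process_msg_prefix = "PID %i: " % os.getpid()
    pre_command = 'FSLOUTPUTTYPE=NIFTI'
    command = 'fslcpgeom ' + \
              '"' + source_nii_file + '"' + ' ' + \
              '"' + dest_nii_file + '"'
    full_command = pre_command + ' && ' + command
    run_shell_command(full_command)
    print(process_msg_prefix + "Copied NIFTI header geometry from " + \
          source_nii_file + " to " + dest_nii_file)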
4.6, 5.6)*nanometers) topology_after = modeller.getTopology() dim3 = topology_after.getPeriodicBoxVectors() self.assertVecAlmostEqual(dim3[0]/nanometers, Vec3(3.6, 0, 0)) self.assertVecAlmostEqual(dim3[1]/nanometers, Vec3(0, 4.6, 0)) self.assertVecAlmostEqual(dim3[2]/nanometers, Vec3(0, 0, 5.6)) # Third way of passing in the periodic box vectors: with the boxVectors parameter to addSolvent() topology_start = self.pdb.topology modeller = Modeller(topology_start, self.positions) modeller.deleteWater() modeller.addSolvent(self.forcefield, boxVectors = (Vec3(3.4, 0, 0), Vec3(0.5, 4.4, 0), Vec3(-1.0, -1.5, 5.4))*nanometers) topology_after = modeller.getTopology() dim3 = topology_after.getPeriodicBoxVectors() self.assertVecAlmostEqual(dim3[0]/nanometers, Vec3(3.4, 0, 0)) self.assertVecAlmostEqual(dim3[1]/nanometers, Vec3(0.5, 4.4, 0)) self.assertVecAlmostEqual(dim3[2]/nanometers, Vec3(-1.0, -1.5, 5.4)) # Fourth way of passing in the periodic box vectors: pass a 'padding' value to addSolvent() topology_start = self.pdb.topology modeller = Modeller(topology_start, self.positions) modeller.deleteWater() modeller.addSolvent(self.forcefield, padding = 1.0*nanometers) topology_after = modeller.getTopology() dim3 = topology_after.getPeriodicBoxVectors() self.assertVecAlmostEqual(dim3[0]/nanometers, Vec3(2.8802, 0, 0)) self.assertVecAlmostEqual(dim3[1]/nanometers, Vec3(0, 2.8802, 0)) self.assertVecAlmostEqual(dim3[2]/nanometers, Vec3(0, 0, 2.8802)) # Fifth way: specify a number of molecules to add instead of a box size topology_start = self.pdb.topology modeller = Modeller(topology_start, self.positions) modeller.deleteWater() numInitial = len(list(modeller.topology.residues())) modeller.addSolvent(self.forcefield, numAdded=1000) self.assertEqual(numInitial+1000, len(list(modeller.topology.residues()))) def test_addSolventNeutralSolvent(self): """ Test the addSolvent() method; test adding ions to neutral solvent. """ topology_start = self.pdb.topology topology_start.setUnitCellDimensions(Vec3(3.5, 3.5, 3.5)*nanometers) modeller = Modeller(topology_start, self.positions) modeller.deleteWater() modeller.addSolvent(self.forcefield, ionicStrength = 2.0*molar) topology_after = modeller.getTopology() water_count=0 sodium_count=0 chlorine_count=0 for residue in topology_after.residues(): if residue.name=='HOH': water_count += 1 elif residue.name=='NA': sodium_count += 1 elif residue.name=='CL': chlorine_count += 1 total_added = water_count+sodium_count+chlorine_count self.assertEqual(total_added, 1364) expected_ion_fraction = 2.0*molar/(55.4*molar) expected_ions = math.floor(total_added*expected_ion_fraction+0.5) self.assertEqual(sodium_count, expected_ions) self.assertEqual(chlorine_count, expected_ions) def test_addSolventNegativeSolvent(self): """ Test the addSolvent() method; test adding ions to a negatively charged solvent. 
""" topology_start = self.pdb.topology topology_start.setUnitCellDimensions(Vec3(3.5, 3.5, 3.5)*nanometers) for neutralize in (True, False): # set up modeller with no solvent modeller = Modeller(topology_start, self.positions) modeller.deleteWater() # add 5 Cl- ions to the original topology topology_toAdd = Topology() newChain = topology_toAdd.addChain() for i in range(5): topology_toAdd.addResidue('CL', newChain) residues = [residue for residue in topology_toAdd.residues()] for i in range(5): topology_toAdd.addAtom('Cl',Element.getBySymbol('Cl'), residues[i]) positions_toAdd = [Vec3(1.0,1.2,1.5), Vec3(1.7,1.0,1.4), Vec3(1.5,2.0,1.0), Vec3(2.0,2.0,2.0), Vec3(2.0,1.5,1.0)]*nanometers modeller.add(topology_toAdd, positions_toAdd) modeller.addSolvent(self.forcefield, ionicStrength=1.0*molar, neutralize=neutralize) topology_after = modeller.getTopology() water_count = 0 sodium_count = 0 chlorine_count = 0 for residue in topology_after.residues(): if residue.name=='HOH': water_count += 1 elif residue.name=='NA': sodium_count += 1 elif residue.name=='CL': chlorine_count += 1 total_water_ions = water_count+sodium_count+chlorine_count expected_ion_fraction = 1.0*molar/(55.4*molar) expected_chlorine = math.floor((total_water_ions-10)*expected_ion_fraction+0.5)+5 expected_sodium = expected_chlorine if neutralize else expected_chlorine-5 self.assertEqual(sodium_count, expected_sodium) self.assertEqual(chlorine_count, expected_chlorine) def test_addSolventPositiveSolvent(self): """ Test the addSolvent() method; test adding ions to a positively charged solvent. """ topology_start = self.pdb.topology topology_start.setUnitCellDimensions(Vec3(3.5, 3.5, 3.5)*nanometers) for neutralize in (True, False): # set up modeller with no solvent modeller = Modeller(topology_start, self.positions) modeller.deleteWater() # add 5 Na+ ions to the original topology topology_toAdd = Topology() newChain = topology_toAdd.addChain() for i in range(5): topology_toAdd.addResidue('NA', newChain) residues = [residue for residue in topology_toAdd.residues()] for i in range(5): topology_toAdd.addAtom('Na',Element.getBySymbol('Na'), residues[i]) positions_toAdd = [Vec3(1.0,1.2,1.5), Vec3(1.7,1.0,1.4), Vec3(1.5,2.0,1.0), Vec3(2.0,2.0,2.0), Vec3(2.0,1.5,1.0)]*nanometers # positions_toAdd doesn't need to change modeller.add(topology_toAdd, positions_toAdd) modeller.addSolvent(self.forcefield, ionicStrength=1.0*molar, neutralize=neutralize) topology_after = modeller.getTopology() water_count = 0 sodium_count = 0 chlorine_count = 0 for residue in topology_after.residues(): if residue.name=='HOH': water_count += 1 elif residue.name=='NA': sodium_count += 1 elif residue.name=='CL': chlorine_count += 1 total_water_ions = water_count+sodium_count+chlorine_count expected_ion_fraction = 1.0*molar/(55.4*molar) expected_sodium = math.floor((total_water_ions-10)*expected_ion_fraction+0.5)+5 expected_chlorine = expected_sodium if neutralize else expected_sodium-5 self.assertEqual(sodium_count, expected_sodium) self.assertEqual(chlorine_count, expected_chlorine) def test_addSolventIons(self): """ Test the addSolvent() method with all possible choices for positive and negative ions. 
""" topology_start = self.pdb.topology topology_start.setUnitCellDimensions(Vec3(3.5, 3.5, 3.5)*nanometers) # set up modeller with no solvent modeller = Modeller(topology_start, self.positions) modeller.deleteWater() topology_nowater = modeller.getTopology() positions_nowater = modeller.getPositions() expected_ion_fraction = 1.0*molar/(55.4*molar) for positiveIon in ['Cs+', 'K+', 'Li+', 'Na+', 'Rb+']: ionName = positiveIon[:-1].upper() modeller = Modeller(topology_nowater, positions_nowater) modeller.addSolvent(self.forcefield, positiveIon=positiveIon, ionicStrength=1.0*molar) topology_after = modeller.getTopology() water_count = 0 positive_ion_count = 0 chlorine_count = 0 for residue in topology_after.residues(): if residue.name=='HOH': water_count += 1 elif residue.name==ionName: positive_ion_count += 1 elif residue.name=='CL': chlorine_count += 1 total_added = water_count+positive_ion_count+chlorine_count self.assertEqual(total_added, 1364) expected_ions = math.floor(total_added*expected_ion_fraction+0.5) self.assertEqual(positive_ion_count, expected_ions) self.assertEqual(chlorine_count, expected_ions) for negativeIon in ['Cl-', 'Br-', 'F-', 'I-']: ionName = negativeIon[:-1].upper() modeller = Modeller(topology_nowater, positions_nowater) modeller.addSolvent(self.forcefield, negativeIon=negativeIon, ionicStrength=1.0*molar) topology_after = modeller.getTopology() water_count = 0 sodium_count = 0 negative_ion_count = 0 for residue in topology_after.residues(): if residue.name=='HOH': water_count += 1 elif residue.name=='NA': sodium_count += 1 elif residue.name==ionName: negative_ion_count += 1 total_added = water_count+sodium_count+negative_ion_count self.assertEqual(total_added, 1364) expected_ions = math.floor(total_added*expected_ion_fraction+0.5) self.assertEqual(positive_ion_count, expected_ions) self.assertEqual(chlorine_count, expected_ions) def test_addHydrogensPdb2(self): """ Test the addHydrogens() method on the T4-lysozyme-L99A pdb file. """ # build the Modeller topology_start = self.topology_start2 positions = self.positions2 modeller = Modeller(topology_start, positions) # remove hydrogens from the topology toDelete = [atom for atom in topology_start.atoms() if atom.element==Element.getBySymbol('H')] modeller.delete(toDelete) # Create a variants list to force the one histidine to be of the right variation. residues = [residue for residue in topology_start.residues()] variants = [None]*len(residues) # For this protein, when you add hydrogens, the hydrogen is added to the delta nitrogen. # By setting variants[30] to 'HIE', we force the hydrogen onto the epsilon nitrogen, so # that it will match the topology in topology_start. variants[30] = 'HIE' # add the hydrogens back modeller.addHydrogens(self.forcefield, variants=variants) topology_after = modeller.getTopology() validate_equivalence(self, topology_start, topology_after) def test_addHydrogensPdb3(self): """ Test the addHydrogens() method on the metallothionein pdb file. 
""" # build the Modeller topology_start = self.topology_start3 positions = self.positions3 modeller = Modeller(topology_start, positions) # remove hydrogens from the topology toDelete = [atom for atom in topology_start.atoms() if atom.element==Element.getBySymbol('H')] modeller.delete(toDelete) # add the hydrogens back modeller.addHydrogens(self.forcefield) topology_after = modeller.getTopology() validate_equivalence(self, topology_start, topology_after) def test_addHydrogensPdb3_keepPositions(self): """ Test addHydrogens() does not change existing Hs positions """ # build the Modeller topology_start = self.topology_start3 positions = self.positions3.value_in_unit(nanometers) modeller = Modeller(topology_start, positions) # Record original hydrogen positions oriH = [atom.index for atom in modeller.topology.atoms() if atom.element == element.hydrogen] oriH_pos = [positions[i] for i in oriH] # Remove hydrogens from last residue res_list = list(topology_start.residues()) toDelete = [atom for atom in res_list[-1].atoms() if atom.element == element.hydrogen] modeller.delete(toDelete) n_deleted = len(toDelete) # Add hydrogen atoms back. modeller.addHydrogens(self.forcefield) topology_after = modeller.getTopology() # Fetch 'new' positions new_positions = modeller.positions.value_in_unit(nanometers) newH = [atom.index for atom in topology_after.atoms() if atom.element == element.hydrogen] newH_pos = [new_positions[i] for i in newH] # Did we add all Hs back in correctly? self.assertEqual(len(newH), len(oriH)) # Are the old ones at the same position? # Negative control oriH_fixed = oriH_pos[:-1*n_deleted] newH_fixed = newH_pos[:-1*n_deleted] xyz_diff = any([norm(o-n) > 1e-6 for o, n in zip(oriH_fixed, newH_fixed)]) self.assertEqual(xyz_diff, False) # Were the new ones optimized? # Positive control oriH_added = oriH_pos[-1*n_deleted:] newH_added = newH_pos[-1*n_deleted:] xyz_diff = all([norm(o-n) > 1e-6 for o, n in zip(oriH_added, newH_added)]) self.assertEqual(xyz_diff, True) def test_addHydrogensASH(self): """ Test of addHydrogens() in which we force ASH to be a variant using the variants parameter. """ # use the T4-lysozyme-L99A pdb file topology_start = self.topology_start2 positions = self.positions2 # build the Modeller modeller = Modeller(topology_start, positions) # remove hydrogens from the topology toDelete = [atom for atom in topology_start.atoms() if atom.element==Element.getBySymbol('H')] modeller.delete(toDelete) # Create a variants list to force the one histidine to be of the right variation. residues = [residue for residue in topology_start.residues()] variants = [None]*len(residues) # For this protein, when you add hydrogens, the hydrogen is added to the delta nitrogen. # By setting variants[30] to 'HIE', we force the hydrogen onto the epsilon nitrogen, so # that it will match the topology in topology_start. variants[30] = 'HIE' ASP_residue_list = [9,19,46,60,69,71,88,91,126,158] for residue_index in ASP_residue_list: variants[residue_index] = 'ASH' # add the hydrogens back, using the variants list we just built modeller.addHydrogens(self.forcefield, variants=variants) topology_ASH = modeller.getTopology() # There should be extra hydrogens on the ASP residues. Assert that they exist, # then we delete them and validate that the topology matches what we started with. 
index_list_ASH = [176, 357, 761, 976, 1121, 1150, 1430, 1473, 2028, 2556] atoms = [atom for atom in topology_ASH.atoms()] toDelete2 = [] for index in index_list_ASH: self.assertTrue(atoms[index].element.symbol=='H') toDelete2.append(atoms[index]) modeller.delete(toDelete2) topology_ASP = modeller.getTopology() validate_equivalence(self, topology_ASP, topology_start) def test_addHydrogensCYX(self): """ Test of addHydrogens() in which we force CYX to be a variant using the variants parameter. """ # use the metallothionein pdb file topology_start = self.topology_start3 positions = self.positions3 # build the Modeller modeller = Modeller(topology_start, positions) # remove hydrogens from the
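
# Illustrative helper (not part of the original test module above): the
# expected-ion arithmetic that the addSolvent() tests repeat, assuming the
# ~55.4 molar water concentration and the floor(x + 0.5) rounding used there.
import math

def expected_ion_pairs(total_water_and_ions, ionic_strength_molar):
    fraction = ionic_strength_molar / 55.4
    return math.floor(total_water_and_ions * fraction + 0.5)

# e.g. the neutral-solvent test adds 1364 residues at 2.0 molar:
print(expected_ion_pairs(1364, 2.0))   # -> 49 Na+ / 49 Cl- pairs expected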
True if self.start is not None: return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_dnx_driver_oper as meta return meta._meta_table['Fia.Nodes.Node.OirHistory.Flags.Flag.Slots.Slot.CardInfo.OirCircularBuffer']['meta_info'] @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . Cannot derive path.') return self.parent._common_path +'/Cisco-IOS-XR-dnx-driver-oper:card-info' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return False def _has_data(self): if not self.is_config(): return False if self.card_flag is not None: return True if self.card_name is not None: return True if self.card_state is not None: return True if self.card_type is not None: return True if self.cxp_avail_bitmap is not None: return True if self.evt_flag is not None: return True if self.exp_num_asics is not None: return True if self.exp_num_asics_per_fsdb is not None: return True if self.instance is not None: return True if self.is_powered is not None: return True if self.num_cos_per_port is not None: return True if self.num_ilkns_per_asic is not None: return True if self.num_local_ports_per_ilkn is not None: return True if self.oir_circular_buffer is not None and self.oir_circular_buffer._has_data(): return True if self.reg_flag is not None: return True if self.slot_no is not None: return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_dnx_driver_oper as meta return meta._meta_table['Fia.Nodes.Node.OirHistory.Flags.Flag.Slots.Slot.CardInfo']['meta_info'] @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . Cannot derive path.') if self.slot is None: raise YPYModelError('Key property slot is None') return self.parent._common_path +'/Cisco-IOS-XR-dnx-driver-oper:slot[Cisco-IOS-XR-dnx-driver-oper:slot = ' + str(self.slot) + ']' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return False def _has_data(self): if not self.is_config(): return False if self.slot is not None: return True if self.asic_avail_mask is not None: return True if self.asic_oper_notify_to_fsdb_pending_bmap is not None: return True if self.board_rev_id is not None: return True if self.card_avail_mask is not None: return True if self.card_info is not None: for child_ref in self.card_info: if child_ref._has_data(): return True if self.coeff_major_rev is not None: return True if self.coeff_minor_rev is not None: return True if self.device_info is not None: for child_ref in self.device_info: if child_ref._has_data(): return True if self.drv_version is not None: return True if self.drvr_current_startup_timestamp is not None: return True if self.drvr_initial_startup_timestamp is not None: return True if self.exp_asic_avail_mask is not None: return True if self.fc_mode is not None: return True if self.fgid_conn_active is not None: return True if self.fgid_reg_active is not None: return True if self.fsdb_conn_active is not None: return True if self.fsdb_reg_active is not None: return True if self.functional_role is not None: return True if self.is_cih_registered is not None: return True if self.is_driver_ready is not None: return True if self.is_fgid_download_completed is not None: return True if self.is_fgid_download_in_progress is not None: return True if self.is_full_fgid_download_req is not None: return True if self.is_gaspp_registered is not 
None: return True if self.issu_abort_rcvd is not None: return True if self.issu_abort_sent is not None: return True if self.issu_mgr_conn_active is not None: return True if self.issu_mgr_reg_active is not None: return True if self.issu_ready_ntfy_pending is not None: return True if self.issu_role is not None: return True if self.num_cm_conn_reqs is not None: return True if self.num_fgid_conn_reqs is not None: return True if self.num_fsdb_conn_reqs is not None: return True if self.num_fstats_conn_reqs is not None: return True if self.num_intf_ports is not None: return True if self.num_issu_mgr_conn_reqs is not None: return True if self.num_peer_fia_conn_reqs is not None: return True if self.num_pm_conn_reqs is not None: return True if self.rack_name is not None: return True if self.rack_num is not None: return True if self.rack_type is not None: return True if self.respawn_count is not None: return True if self.total_asics is not None: return True if self.uc_weight is not None: return True if self.ucmc_ratio is not None: return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_dnx_driver_oper as meta return meta._meta_table['Fia.Nodes.Node.OirHistory.Flags.Flag.Slots.Slot']['meta_info'] @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . Cannot derive path.') return self.parent._common_path +'/Cisco-IOS-XR-dnx-driver-oper:slots' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return False def _has_data(self): if not self.is_config(): return False if self.slot is not None: for child_ref in self.slot: if child_ref._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_dnx_driver_oper as meta return meta._meta_table['Fia.Nodes.Node.OirHistory.Flags.Flag.Slots']['meta_info'] @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . Cannot derive path.') if self.flag is None: raise YPYModelError('Key property flag is None') return self.parent._common_path +'/Cisco-IOS-XR-dnx-driver-oper:flag[Cisco-IOS-XR-dnx-driver-oper:flag = ' + str(self.flag) + ']' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return False def _has_data(self): if not self.is_config(): return False if self.flag is not None: return True if self.slots is not None and self.slots._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_dnx_driver_oper as meta return meta._meta_table['Fia.Nodes.Node.OirHistory.Flags.Flag']['meta_info'] @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . Cannot derive path.') return self.parent._common_path +'/Cisco-IOS-XR-dnx-driver-oper:flags' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return False def _has_data(self): if not self.is_config(): return False if self.flag is not None: for child_ref in self.flag: if child_ref._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_dnx_driver_oper as meta return meta._meta_table['Fia.Nodes.Node.OirHistory.Flags']['meta_info'] @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . 
Cannot derive path.') return self.parent._common_path +'/Cisco-IOS-XR-dnx-driver-oper:oir-history' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return False def _has_data(self): if not self.is_config(): return False if self.flags is not None and self.flags._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_dnx_driver_oper as meta return meta._meta_table['Fia.Nodes.Node.OirHistory']['meta_info'] class AsicStatistics(object): """ FIA asic statistics information .. attribute:: statistics_asic_instances Instance table for statistics **type**\: :py:class:`StatisticsAsicInstances <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_driver_oper.Fia.Nodes.Node.AsicStatistics.StatisticsAsicInstances>` """ _prefix = 'dnx-driver-oper' _revision = '2015-11-09' def __init__(self): self.parent = None self.statistics_asic_instances = Fia.Nodes.Node.AsicStatistics.StatisticsAsicInstances() self.statistics_asic_instances.parent = self class StatisticsAsicInstances(object): """ Instance table for statistics .. attribute:: statistics_asic_instance Asic instance for statistics **type**\: list of :py:class:`StatisticsAsicInstance <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_driver_oper.Fia.Nodes.Node.AsicStatistics.StatisticsAsicInstances.StatisticsAsicInstance>` """ _prefix = 'dnx-driver-oper' _revision = '2015-11-09' def __init__(self): self.parent = None self.statistics_asic_instance = YList() self.statistics_asic_instance.parent = self self.statistics_asic_instance.name = 'statistics_asic_instance' class StatisticsAsicInstance(object): """ Asic instance for statistics .. attribute:: instance <key> Asic instance **type**\: int **range:** 0..255 .. attribute:: fmac_statistics Statistics of FMAC **type**\: :py:class:`FmacStatistics <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_driver_oper.Fia.Nodes.Node.AsicStatistics.StatisticsAsicInstances.StatisticsAsicInstance.FmacStatistics>` .. attribute:: pbc_statistics Packet Byte Counter for a Asic **type**\: :py:class:`PbcStatistics <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_driver_oper.Fia.Nodes.Node.AsicStatistics.StatisticsAsicInstances.StatisticsAsicInstance.PbcStatistics>` """ _prefix = 'dnx-driver-oper' _revision = '2015-11-09' def __init__(self): self.parent = None self.instance = None self.fmac_statistics = Fia.Nodes.Node.AsicStatistics.StatisticsAsicInstances.StatisticsAsicInstance.FmacStatistics() self.fmac_statistics.parent = self self.pbc_statistics = Fia.Nodes.Node.AsicStatistics.StatisticsAsicInstances.StatisticsAsicInstance.PbcStatistics() self.pbc_statistics.parent = self class PbcStatistics(object): """ Packet Byte Counter for a Asic .. attribute:: pbc_stats PBC stats bag **type**\: :py:class:`PbcStats <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_driver_oper.Fia.Nodes.Node.AsicStatistics.StatisticsAsicInstances.StatisticsAsicInstance.PbcStatistics.PbcStats>` """ _prefix = 'dnx-driver-oper' _revision = '2015-11-09' def __init__(self): self.parent = None self.pbc_stats = Fia.Nodes.Node.AsicStatistics.StatisticsAsicInstances.StatisticsAsicInstance.PbcStatistics.PbcStats() self.pbc_stats.parent = self class PbcStats(object): """ PBC stats bag .. attribute:: asic_instance asic instance **type**\: int **range:** 0..4294967295 .. attribute:: chip_ver chip ver **type**\: int **range:** 0..65535 .. attribute:: rack_no rack no **type**\: int **range:** 0..4294967295 .. attribute:: slot_no slot no **type**\: int **range:** 0..4294967295 .. 
attribute:: stats_info stats info **type**\: :py:class:`StatsInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_driver_oper.Fia.Nodes.Node.AsicStatistics.StatisticsAsicInstances.StatisticsAsicInstance.PbcStatistics.PbcStats.StatsInfo>` .. attribute:: valid valid **type**\: bool """ _prefix = 'dnx-driver-oper' _revision = '2015-11-09' def __init__(self): self.parent = None self.asic_instance = None self.chip_ver = None self.rack_no = None self.slot_no = None self.stats_info = Fia.Nodes.Node.AsicStatistics.StatisticsAsicInstances.StatisticsAsicInstance.PbcStatistics.PbcStats.StatsInfo() self.stats_info.parent = self self.valid = None class StatsInfo(object): """ stats info .. attribute:: block_info block info **type**\: list of :py:class:`BlockInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_driver_oper.Fia.Nodes.Node.AsicStatistics.StatisticsAsicInstances.StatisticsAsicInstance.PbcStatistics.PbcStats.StatsInfo.BlockInfo>` .. attribute:: num_blocks Num Blocks **type**\: int **range:** 0..255 """ _prefix = 'dnx-driver-oper' _revision = '2015-11-09' def __init__(self): self.parent = None self.block_info = YList() self.block_info.parent = self self.block_info.name = 'block_info' self.num_blocks = None class BlockInfo(object): """ block info .. attribute:: block_name Block Name **type**\: str **length:** 0..11 .. attribute:: field_info field info **type**\: list of :py:class:`FieldInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_dnx_driver_oper.Fia.Nodes.Node.AsicStatistics.StatisticsAsicInstances.StatisticsAsicInstance.PbcStatistics.PbcStats.StatsInfo.BlockInfo.FieldInfo>` .. attribute:: num_fields
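
# Illustrative helper (not part of the generated module above): how the
# _common_path properties assemble keyed list entries such as
# ".../Cisco-IOS-XR-dnx-driver-oper:slot[Cisco-IOS-XR-dnx-driver-oper:slot = 3]".
# The function name is hypothetical; the generated classes inline this string
# building per class and raise YPYModelError instead of ValueError.
def keyed_path_segment(parent_path, module, list_name, key_name, key_value):
    if key_value is None:
        raise ValueError('Key property %s is None' % key_name)
    return (parent_path + '/' + module + ':' + list_name +
            '[' + module + ':' + key_name + ' = ' + str(key_value) + ']')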
else: form = ProposalFormEdit(request.POST, request.FILES, request=request, instance=obj) if form.is_valid(): obj = form.save() if form.changed_data: update_cached_project(obj) check_content_policy.CPVCheckThread(obj).start() if obj.Private.all(): for std in obj.Private.all(): mail_proposal_private(obj, std, "Your private proposal was edited.") return render(request, "proposals/message_project.html", {"Message": "Proposal saved!", "project": obj}) else: if obj.Status == 4: form = ProposalFormLimited(request=request, instance=obj) title = 'Edit active Proposal' template = 'proposals/form_active_proposal.html' else: form = ProposalFormEdit(request=request, instance=obj) return render(request, template, {'form': form, 'formtitle': title, 'buttontext': 'Save'}) @group_required('type1staff', 'type2staff', 'type2staffunverified', 'type3staff', 'type4staff') @can_view_project def copy_project(request, pk): """ Copy a proposal from a previous timeslot. Only for staff that is allowed to see the proposal to copy. :param pk: the id of proposal to copy :param request: :return: """ if request.method == 'POST': form = ProposalFormCreate(request.POST, request=request) if form.is_valid(): prop = form.save() mail_proposal_all(request, prop) if prop.Private.all(): for std in prop.Private.all(): mail_proposal_private(prop, std, "A private proposal was created for you.") return render(request, "proposals/message_project.html", {"Message": "Proposal created!", "project": prop}) else: old_proposal = get_object_or_404(Proposal, pk=pk) oldpk = old_proposal.pk old_proposal.pk = None # default timeslot. Overridden by form if this is not in phase 1. old_proposal.TimeSlot = get_timeslot() # Assistants and privates are removed, because m2m is not copied in this way. form = ProposalFormCreate(request=request, instance=old_proposal, copy=oldpk) return render(request, 'GenericForm.html', {'form': form, 'formtitle': 'Edit copied proposal', 'buttontext': 'Create and go to next step'}) @group_required('type1staff', 'type2staff', 'type2staffunverified', 'type3staff', 'type4staff') @can_edit_project def add_file(request, pk, ty): """ Add a file of type ty to a project. The type can be an image or a file (usually pdf). The image is shown in an image slider, an attachment is shown as a download button. :param request: :param pk: pk of the project :param ty: type of file to add. i for image, a for attachment :return: """ obj = get_object_or_404(Proposal, pk=pk) if ty == "i": ty = "image" form = ProposalImageForm elif ty == "a": ty = "attachment" form = ProposalAttachmentForm else: raise PermissionDenied("Invalid type supplied") if request.method == 'POST': form = form(request.POST, request.FILES, request=request) if form.is_valid(): file = form.save(commit=False) file.Proposal = obj file.save() return render(request, "proposals/message_project.html", {"Message": "File to Proposal saved! Click the button below to add another file.", "project": obj}) return render(request, 'GenericForm.html', {'form': form, 'formtitle': 'Add ' + ty + ' to project ' + obj.Title, 'buttontext': 'Save'}) @group_required('type1staff', 'type2staff', 'type2staffunverified', 'type3staff', 'type4staff') @can_edit_project def edit_file(request, pk, ty): """ Edit a file of a project. 
:param request: :param pk: pk of the proposal to edit file of :param ty: type of file to edit, either i for image or a for attachement :return: """ obj = get_object_or_404(Proposal, pk=pk) if ty == "i": ty = "image" model = ProposalImage form = ProposalImageForm elif ty == "a": ty = "attachment" model = ProposalAttachment form = ProposalAttachmentForm else: raise PermissionDenied("Invalid type supplied") form_set = modelformset_factory(model, form=form, can_delete=True, extra=0) qu = model.objects.filter(Proposal=obj) formset = form_set(queryset=qu) if request.method == 'POST': formset = form_set(request.POST, request.FILES) if formset.is_valid(): formset.save() return render(request, "proposals/message_project.html", {"Message": "File changes saved!", "project": obj}) return render(request, 'GenericForm.html', {'formset': formset, 'formtitle': 'All ' + ty + 's in project ' + obj.Title, "Proposal": obj.pk, 'buttontext': 'Save changes'}) @group_required('type3staff') def ask_delete_project(request, pk): """ A confirmform for type3staff to delete a proposal. Regular staff cannot delete a proposal, as this should not happen. Public (=status4) proposals cannot be deleted. :param request: :param pk: pk of proposal to delete. :return: """ obj = get_object_or_404(Proposal, pk=pk) if obj.Status >= 3: return render(request, "proposals/message_project.html", {"Message": "This Proposal is already approved or public, it cannot be deleted", "project": obj}, status=403) form = "<a href=" + reverse('proposals:deleteproposal', kwargs={"pk": int( pk)}) + " class='button warning'><span class='mif-bin'></span>click here to DELETE</a></button></form>" return render(request, "base.html", {"Message": "Are you sure to delete? This cannot be undone " + form}) @group_required('type3staff') def delete_project(request, pk): """ Really delete a proposal. This can only be called by type3staff after going to the confirm delete page. :param request: :param pk: pk of the proposal to delete :return: """ obj = get_object_or_404(Proposal, pk=pk) if obj.Status >= 3: return render(request, "proposals/message_project.html", {"Message": "Proposal is locked for editing", "project": obj}, status=403) if "HTTP_REFERER" in request.META: if 'ask' in request.META['HTTP_REFERER']: # make sure previous page is askdelete title = obj.Title delete_object(obj) return render(request, "base.html", {"Message": "Proposal " + title + " is removed", "return": ""}) raise PermissionDenied("You should not access this page directly") @can_upgrade_project def upgrade_status(request, pk): """ Upgrade the status of a given proposal. :param request: :param pk: pk of the proposal to upgrade :return: """ r = upgrade_status_api(request, pk) obj = get_object_or_404(Proposal, pk=pk) # should be after upgrade return render(request, "proposals/message_project.html", {"Message": r.content.decode(), "project": obj}, status=r.status_code) @can_downgrade_project def downgrade_status(request, pk): """ Downgrade the status of a proposal, and send the affected users (responsible and assistants) a mail that their proposal is downgraded in status. Mailing is done via mailaffecteduser via downgradestatusApi. 
:param request: :param pk: pk of the proposal to downgrade :return: """ obj = get_object_or_404(Proposal, pk=pk) if request.user == obj.ResponsibleStaff or request.user == obj.Track.Head: if request.method == "POST": form = ProposalDowngradeMessageForm(request.POST) if form.is_valid(): message = form.cleaned_data['Message'] r = downgrade_status_api(request, pk, message) obj = get_object_or_404(Proposal, pk=pk) # should be after downgrade notify = r.content.decode() if message != '': notify += "<br />With note: <br /> {}".format(message) return render(request, "proposals/message_project.html", {"Message": notify, "project": obj}, status=r.status_code) else: form = ProposalDowngradeMessageForm() # request=request return render(request, 'GenericForm.html', {'form': form, 'formtitle': 'Message for downgrade proposal ' + obj.Title, 'buttontext': 'Downgrade and send message'}) else: # assistant downgrade does not get the message field. r = downgrade_status_api(request, pk) obj = get_object_or_404(Proposal, pk=pk) # should be after downgrade return render(request, "proposals/message_project.html", {"Message": r.content.decode(), "project": obj}, status=r.status_code) @group_required('type1staff', 'type2staff', 'type2staffunverified', 'type3staff', 'type4staff') @can_share_project def share(request, pk): """ Get a sharelink for a given proposal. This link is a public view link for a proposal-detailpage for when the proposal is not yet public. :param request: :param pk: Proposal pk to get sharelink for :return: """ link = get_share_link(pk) return render(request, "base.html", { "Message": "Share link created: <a href=\"{}\">{}</a> <br/>" " Use this to show the proposal to anybody without an account. " "The link will be valid for seven days.".format(link, link), }) def view_share_link(request, token): """ Translate a given sharelink to a proposal-detailpage. :param request: :param token: sharelink token, which includes the pk of the proposal :return: proposal detail render """ try: pk = signing.loads(token, max_age=settings.MAXAGESHARELINK) except signing.SignatureExpired: return render(request, "base.html", { "Message": "Share link has expired!" }) except signing.BadSignature: return render(request, "base.html", { "Message": "Invalid token in share link!" }) obj = get_object_or_404(Proposal, pk=pk) return render(request, "proposals/detail_project.html", { "proposal": obj, "project": obj }) @group_required('type1staff', 'type2staff', 'type3staff', 'type4staff') @phase_required(5, 6, 7) def stats_personal(request, timeslot, step=0): """ Gives an overview of the statistics of the proposals of the user. These include the amount of visitors and applications. 
This is only for timephase 5 and later :param request: :param step: integer, which step of the wizard view you want to see, supplied via URI :return: """ timeslot = get_object_or_404(TimeSlot, pk=timeslot) step = int(step) projects = get_all_projects(old=True).filter(TimeSlot=timeslot, Status=4) if get_grouptype("3") in request.user.groups.all(): pass else: projects = projects.filter(Q(ResponsibleStaff=request.user) | Q(Assistants=request.user) | Q(Group__Administrators=request.user)) projects = list(projects.distinct().annotate(Count('distributions')).order_by('-distributions__count')) if step == 0: return render(request, "proposals/stats_project_personal.html", {"step": 0, 'timeslot': timeslot, 'timeslots': TimeSlot.objects.exclude(pk=timeslot.pk)}) elif step == 1: counts = [] tabledata = [] for p in projects: try: counts.append(p.tracking.UniqueVisitors.count()) except ProposalTracking.DoesNotExist: counts.append(0) try: tabledata.append({ "prop": p, "count": p.tracking.UniqueVisitors.count() }) except ProposalTracking.DoesNotExist: tabledata.append({ "prop": p, "count": 0 }) return render(request, "proposals/stats_project_personal.html", { "counts": counts, "labels": [truncate_string(p.Title) for p in projects], "tabledata": tabledata, "step": 1, 'timeslot': timeslot, }) else: if step - 2 >= len(projects): return render(request, "proposals/stats_project_personal.html", {"step": -1, 'timeslot': timeslot}) prop = projects[step - 2] try: count = prop.tracking.UniqueVisitors.count() except ProposalTracking.DoesNotExist: count = 0 return render(request, "proposals/stats_project_personal.html", { "prop": prop, "visitors": count, "applications": [prop.applications.filter(Priority=n).count() for n in range(1, settings.MAX_NUM_APPLICATIONS + 1)], "distributed": prop.distributions.count(), "step": step, 'timeslot': timeslot }) @group_required('type1staff', 'type2staff', 'type3staff', 'type4staff', 'type5staff', 'type6staff') def project_stats(request, timeslot=None): """ Statistics for projects, allowed for all staff except unverified. :param request: :param timeslot: the time slot to view proposals from :return: """ Project = Proposal if timeslot is None: # all projects p
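
# Hedged sketch (get_share_link() is defined elsewhere and not shown here): a
# minimal counterpart to view_share_link() above, assuming the token is built
# with django.core.signing so that signing.loads(token, max_age=...) can
# recover the proposal pk.  The URL name 'proposals:viewsharelink' is an
# assumption made for illustration.
from django.core import signing
from django.urls import reverse

def make_share_token(pk):
    return signing.dumps(pk)

def make_share_link_sketch(request, pk):
    token = make_share_token(pk)
    return request.build_absolute_uri(
        reverse('proposals:viewsharelink', kwargs={'token': token}))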
self.rvrow += 1 return def putFieldset ( self,fset_id,title ): pyrvapi.rvapi_add_fieldset ( fset_id,title,self.report_page_id(),self.rvrow,0,1,1 ) self.rvrow += 1 return def putSection ( self,sec_id,sectionName,openState_bool=False ): pyrvapi.rvapi_add_section ( sec_id,sectionName,self.report_page_id(), self.rvrow,0,1,1,openState_bool ) self.rvrow += 1 return # ============================================================================ # define basic HTML report functions def putSummaryLine ( self,line0,line1,line2 ): if self.import_summary_id(): pyrvapi.rvapi_put_table_string ( self.import_summary_id(),line0,self.summary_row,0 ) pyrvapi.rvapi_put_table_string ( self.import_summary_id(),line1,self.summary_row,1 ) pyrvapi.rvapi_put_table_string ( self.import_summary_id(),line2,self.summary_row,2 ) self.summary_row_0 = self.summary_row self.summary_row += 1 return def addSummaryLine ( self,line1,line2 ): if self.import_summary_id(): pyrvapi.rvapi_put_table_string ( self.import_summary_id(),line1,self.summary_row,0 ) pyrvapi.rvapi_put_table_string ( self.import_summary_id(),line2,self.summary_row,1 ) self.summary_row += 1 pyrvapi.rvapi_shape_table_cell ( self.import_summary_id(),self.summary_row_0,0,"","","", self.summary_row-self.summary_row_0,1 ); return def putSummaryLine_red ( self,line0,line1,line2 ): if self.import_summary_id(): pyrvapi.rvapi_put_table_string ( self.import_summary_id(),line0,self.summary_row,0 ) pyrvapi.rvapi_put_table_string ( self.import_summary_id(),line1,self.summary_row,1 ) pyrvapi.rvapi_put_table_string ( self.import_summary_id(),line2,self.summary_row,2 ) pyrvapi.rvapi_shape_table_cell ( self.import_summary_id(),self.summary_row,0,"", "text-align:left;color:maroon;","",1,1 ) pyrvapi.rvapi_shape_table_cell ( self.import_summary_id(),self.summary_row,1,"", "text-align:left;color:maroon;","",1,1 ) pyrvapi.rvapi_shape_table_cell ( self.import_summary_id(),self.summary_row,2,"", "text-align:left;color:maroon;","",1,1 ) self.summary_row += 1 return def putTable ( self,tableId,title_str,holderId,row,mode=100 ): pyrvapi.rvapi_add_table ( tableId,title_str,holderId,row,0,1,1, mode ) pyrvapi.rvapi_set_table_style ( tableId, "table-blue","text-align:left;" ) return def setTableHorzHeaders ( self,tableId,header_list,tooltip_list ): for i in range(len(header_list)): pyrvapi.rvapi_put_horz_theader ( tableId,header_list[i], tooltip_list[i],i ) return def putTableLine ( self,tableId,header,tooltip,line,row ): pyrvapi.rvapi_put_vert_theader ( tableId,header,tooltip,row ) if line: pyrvapi.rvapi_put_table_string ( tableId,line,row,0 ) pyrvapi.rvapi_shape_table_cell ( tableId,row,0,"", "text-align:left;width:100%;white-space:nowrap;" + \ "font-family:\"Courier\";text-decoration:none;" + \ "font-weight:normal;font-style:normal;width:auto;", "",1,1 ); return row+1 # ============================================================================ def open_stdin ( self ): self.file_stdin = open ( self.file_stdin_path(),"w" ) return def write_stdin ( self,S ): self.file_stdin.write ( S ) return def close_stdin ( self ): self.file_stdin.close() return # ============================================================================ def writeKWParameter ( self,item ): if item.visible: if (item.type == "integer" or item.type == "real"): self.file_stdin.write ( item.keyword + " " + str(item.value) + "\n" ) elif (item.type == "integer_" or item.type == "real_") and (item.value != ""): self.file_stdin.write ( item.keyword + " " + str(item.value) + "\n" ) elif (item.type == "combobox"): 
self.file_stdin.write ( item.keyword + " " + item.value + "\n" ) elif (item.type == "checkbox"): if item.value: self.file_stdin.write ( item.keyword + " " + item.translate[1] + "\n" ) else: self.file_stdin.write ( item.keyword + " " + item.translate[0] + "\n" ) return def putKWParameter ( self,item ): if item.visible: if item.type=="checkbox": if item.value: return item.keyword + "\n" else: return "" else: return item.keyword + " " + str(item.value) + "\n" else: return "" def getKWParameter ( self,keyword,item ): if item.visible: if item.type=="checkbox": if item.value: return " " + keyword else: return "" else: v = str(item.value) if v: if keyword.endswith("=") or keyword.endswith("::"): return " " + keyword + v else: return " " + keyword + " " + v else: return "" def getKWItem ( self,item ): if item.visible: if item.type=="checkbox": if hasattr(item,'translate'): if item.value: return " " + item.keyword + str(item.translate[1]) else: return " " + item.keyword + str(item.translate[0]) elif item.value: return " " + item.keyword else: return "" else: v = str(item.value) if v and v!="_blank_": if item.keyword.endswith("=") or item.keyword.endswith("::"): return " " + item.keyword + v else: return " " + item.keyword + " " + v return "" def getParameter ( self,item,checkVisible=True ): if item.visible or not checkVisible: return str(item.value) return "" """ if (item.type == "integer" or item.type == "real"): return str(item.value) elif (item.type == "integer_" or item.type == "real_") and (item.value != ""): return str(item.value) else: return str(item.value) return "" """ # ============================================================================ def makeClass ( self,dict ): return databox.make_class ( dict ) # ============================================================================ def unsetLogParser ( self ): self.file_stdout.flush() self.log_parser = None pyrvapi.rvapi_flush() #self.file_stdout = open ( self.file_stdout_path(),'a' ) return def setGenericLogParser ( self,panel_id,split_sections_bool,graphTables=False ): self.putPanel ( panel_id ) #self.generic_parser_summary = {} self.log_parser = pyrvapi_ext.parsers.generic_parser ( panel_id,split_sections_bool, summary=self.generic_parser_summary, graph_tables=graphTables ) pyrvapi.rvapi_flush() return def setMolrepLogParser ( self,panel_id ): self.putPanel ( panel_id ) self.log_parser = pyrvapi_ext.parsers.molrep_parser ( panel_id ) pyrvapi.rvapi_flush() return # ============================================================================ def stampFileName ( self,serNo,fileName ): return dtype_template.makeFileName ( self.job_id,serNo,fileName ) def makeDataId ( self,serNo ): return dtype_template.makeDataId ( self.job_id,serNo ) # ============================================================================ def storeReportDocument(self,meta_str): if meta_str: pyrvapi.rvapi_put_meta ( meta_str ) pyrvapi.rvapi_store_document2 ( self.reportDocumentName() ) return def restoreReportDocument(self): pyrvapi.rvapi_restore_document2 ( self.reportDocumentName() ) return pyrvapi.rvapi_get_meta() # ============================================================================ def makeFullASUSequenceFile ( self,seq_list,title,fileName ): combseq = "" for s in seq_list: seqstring = self.makeClass(s).getSequence ( self.inputDir() ) for i in range(s.ncopies): combseq += seqstring dtype_sequence.writeSeqFile ( fileName,title,combseq ) return # ============================================================================ def runApp ( 
self,appName,cmd,quitOnError=True ): input_script = None if self.file_stdin: input_script = self.file_stdin_path() self._scriptNo += 1 rc = command.call ( appName,cmd,"./",input_script, self.file_stdout,self.file_stderr,self.log_parser ) self.file_stdin = None if rc.msg and quitOnError: raise signal.JobFailure ( rc.msg ) return rc # ============================================================================ def calcEDMap ( self,xyzPath,hklData,libPath,filePrefix ): edmap.calcEDMap ( xyzPath,os.path.join(self.inputDir(),hklData.files[0]), libPath,hklData.dataset,filePrefix,self.job_dir, self.file_stdout,self.file_stderr,self.log_parser ) return [ filePrefix + edmap.file_pdb (), filePrefix + edmap.file_mtz (), filePrefix + edmap.file_map (), filePrefix + edmap.file_dmap() ] def calcAnomEDMap ( self,xyzPath,hklData,anom_form,filePrefix ): edmap.calcAnomEDMap ( xyzPath,os.path.join(self.inputDir(),hklData.files[0]), hklData.dataset,anom_form,filePrefix,self.job_dir, self.file_stdout,self.file_stderr,self.log_parser ) return [ filePrefix + edmap.file_pdb(), filePrefix + edmap.file_mtz(), filePrefix + edmap.file_map(), filePrefix + edmap.file_dmap() ] def calcCCP4Maps ( self,mtzPath,filePrefix,source_key="refmac" ): edmap.calcCCP4Maps ( mtzPath,filePrefix,self.job_dir, self.file_stdout,self.file_stderr, source_key,self.log_parser ) return [ filePrefix + edmap.file_map(), filePrefix + edmap.file_dmap() ] def finaliseStructure ( self,xyzPath,name_pattern,hkl,libPath,associated_data_list, subtype,openState_bool=False, title="Output Structure" ): # subtype = 0: copy subtype from associated data # = 1: set MR subtype # = 2: set EP subtype structure = None if os.path.isfile(xyzPath): sec_id = self.refmac_section() + "_" + str(self.widget_no) self.putSection ( sec_id,"Electron Density Calculations with Refmac", openState_bool ) panel_id = self.refmac_report() + "_" + str(self.widget_no) pyrvapi.rvapi_add_panel ( panel_id,sec_id,0,0,1,1 ) #self.log_parser = pyrvapi_ext.parsers.generic_parser ( panel_id,False ) self.log_parser = pyrvapi_ext.parsers.generic_parser ( panel_id,False, summary=self.generic_parser_summary, graph_tables=False ) fnames = self.calcEDMap ( xyzPath,hkl,libPath,name_pattern ) # Register output data. 
This moves needful files into output directory # and puts the corresponding metadata into output databox structure = self.registerStructure ( fnames[0],fnames[1],fnames[2],fnames[3],libPath ) if structure: structure.addDataAssociation ( hkl.dataId ) structure.setRefmacLabels ( hkl ) for i in range(len(associated_data_list)): if associated_data_list[i]: structure.addDataAssociation ( associated_data_list[i].dataId ) if subtype==0: for i in range(len(associated_data_list)): if associated_data_list[i]: structure.copySubtype ( associated_data_list[i] ) elif subtype==1: structure.addMRSubtype() else: structure.addEPSubtype() structure.addXYZSubtype() if title!="": self.putTitle ( title ) self.putMessage ( "&nbsp;" ) self.putStructureWidget ( "structure_btn_", "Structure and electron density", structure ) else: self.putTitle ( "No Solution Found" ) self.widget_no += 1 return structure def finaliseAnomSubstructure ( self,xyzPath,name_pattern,hkl, associated_data_list, anom_form,openState_bool=False, title="" ): anom_structure = self.finaliseAnomSubstructure1 ( xyzPath,name_pattern, hkl,associated_data_list,anom_form, self.report_page_id(),self.rvrow, openState_bool,title ) self.rvrow += 2 if anom_structure: self.rvrow += 1 if title: self.rvrow += 1 return anom_structure def finaliseAnomSubstructure1 ( self,xyzPath,name_pattern,hkl, associated_data_list,anom_form,pageId, row,openState_bool=False,title="" ): sec_id = self.refmac_section() + "_" + str(self.widget_no) row1 = row pyrvapi.rvapi_add_section ( sec_id, "Anomalous Electron Density Calculations with Refmac", pageId,row1,0,1,1,openState_bool ) row1 += 1 panel_id = self.refmac_report() + "_" + str(self.widget_no) pyrvapi.rvapi_add_panel ( panel_id,sec_id,0,0,1,1 ) #self.log_parser = pyrvapi_ext.parsers.generic_parser ( panel_id,False ) self.log_parser = pyrvapi_ext.parsers.generic_parser ( panel_id,False, summary=self.generic_parser_summary, graph_tables=False ) fnames = self.calcAnomEDMap ( xyzPath,hkl,anom_form,name_pattern ) anom_structure = self.registerStructure ( fnames[0],fnames[1],fnames[2],fnames[3],None ) if anom_structure: anom_structure.addDataAssociation ( hkl.dataId ) anom_structure.setRefmacLabels ( hkl ) for i in range(len(associated_data_list)): if associated_data_list[i]: structure.addDataAssociation ( associated_data_list[i].dataId ) anom_structure.setAnomSubstrSubtype() # anomalous maps self.putMessage1 ( pageId,"&nbsp;",row1,1 ) row1 += 1 if title!="": self.putTitle1 ( pageId,title,row1,1 ) row1 += 1 openState = -1 if openState_bool: openState = 1 self.putStructureWidget1 ( pageId,"anom_structure_btn_", "Anomalous substructure and electron density", anom_structure,openState,row1,1 ) return anom_structure else: self.putTitle1 ( pageId,"No Anomalous Structure Found",row1,1 ) return None def finaliseLigand ( self,code,xyzPath,cifPath,openState_bool=False, title="Ligand Structure" ): ligand = None if os.path.isfile(xyzPath): # Register output data. 
This moves needful files into output directory # and puts the corresponding metadata into output databox ligand = self.registerLigand ( xyzPath,cifPath ) if ligand: if title!="": self.putTitle ( title ) ligand.code = code self.putLigandWidget ( "ligand_btn_","Ligand structure", ligand ) else: self.putTitle ( "No Ligand Formed" ) self.widget_no += 1 return ligand # ============================================================================ def putInspectButton ( self,dataObject,title,gridId,row,col ): buttonId = "inspect_data_" + str(self.widget_no) self.widget_no += 1 pyrvapi.rvapi_add_button ( buttonId,title,"{function}", "window.parent.rvapi_inspectData(" + self.job_id +\ ",'" + dataObject._type + "','" + dataObject.dataId + "')", False,gridId, row,col,1,1 ) return # ============================================================================ def putRevisionWidget ( self,gridId,row,message,revision ): buttonId = "inspect_" + str(self.widget_no) self.widget_no += 1 pyrvapi.rvapi_add_button ( buttonId,"Inspect","{function}", "window.parent.rvapi_inspectData(" + self.job_id +\ ",'DataRevision','" + revision.dataId + "')", False,gridId, row,0,1,1 ) pyrvapi.rvapi_set_text ( message,gridId, row,1,1,1 ) pyrvapi.rvapi_set_text ( "<font style='font-size:120%;'>\"" + revision.dname + "\"</font>", gridId, row,2,1,1 ) return def registerRevision ( self,revision,serialNo=1,title="Structure Revision", message="<b><i>New structure revision name:</i></b>", gridId = ""
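# ============================================================================
# Standalone sketch (separate from the task class above, which is cut short
# here): how the JavaScript command string handed to pyrvapi.rvapi_add_button
# in putInspectButton()/putRevisionWidget() is assembled.  The helper name
# make_inspect_command and the sample values are hypothetical; only the
# "window.parent.rvapi_inspectData(...)" format is taken from the code above.

def make_inspect_command ( job_id,data_type,data_id ):
    # mirrors the concatenation in putInspectButton: the job id is pasted in
    # unquoted, while the data type and data id are single-quoted
    return "window.parent.rvapi_inspectData(" + str(job_id) +\
           ",'" + data_type + "','" + data_id + "')"

if __name__ == "__main__":
    print ( make_inspect_command(42,"DataRevision","0017-03") )
    # -> window.parent.rvapi_inspectData(42,'DataRevision','0017-03')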
<gh_stars>10-100 import asyncio import logging import os import random import re from datetime import datetime, timedelta from pathlib import Path from typing import Callable, NamedTuple import aiohttp from async_timeout import timeout from buildpg import MultipleValues, Values, asyncpg from .actions import ActionTypes from .emails.defaults import Triggers from .emails.main import EmailActor from .images import upload_background from .settings import Settings from .utils import mk_password, slugify logger = logging.getLogger('nosht.db') patches = [] class Patch(NamedTuple): func: Callable direct: bool = False async def lenient_conn(settings: Settings, with_db=True): if with_db: dsn = settings.pg_dsn else: dsn, _ = settings.pg_dsn.rsplit('/', 1) for retry in range(8, -1, -1): try: async with timeout(2): conn = await asyncpg.connect_b(dsn=dsn) except (asyncpg.PostgresError, OSError) as e: if retry == 0: raise else: logger.warning('pg temporary connection error "%s", %d retries remaining...', e, retry) await asyncio.sleep(1) else: log = logger.debug if retry == 8 else logger.info log('pg connection successful, version: %s', await conn.fetchval('SELECT version()')) return conn DROP_CONNECTIONS = """ SELECT pg_terminate_backend(pg_stat_activity.pid) FROM pg_stat_activity WHERE pg_stat_activity.datname = $1 AND pid <> pg_backend_pid(); """ async def prepare_database(settings: Settings, overwrite_existing: bool) -> bool: """ (Re)create a fresh database and run migrations. :param settings: settings to use for db connection :param overwrite_existing: whether or not to drop an existing database if it exists :return: whether or not a database has been (re)created """ # the db already exists on heroku and never has to be created if settings.on_heroku: conn = await lenient_conn(settings, with_db=True) try: tables = await conn.fetchval("SELECT count(*) FROM information_schema.tables WHERE table_schema='public'") logger.info('existing tables: %d', tables) if tables > 0: if overwrite_existing: logger.debug('database already exists...') else: logger.debug('database already exists ✓') return False finally: await conn.close() else: conn = await lenient_conn(settings, with_db=False) try: if not overwrite_existing: # don't drop connections and try creating a db if it already exists and we're not overwriting exists = await conn.fetchval('SELECT 1 AS result FROM pg_database WHERE datname=$1', settings.pg_name) if exists: return False await conn.execute(DROP_CONNECTIONS, settings.pg_name) logger.debug('attempting to create database "%s"...', settings.pg_name) try: await conn.execute('CREATE DATABASE {}'.format(settings.pg_name)) except (asyncpg.DuplicateDatabaseError, asyncpg.UniqueViolationError): if overwrite_existing: logger.debug('database already exists...') else: logger.debug('database already exists, skipping creation') return False else: logger.debug('database did not exist, now created') logger.debug('settings db timezone to utc...') await conn.execute(f"ALTER DATABASE {settings.pg_name} SET TIMEZONE TO 'UTC';") finally: await conn.close() conn = await asyncpg.connect(dsn=settings.pg_dsn) try: logger.debug('creating tables from model definition...') async with conn.transaction(): await conn.execute(settings.models_sql + '\n' + settings.logic_sql) finally: await conn.close() logger.info('database successfully setup ✓') return True class SimplePgPool: # pragam: no cover def __init__(self, conn): self.conn = conn # could also add lock to each method of the returned connection self._lock = 
asyncio.Lock(loop=self.conn._loop) def acquire(self): return self async def __aenter__(self): return self.conn async def execute(self, *args, **kwargs): async with self._lock: return await self.conn.execute(*args, **kwargs) async def fetch(self, *args, **kwargs): async with self._lock: return await self.conn.fetch(*args, **kwargs) async def fetchval(self, *args, **kwargs): async with self._lock: return await self.conn.fetchval(*args, **kwargs) async def fetchrow(self, *args, **kwargs): async with self._lock: return await self.conn.fetchrow(*args, **kwargs) async def __aexit__(self, exc_type, exc_val, exc_tb): pass async def close(self): pass def reset_database(settings: Settings): if not (os.getenv('CONFIRM_DATABASE_RESET') == 'confirm' or input('Confirm database reset? [yN] ') == 'y'): print('cancelling') else: print('resetting database...') loop = asyncio.get_event_loop() loop.run_until_complete(prepare_database(settings, True)) print('done.') def run_patch(settings: Settings, live, patch_name): if patch_name is None: print( 'available patches:\n{}'.format( '\n'.join(' {}: {}'.format(p.func.__name__, p.func.__doc__.strip('\n ')) for p in patches) ) ) return patch_lookup = {p.func.__name__: p for p in patches} try: patch = patch_lookup[patch_name] except KeyError as e: raise RuntimeError(f'patch "{patch_name}" not found in patches: {[p.func.__name__ for p in patches]}') from e if patch.direct: if not live: raise RuntimeError('direct patches must be called with "--live"') print(f'running patch {patch_name} direct') else: print(f'running patch {patch_name} live {live}') loop = asyncio.get_event_loop() return loop.run_until_complete(_run_patch(settings, live, patch)) async def _run_patch(settings, live, patch: Patch): conn = await lenient_conn(settings) tr = None if not patch.direct: tr = conn.transaction() await tr.start() print('=' * 40) try: await patch.func(conn, settings=settings, live=live) except BaseException: print('=' * 40) logger.exception('Error running %s patch', patch.func.__name__) if not patch.direct: await tr.rollback() return 1 else: print('=' * 40) if patch.direct: print('committed patch') else: if live: print('live, committed patch') await tr.commit() else: print('not live, rolling back') await tr.rollback() finally: await conn.close() def patch(*args, direct=False): if args: assert len(args) == 1, 'wrong arguments to patch' func = args[0] patches.append(Patch(func=func)) return func else: def wrapper(func): patches.append(Patch(func=func, direct=direct)) return func return wrapper @patch async def run_logic_sql(conn, settings, **kwargs): """ run logic.sql code. 
""" await conn.execute(settings.logic_sql) @patch(direct=True) async def update_enums(conn, settings, **kwargs): """ update sql from ActionTypes and Triggers enums (direct) """ for t in ActionTypes: await conn.execute(f"ALTER TYPE ACTION_TYPES ADD VALUE IF NOT EXISTS '{t.value}'") for t in Triggers: await conn.execute(f"ALTER TYPE EMAIL_TRIGGERS ADD VALUE IF NOT EXISTS '{t.value}'") USERS = [ { 'first_name': 'Frank', 'last_name': 'Spencer', 'email': '<EMAIL>', 'role': 'admin', 'status': 'active', 'password': '<PASSWORD>', }, { 'first_name': 'Jane', 'last_name': 'Dow', 'email': '<EMAIL>', 'role': 'host', 'status': 'pending', 'password': '<PASSWORD>', }, ] IMAGES = [ '0KPVDbDZFU/main.jpg', '0PYuY1448h/main.jpg', '8MkUuIAlTC/main.jpg', '8TeTuuJ2Eo/main.jpg', '9temf5JuFg/main.jpg', 'EVmwCc1E3j/main.jpg', 'H0aDotyQ10/main.jpg', 'H2tpkxGYFB/main.jpg', 'LODboU025Q/main.jpg', 'a3Am9F1TwZ/main.jpg', 'bHxDxtbBx6/main.jpg', 'bIh18JppSg/main.jpg', 'hTIw27nQBu/main.jpg', 'hvo3lwnW8O/main.jpg', 'jcOl33tWAW/main.jpg', 'qCLMsyr437/main.jpg', 'tutva5VL4W/main.jpg', 'u0Mnok4eTF/main.jpg', 'vCKpy2SW85/main.jpg', ] async def create_image(upload_path, client, settings): url = settings.s3_demo_image_url + random.choice(IMAGES) async with client.get(url) as r: assert r.status == 200, r.status content = await r.read() return await upload_background(content, upload_path, settings) CATS = [ { 'name': 'Supper Clubs', 'description': 'Eat, drink & discuss middle aged, middle class things like house prices and consumerist guilt', 'ticket_extra_title': 'Dietary Requirements & Extra Information', 'ticket_extra_help_text': 'This is the help text for this field, tell us about your nut allergy', 'sort_index': 1, 'events': [ { 'status': 'published', 'highlight': True, 'name': "<NAME>", 'start_ts': datetime(2032, 1, 28, 19, 0), 'duration': timedelta(hours=2), 'location_name': '31 Testing Road, London', 'location_lat': 51.479415, 'location_lng': -0.132098, 'ticket_limit': 40, 'host_email': '<EMAIL>', 'ticket_types': [{'name': 'Standard', 'price': 30}], }, { 'status': 'published', 'highlight': True, 'name': "<NAME>", 'start_ts': datetime(2032, 2, 10, 18, 0), 'duration': timedelta(hours=3), 'location_name': '253 Brixton Road, London', 'location_lat': 51.514412, 'location_lng': -0.073994, 'ticket_limit': None, 'host_email': '<EMAIL>', 'ticket_types': [{'name': 'Standard', 'price': 25, 'slots_used': 5}], }, ], }, { 'name': '<NAME>', 'description': 'Sing loudly and badly in the company of other people too polite to comment', 'ticket_extra_title': 'Extra Information', 'ticket_extra_help_text': 'This is the help text for this field', 'sort_index': 2, 'events': [ { 'status': 'published', 'highlight': True, 'name': '<NAME>', 'start_ts': datetime(2032, 2, 15), 'duration': None, 'location_name': 'Big Church, London', 'ticket_limit': None, 'host_email': '<EMAIL>', 'ticket_types': [{'name': 'Standard', 'price': None, 'slots_used': 5}], }, { 'status': 'published', 'highlight': False, 'name': '<NAME>', 'start_ts': datetime(2032, 2, 20), 'duration': None, 'location_name': 'Small Church, London', 'ticket_limit': None, 'host_email': '<EMAIL>', 'ticket_types': [{'name': 'Standard', 'price': None}], }, ], }, ] EVENT_LONG_DESCRIPTION = """ Sit quisquam quisquam eius sed tempora. Aliquam labore **quisquam** tempora _voluptatem_. Porro eius eius etincidunt sit etincidunt. Adipisci dolor amet eius. [Magnam quaerat](https://www.example.com). Neque labore est numquam dolorem. Quiquia ipsum ut dolore dolore porro. 
Voluptatem consectetur amet ipsum adipisci dolor aliquam. Quiquia modi tempora tempora non amet aliquam. Aliquam eius quiquia voluptatem. Numquam numquam etincidunt neque non est est consectetur. ## Lists Example of list: * Tempora ut aliquam consectetur aliquam. * Dolorem quaerat porro ipsum. Sed ipsum tempora est. Neque * amet amet quisquam dolore labore magnam. Numbered: 1. whatever 1. whenever 1. whichever ### Table | foo | bar | | --- | --- | | baz | bim | """ @patch async def create_demo_data(conn, settings, **kwargs): """ Create some demo data for manual testing. """ async with aiohttp.ClientSession() as client: co_slug = 'testing-co' company_id = await conn.fetchval_b( 'INSERT INTO companies (:values__names) VALUES :values RETURNING id', values=Values( name='Testing Company', slug=co_slug, image=await create_image(Path(co_slug) / 'co' / 'image', client, settings), domain=kwargs.get('company_domain', os.getenv('NEW_COMPANY_DOMAIN', 'localhost')), # from "Scolvin Testing" testing account stripe_public_key='<KEY>', stripe_secret_key='<KEY>', ), ) user_lookup = {} for user in USERS: user_lookup[user['email']] = await conn.fetchval_b( 'INSERT INTO users (:values__names) VALUES :values RETURNING id', values=Values(company=company_id, password_hash=mk_password(user.pop('password'), settings), **user), ) for cat in CATS: events = cat.pop('events') cat_slug = slugify(cat['name']) cat_id = await conn.fetchval_b( 'INSERT INTO categories (:values__names) VALUES :values RETURNING id', values=Values( company=company_id, slug=cat_slug, image=await create_image(Path(co_slug) / cat_slug / 'option', client, settings), **cat, ), ) for event in events: ticket_types = event.pop('ticket_types', []) event_slug = slugify(event['name']) event_id = await conn.fetchval_b( 'INSERT INTO events (:values__names) VALUES :values RETURNING id', values=Values( category=cat_id, host=user_lookup[event.pop('host_email')], slug=event_slug, image=await create_image(Path(co_slug) / cat_slug / event_slug, client, settings), short_description='Neque labore est numquam dolorem. Quiquia ipsum ut dolore dolore porro.', long_description=EVENT_LONG_DESCRIPTION, **event, ), ) await conn.executemany_b( 'INSERT INTO ticket_types (:values__names) VALUES :values', [Values(event=event_id, **tt) for
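# ----------------------------------------------------------------------------
# A minimal, self-contained sketch of the @patch registration mechanism used
# above (the Patch NamedTuple plus the module-level "patches" list), so the
# decorator's two call forms are easy to see in isolation.  The example patch
# functions add_missing_index and vacuum_events are hypothetical; real patches
# in this module receive an asyncpg connection and the Settings object.

from typing import Callable, NamedTuple

patches = []


class Patch(NamedTuple):
    func: Callable
    direct: bool = False


def patch(*args, direct=False):
    # bare form:        @patch              -> transactional patch
    # parametrised form: @patch(direct=True) -> patch run outside a transaction
    if args:
        assert len(args) == 1, 'wrong arguments to patch'
        func = args[0]
        patches.append(Patch(func=func))
        return func

    def wrapper(func):
        patches.append(Patch(func=func, direct=direct))
        return func

    return wrapper


@patch
async def add_missing_index(conn, settings, **kwargs):
    """hypothetical patch: create an index if it does not exist"""
    await conn.execute('CREATE INDEX IF NOT EXISTS idx_events_slug ON events (slug)')


@patch(direct=True)
async def vacuum_events(conn, settings, **kwargs):
    """hypothetical direct patch: VACUUM cannot run inside a transaction"""
    await conn.execute('VACUUM ANALYZE events')


if __name__ == '__main__':
    # run_patch() looks patches up by function name, exactly like this:
    print({p.func.__name__: p.direct for p in patches})
    # -> {'add_missing_index': False, 'vacuum_events': True}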
project.set_usage_export_bucket(bucket=bucket_name) def test__set_project_metadata(self): self.assertEqual( len(self.driver._set_project_metadata(None, False, "")), 0) # 'delete' metadata, but retain current sshKeys md = self.driver._set_project_metadata(None, False, "this is a test") self.assertEqual(len(md), 1) self.assertEqual(md[0]['key'], 'sshKeys') self.assertEqual(md[0]['value'], 'this is a test') # 'delete' metadata *and* any existing sshKeys md = self.driver._set_project_metadata(None, True, "this is a test") self.assertEqual(len(md), 0) # add new metadata, keep existing sshKeys, since the new value also # has 'sshKeys', we want the final struct to only have one ke/value # of sshKeys and it should be the "current_keys" gce_md = {'items': [{'key': 'foo', 'value': 'one'}, {'key': 'sshKeys', 'value': 'another test'}]} md = self.driver._set_project_metadata(gce_md, False, "this is a test") self.assertEqual(len(md), 2, str(md)) sshKeys = "" count = 0 for d in md: if d['key'] == 'sshKeys': count += 1 sshKeys = d['value'] self.assertEqual(sshKeys, 'this is a test') self.assertEqual(count, 1) # add new metadata, overwrite existing sshKeys, in this case, the # existing 'sshKeys' value should be replaced gce_md = {'items': [{'key': 'foo', 'value': 'one'}, {'key': 'sshKeys', 'value': 'another test'}]} md = self.driver._set_project_metadata(gce_md, True, "this is a test") self.assertEqual(len(md), 2, str(md)) sshKeys = "" count = 0 for d in md: if d['key'] == 'sshKeys': count += 1 sshKeys = d['value'] self.assertEqual(sshKeys, 'another test') self.assertEqual(count, 1) # add new metadata, remove existing sshKeys. in this case, we had an # 'sshKeys' entry, but it will be removed entirely gce_md = {'items': [{'key': 'foo', 'value': 'one'}, {'key': 'nokeys', 'value': 'two'}]} md = self.driver._set_project_metadata(gce_md, True, "this is a test") self.assertEqual(len(md), 2, str(md)) sshKeys = "" count = 0 for d in md: if d['key'] == 'sshKeys': count += 1 sshKeys = d['value'] self.assertEqual(sshKeys, '') self.assertEqual(count, 0) def test_ex_set_common_instance_metadata(self): # test non-dict self.assertRaises(ValueError, self.driver.ex_set_common_instance_metadata, ['bad', 'type']) # test standard python dict pydict = {'key': 'pydict', 'value': 1} self.driver.ex_set_common_instance_metadata(pydict) # test GCE badly formatted dict bad_gcedict = {'items': 'foo'} self.assertRaises(ValueError, self.driver.ex_set_common_instance_metadata, bad_gcedict) # test gce formatted dict gcedict = {'items': [{'key': 'gcedict1', 'value': 'v1'}, {'key': 'gcedict2', 'value': 'v2'}]} self.driver.ex_set_common_instance_metadata(gcedict) # Verify project notation works project = GCEProject(id=None, name=None, metadata=None, quotas=None, driver=self.driver) project.set_common_instance_metadata(metadata=gcedict) def test_ex_set_node_metadata(self): node = self.driver.ex_get_node('node-name', 'us-central1-a') # test non-dict self.assertRaises(ValueError, self.driver.ex_set_node_metadata, node, ['bad', 'type']) # test standard python dict pydict = {'key': 'pydict', 'value': 1} self.driver.ex_set_node_metadata(node, pydict) # test GCE badly formatted dict bad_gcedict = {'items': 'foo'} self.assertRaises(ValueError, self.driver.ex_set_node_metadata, node, bad_gcedict) # test gce formatted dict gcedict = {'items': [{'key': 'gcedict1', 'value': 'v1'}, {'key': 'gcedict2', 'value': 'v2'}]} self.driver.ex_set_node_metadata(node, gcedict) def test_ex_set_node_labels(self): node = self.driver.ex_get_node('node-name', 
'us-central1-a') # Test basic values simplelabel = {'key': 'value'} self.driver.ex_set_node_labels(node, simplelabel) # Test multiple values multilabels = {'item1': 'val1', 'item2': 'val2'} self.driver.ex_set_node_labels(node, multilabels) def test_ex_set_image_labels(self): image = self.driver.ex_get_image('custom-image') # Test basic values simplelabel = {'foo': 'bar'} self.driver.ex_set_image_labels(image, simplelabel) image = self.driver.ex_get_image('custom-image') self.assertTrue('foo' in image.extra['labels']) # Test multiple values multilabels = {'one': '1', 'two': 'two'} self.driver.ex_set_image_labels(image, multilabels) image = self.driver.ex_get_image('custom-image') self.assertEqual(len(image.extra['labels']), 3) self.assertTrue('two' in image.extra['labels']) self.assertTrue('two' in image.extra['labels']) def test_ex_get_region(self): region_name = 'us-central1' region = self.driver.ex_get_region(region_name) self.assertEqual(region.name, region_name) self.assertEqual(region.status, 'UP') self.assertEqual(region.zones[0].name, 'us-central1-a') def test_ex_get_size(self): size_name = 'n1-standard-1' size = self.driver.ex_get_size(size_name) self.assertEqual(size.name, size_name) self.assertEqual(size.extra['zone'].name, 'us-central1-a') self.assertEqual(size.disk, 10) self.assertEqual(size.ram, 3840) self.assertEqual(size.extra['guestCpus'], 1) def test_ex_get_targethttpproxy(self): targethttpproxy_name = 'web-proxy' targethttpproxy = self.driver.ex_get_targethttpproxy( targethttpproxy_name) self.assertEqual(targethttpproxy.name, targethttpproxy_name) self.assertEqual(targethttpproxy.urlmap.name, 'web-map') def test_ex_get_targetinstance(self): targetinstance_name = 'lctargetinstance' targetinstance = self.driver.ex_get_targetinstance(targetinstance_name) self.assertEqual(targetinstance.name, targetinstance_name) self.assertEqual(targetinstance.zone.name, 'us-central1-a') def test_ex_get_targetpool(self): targetpool_name = 'lctargetpool' targetpool = self.driver.ex_get_targetpool(targetpool_name) self.assertEqual(targetpool.name, targetpool_name) self.assertEqual(len(targetpool.nodes), 2) self.assertEqual(targetpool.region.name, 'us-central1') def test_ex_get_instancegroupmanager(self): igmgr_name = 'myinstancegroup' igmgr = self.driver.ex_get_instancegroupmanager(igmgr_name, 'us-central1-b') self.assertEqual(igmgr.name, igmgr_name) self.assertEqual(igmgr.size, 4) self.assertEqual(igmgr.zone.name, 'us-central1-b') # search all zones igmgr = self.driver.ex_get_instancegroupmanager(igmgr_name) self.assertEqual(igmgr.name, igmgr_name) self.assertEqual(igmgr.size, 4) self.assertEqual(igmgr.zone.name, 'us-central1-a') def test_ex_get_instancetemplate(self): instancetemplate_name = 'my-instance-template1' instancetemplate = self.driver.ex_get_instancetemplate( instancetemplate_name) self.assertEqual(instancetemplate.name, instancetemplate_name) self.assertEqual(instancetemplate.extra['properties']['machineType'], 'n1-standard-1') def test_ex_get_snapshot(self): snapshot_name = 'lcsnapshot' snapshot = self.driver.ex_get_snapshot(snapshot_name) self.assertEqual(snapshot.name, snapshot_name) self.assertEqual(snapshot.size, '10') self.assertEqual(snapshot.status, 'READY') def test_ex_get_urlmap(self): urlmap_name = 'web-map' urlmap = self.driver.ex_get_urlmap(urlmap_name) self.assertEqual(urlmap.name, urlmap_name) self.assertEqual(urlmap.default_service.name, 'web-service') def test_ex_get_volume(self): volume_name = 'lcdisk' volume = self.driver.ex_get_volume(volume_name) 
self.assertEqual(volume.name, volume_name) self.assertEqual(volume.size, '10') self.assertEqual(volume.extra['status'], 'READY') self.assertEqual(volume.extra['type'], 'pd-ssd') def test_ex_get_disktype(self): disktype_name = 'pd-ssd' disktype_zone = 'us-central1-a' disktype = self.driver.ex_get_disktype(disktype_name, disktype_zone) self.assertEqual(disktype.name, disktype_name) self.assertEqual(disktype.zone.name, disktype_zone) self.assertEqual(disktype.extra['description'], 'SSD Persistent Disk') self.assertEqual(disktype.extra['valid_disk_size'], '10GB-10240GB') self.assertEqual(disktype.extra['default_disk_size_gb'], '100') # zone not set disktype_name = 'pd-ssd' disktype = self.driver.ex_get_disktype(disktype_name) self.assertEqual(disktype.name, disktype_name) def test_ex_get_zone(self): zone_name = 'us-central1-b' zone = self.driver.ex_get_zone(zone_name) self.assertEqual(zone.name, zone_name) self.assertFalse(zone.time_until_mw) self.assertFalse(zone.next_mw_duration) zone_no_mw = self.driver.ex_get_zone('us-central1-a') self.assertIsNone(zone_no_mw.time_until_mw) class GCEMockHttp(MockHttp, unittest.TestCase): fixtures = ComputeFileFixtures('gce') json_hdr = {'content-type': 'application/json; charset=UTF-8'} def _get_method_name(self, type, use_param, qs, path): api_path = '/compute/%s' % API_VERSION project_path = '/projects/%s' % GCE_KEYWORD_PARAMS['project'] path = path.replace(api_path, '') # This replace is separate, since there is a call with a different # project name path = path.replace(project_path, '') # The path to get project information is the base path, so use a fake # '/project' path instead if not path: path = '/project' method_name = super(GCEMockHttp, self)._get_method_name( type, use_param, qs, path) return method_name def _setUsageExportBucket(self, method, url, body, headers): if method == 'POST': body = self.fixtures.load('setUsageExportBucket_post.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_instances_custom_node(self, method, url, body, header): body = self.fixtures.load( 'zones_us_central1_a_instances_custom_node.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_instances_node_name_setMachineType( self, method, url, body, header): body = self.fixtures.load( 'zones_us_central1_a_instances_node_name_setMachineType.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_operations_operation_setMachineType_notstopped( self, method, url, body, header): body = self.fixtures.load( 'zones_us_central1_a_operations_operation_setMachineType_notstopped.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_instances_custom_node_setMachineType( self, method, url, body, header): body = { "error": { "errors": [ { "domain": "global", "reason": "invalid", "message": "Invalid value for field 'resource.machineTypes': " "'projects/project_name/zones/us-central1-a/machineTypes/custom-1-61440'. Resource was not found.", } ], "code": 400, "message": "Invalid value for field 'resource.machineTypes': " "'projects/project_name/zones/us-central1-a/machineTypes/custom-1-61440'. Resource was not found." 
} } return (httplib.BAD_REQUEST, body, self.json_hdr, httplib.responses[httplib.BAD_REQUEST]) def _zones_us_central1_a_instances_stopped_node_setMachineType( self, method, url, body, header): body = self.fixtures.load( 'zones_us_central1_a_instances_stopped_node_setMachineType.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_operations_operation_setMachineType( self, method, url, body, header): body = self.fixtures.load( 'zones_us_central1_a_operations_operation_setMachineType.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_operations_operation_startnode(self, method, url, body, header): body = self.fixtures.load( 'zones_us_central1_a_operations_operation_startnode.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_instances_stopped_node_start(self, method, url, body, header): body = self.fixtures.load( 'zones_us_central1_a_instances_stopped_node_start.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_instances_stopped_node_stop(self, method, url, body, header): body = self.fixtures.load( 'zones_us_central1_a_instances_stopped_node_stop.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_instances_stopped_node(self, method, url, body, headers): body = self.fixtures.load( 'zones_us_central1_a_instances_stopped_node.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_operations_operation_stopnode(self, method, url, body, headers): body = self.fixtures.load( 'zones_us_central1_a_operations_operation_stopnode.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_instances_node_name_stop(self, method, url, body, headers): body = self.fixtures.load( 'zones_us_central1_a_instances_node_name_stop.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_acceleratorTypes_nvidia_tesla_k80(self, method, url, body, headers): body = self.fixtures.load( 'zones_us_central1_a_acceleratorTypes_nvidia_tesla_k80.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_instances_node_name_setMetadata(self, method, url, body, headers): body = self.fixtures.load( 'zones_us_central1_a_instances_node_name_setMetadata_post.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _zones_us_central1_a_instances_node_name_setLabels(self, method, url, body, headers): body = self.fixtures.load( 'zones_us_central1_a_instances_node_name_setLabels_post.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _global_images_custom_image_setLabels(self, method, url, body, headers): self.assertTrue('global/images/custom-image/setLabels' in url) body = self.fixtures.load( 'global_custom_image_setLabels_post.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _setCommonInstanceMetadata(self, method, url, body, headers): if method == 'POST': body = self.fixtures.load('setCommonInstanceMetadata_post.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _aggregated_subnetworks(self, method, url, body, headers): body = self.fixtures.load('aggregated_subnetworks.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def 
_aggregated_addresses(self, method, url, body, headers): body = self.fixtures.load('aggregated_addresses.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _aggregated_diskTypes(self, method, url, body, headers): body = self.fixtures.load('aggregated_disktypes.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _aggregated_disks(self, method, url, body, headers): body = self.fixtures.load('aggregated_disks.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _aggregated_forwardingRules(self, method, url, body, headers): body = self.fixtures.load('aggregated_forwardingRules.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _aggregated_instances(self, method, url, body, headers): body = self.fixtures.load('aggregated_instances.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _aggregated_instanceGroupManagers(self, method, url, body, headers): body = self.fixtures.load('aggregated_instanceGroupManagers.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _aggregated_machineTypes(self, method, url, body, headers): body = self.fixtures.load('aggregated_machineTypes.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _aggregated_targetInstances(self, method, url, body, headers): body = self.fixtures.load('aggregated_targetInstances.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _aggregated_targetPools(self, method, url, body, headers): body = self.fixtures.load('aggregated_targetPools.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _global_backendServices(self, method, url, body, headers): if method == 'POST': body = self.fixtures.load('global_backendServices_post.json') else: backend_name = getattr(self.test, 'backendservices_mock', 'web-service') body = self.fixtures.load('global_backendServices-%s.json' % backend_name) return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _global_backendServices_no_backends(self, method, url, body, headers): body = self.fixtures.load('global_backendServices_no_backends.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) def _global_backendServices_web_service(self, method, url, body, headers): if method == 'DELETE': body = self.fixtures.load( 'global_backendServices_web_service_delete.json') else: body = self.fixtures.load( 'global_backendServices_web_service.json') return (httplib.OK, body,
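# ----------------------------------------------------------------------------
# A sketch (not the actual libcloud implementation) of the sshKeys-merging
# behaviour asserted by the _set_project_metadata tests above: metadata comes
# back as a list of {'key': ..., 'value': ...} items; when delete_existing is
# False the project's current sshKeys value is retained, when True only the
# entries supplied by the caller survive.

def set_project_metadata(metadata, delete_existing, current_keys):
    # metadata: GCE-style dict {'items': [...]} or None
    # current_keys: the sshKeys value currently stored on the project
    items = list(metadata['items']) if metadata else []
    if delete_existing:
        # keep only what the caller supplied, including any sshKeys it contains
        return items
    # otherwise drop any supplied sshKeys entry and re-add the current value
    merged = [d for d in items if d['key'] != 'sshKeys']
    if current_keys:
        merged.append({'key': 'sshKeys', 'value': current_keys})
    return merged

if __name__ == '__main__':
    gce_md = {'items': [{'key': 'foo', 'value': 'one'},
                        {'key': 'sshKeys', 'value': 'another test'}]}
    print(set_project_metadata(None, False, 'this is a test'))    # only current sshKeys
    print(set_project_metadata(None, True, 'this is a test'))     # empty list
    print(set_project_metadata(gce_md, False, 'this is a test'))  # sshKeys -> current value
    print(set_project_metadata(gce_md, True, 'this is a test'))   # sshKeys -> 'another test'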
u('Santa Maria da Vit\u00f3ria - BA'), 'pt': u('Santa Maria da Vit\u00f3ria - BA')}, '55773484':{'en': 'Santana - BA', 'pt': 'Santana - BA'}, '55773485':{'en': 'Carinhanha - BA', 'pt': 'Carinhanha - BA'}, '55773488':{'en': 'Correntina - BA', 'pt': 'Correntina - BA'}, '55773489':{'en': 'Cocos - BA', 'pt': 'Cocos - BA'}, '55773491':{'en': u('S\u00e3o F\u00e9lix do Coribe - BA'), 'pt': u('S\u00e3o F\u00e9lix do Coribe - BA')}, '55773492':{'en': u('Presidente J\u00e2nio Quadros - BA'), 'pt': u('Presidente J\u00e2nio Quadros - BA')}, '55773493':{'en': 'Guanambi - BA', 'pt': 'Guanambi - BA'}, '55773494':{'en': 'Tremedal - BA', 'pt': 'Tremedal - BA'}, '55773495':{'en': u('Caetit\u00e9 - BA'), 'pt': u('Caetit\u00e9 - BA')}, '55773496':{'en': u('Santa Maria da Vit\u00f3ria - BA'), 'pt': u('Santa Maria da Vit\u00f3ria - BA')}, '55773498':{'en': 'Formoso A - BA', 'pt': 'Formoso A - BA'}, '55773499':{'en': 'Sussuarana - BA', 'pt': 'Sussuarana - BA'}, '55773611':{'en': 'Barreiras - BA', 'pt': 'Barreiras - BA'}, '55773612':{'en': 'Barreiras - BA', 'pt': 'Barreiras - BA'}, '55773613':{'en': 'Barreiras - BA', 'pt': 'Barreiras - BA'}, '55773614':{'en': 'Barreiras - BA', 'pt': 'Barreiras - BA'}, '55773616':{'en': 'Formosa do Rio Preto - BA', 'pt': 'Formosa do Rio Preto - BA'}, '55773617':{'en': u('Baian\u00f3polis - BA'), 'pt': u('Baian\u00f3polis - BA')}, '55773618':{'en': u('Crist\u00f3polis - BA'), 'pt': u('Crist\u00f3polis - BA')}, '55773619':{'en': u('Catol\u00e2ndia - BA'), 'pt': u('Catol\u00e2ndia - BA')}, '55773620':{'en': 'Serra do Ramalho - BA', 'pt': 'Serra do Ramalho - BA'}, '55773621':{'en': 'Cotegipe - BA', 'pt': 'Cotegipe - BA'}, '55773622':{'en': 'Angical - BA', 'pt': 'Angical - BA'}, '55773623':{'en': u('S\u00e3o Desid\u00e9rio - BA'), 'pt': u('S\u00e3o Desid\u00e9rio - BA')}, '55773624':{'en': u('Riach\u00e3o das Neves - BA'), 'pt': u('Riach\u00e3o das Neves - BA')}, '55773625':{'en': u('Santa Rita de C\u00e1ssia - BA'), 'pt': u('Santa Rita de C\u00e1ssia - BA')}, '55773626':{'en': 'Wanderley - BA', 'pt': 'Wanderley - BA'}, '55773628':{'en': u('<NAME>\u00e3es - BA'), 'pt': u('<NAME>\u00e3es - BA')}, '55773629':{'en': 'Recife - PE', 'pt': 'Recife - PE'}, '55773639':{'en': u('<NAME>\u00e3es - BA'), 'pt': u('<NAME>\u00e3es - BA')}, '55773641':{'en': u('Mansid\u00e3o - BA'), 'pt': u('Mansid\u00e3o - BA')}, '55773642':{'en': 'Oliveira dos Brejinhos - BA', 'pt': 'Oliveira dos Brejinhos - BA'}, '55773643':{'en': 'Matina - BA', 'pt': 'Matina - BA'}, '55773644':{'en': u('Brotas de Maca\u00fabas - BA'), 'pt': u('Brotas de Maca\u00fabas - BA')}, '55773645':{'en': 'Boquira - BA', 'pt': 'Boquira - BA'}, '55773646':{'en': 'Ipupiara - BA', 'pt': 'Ipupiara - BA'}, '55773647':{'en': 'Ibitiara - BA', 'pt': 'Ibitiara - BA'}, '55773648':{'en': 'Novo Horizonte - BA', 'pt': 'Novo Horizonte - BA'}, '55773652':{'en': u('Muqu\u00e9m de S\u00e3o Francisco - BA'), 'pt': u('Muqu\u00e9m de S\u00e3o Francisco - BA')}, '55773656':{'en': u('Brejol\u00e2ndia - BA'), 'pt': u('Brejol\u00e2ndia - BA')}, '55773657':{'en': 'Tabocas do Brejo Velho - BA', 'pt': 'Tabocas do Brejo Velho - BA'}, '55773658':{'en': 'Ibitiara - BA', 'pt': 'Ibitiara - BA'}, '55773661':{'en': 'Candiba - BA', 'pt': 'Candiba - BA'}, '55773662':{'en': 'Palmas de Monte Alto - BA', 'pt': 'Palmas de Monte Alto - BA'}, '55773663':{'en': u('Morpar\u00e1 - BA'), 'pt': u('Morpar\u00e1 - BA')}, '55773664':{'en': 'Paratinga - BA', 'pt': 'Paratinga - BA'}, '55773667':{'en': u('Pinda\u00ed - BA'), 'pt': u('Pinda\u00ed - BA')}, '55773668':{'en': u('Sebasti\u00e3o 
Laranjeiras - BA'), 'pt': u('Sebasti\u00e3o Laranjeiras - BA')}, '55773671':{'en': u('S\u00edtio do Mato - BA'), 'pt': u('S\u00edtio do Mato - BA')}, '55773673':{'en': 'Oliveira dos Brejinhos - BA', 'pt': 'Oliveira dos Brejinhos - BA'}, '55773674':{'en': 'Ibipitanga - BA', 'pt': 'Ibipitanga - BA'}, '55773677':{'en': u('\u00c9rico Cardoso - BA'), 'pt': u('\u00c9rico Cardoso - BA')}, '55773678':{'en': u('Botupor\u00e3 - BA'), 'pt': u('Botupor\u00e3 - BA')}, '55773682':{'en': u('Iui\u00fa - BA'), 'pt': u('Iui\u00fa - BA')}, '55773683':{'en': 'Jaborandi - BA', 'pt': 'Jaborandi - BA'}, '55773684':{'en': 'Roda Velha - BA', 'pt': 'Roda Velha - BA'}, '55773686':{'en': 'Serra Dourada - BA', 'pt': 'Serra Dourada - BA'}, '55773687':{'en': u('Can\u00e1polis - BA'), 'pt': u('Can\u00e1polis - BA')}, '55773688':{'en': u('Novo Paran\u00e1 - BA'), 'pt': u('Novo Paran\u00e1 - BA')}, '55773689':{'en': u('Ros\u00e1rio - BA'), 'pt': u('Ros\u00e1rio - BA')}, '55773691':{'en': 'Malhada - BA', 'pt': 'Malhada - BA'}, '55773693':{'en': 'Rio do Pires - BA', 'pt': 'Rio do Pires - BA'}, '55773695':{'en': 'Tanque Novo - BA', 'pt': 'Tanque Novo - BA'}, '55773698':{'en': 'Ibotirama - BA', 'pt': 'Ibotirama - BA'}, '55774009':{'en': u('Vit\u00f3ria da Conquista - BA'), 'pt': u('Vit\u00f3ria da Conquista - BA')}, '55774141':{'en': u('Vit\u00f3ria da Conquista - BA'), 'pt': u('Vit\u00f3ria da Conquista - BA')}, '5579':{'en': 'Sergipe', 'pt': 'Sergipe'}, '55793014':{'en': 'Nossa Senhora do Socorro - SE', 'pt': 'Nossa Senhora do Socorro - SE'}, '55793022':{'en': 'Aracaju - SE', 'pt': 'Aracaju - SE'}, '55793045':{'en': 'Aracaju - SE', 'pt': 'Aracaju - SE'}, '55793046':{'en': 'Aracaju - SE', 'pt': 'Aracaju - SE'}, '55793113':{'en': 'Nossa Senhora do Socorro - SE', 'pt': 'Nossa Senhora do Socorro - SE'}, '55793114':{'en': 'Nossa Senhora do Socorro - SE', 'pt': 'Nossa Senhora do Socorro - SE'}, '55793194':{'en': 'Aracaju - SE', 'pt': 'Aracaju - SE'}, '55793198':{'en': 'Aracaju - SE', 'pt': 'Aracaju - SE'}, '55793205':{'en': 'Aracaju - SE', 'pt': 'Aracaju - SE'}, '55793211':{'en': 'Aracaju - SE', 'pt': 'Aracaju - SE'}, '55793213':{'en': 'Aracaju - SE', 'pt': 'Aracaju - SE'}, '55793214':{'en': 'Aracaju - SE', 'pt': 'Aracaju - SE'}, '55793215':{'en': 'Aracaju - SE', 'pt': 'Aracaju - SE'}, '55793217':{'en': 'Aracaju - SE', 'pt': 'Aracaju - SE'}, '55793221':{'en': 'Aracaju - SE', 'pt': 'Aracaju - SE'}, '55793222':{'en': 'Aracaju - SE', 'pt': 'Aracaju - SE'}, '55793223':{'en': 'Aracaju - SE', 'pt': 'Aracaju - SE'}, '55793224':{'en': 'Aracaju - SE', 'pt': 'Aracaju - SE'}, '55793227':{'en': 'Aracaju - SE', 'pt': 'Aracaju - SE'}, '55793236':{'en': 'Aracaju - SE', 'pt': 'Aracaju - SE'}, '5579324':{'en': 'Aracaju - SE', 'pt': 'Aracaju - SE'}, '55793251':{'en': 'Aracaju - SE', 'pt': 'Aracaju - SE'}, '55793252':{'en': 'Aracaju - SE', 'pt': 'Aracaju - SE'}, '55793254':{'en': 'Nossa Senhora do Socorro - SE', 'pt': 'Nossa Senhora do Socorro - SE'}, '55793255':{'en': 'Aracaju - SE', 'pt': 'Aracaju - SE'}, '55793256':{'en': 'Nossa Senhora do Socorro - SE', 'pt': 'Nossa Senhora do Socorro - SE'}, '55793257':{'en': u('S\u00e3o Crist\u00f3v\u00e3o - SE'), 'pt': u('S\u00e3o Crist\u00f3v\u00e3o - SE')}, '55793259':{'en': 'Aracaju - SE', 'pt': 'Aracaju - SE'}, '55793260':{'en': 'Barra dos Coqueiros - SE', 'pt': 'Barra dos Coqueiros - SE'}, '55793261':{'en': u('S\u00e3o Crist\u00f3v\u00e3o - SE'), 'pt': u('S\u00e3o Crist\u00f3v\u00e3o - SE')}, '55793262':{'en': 'Barra dos Coqueiros - SE', 'pt': 'Barra dos Coqueiros - SE'}, '55793263':{'en': 'Capela - SE', 
'pt': 'Capela - SE'}, '55793264':{'en': 'Itaporanga d\'Ajuda - SE', 'pt': 'Itaporanga d\'Ajuda - SE'}, '55793265':{'en': 'Nossa Senhora das Dores - SE', 'pt': 'Nossa Senhora das Dores - SE'}, '55793266':{'en': 'Santo Amaro das Brotas - SE', 'pt': 'Santo Amaro das Brotas - SE'}, '55793268':{'en': 'General Maynard - SE', 'pt': 'General Maynard - SE'}, '55793269':{'en': 'Riachuelo - SE', 'pt': 'Riachuelo - SE'}, '55793271':{'en': 'Divina Pastora - SE', 'pt': 'Divina Pastora - SE'}, '55793272':{'en': 'Japaratuba - SE', 'pt': 'Japaratuba - SE'}, '55793274':{'en': u('Ros\u00e1rio do Catete - SE'), 'pt': u('Ros\u00e1rio do Catete - SE')}, '55793275':{'en': 'Maruim - SE', 'pt': 'Maruim - SE'}, '55793276':{'en': 'Pirambu - SE', 'pt': 'Pirambu - SE'}, '55793277':{'en': u('Carm\u00f3polis - SE'), 'pt': u('Carm\u00f3polis - SE')}, '55793279':{'en': 'Nossa Senhora do Socorro - SE', 'pt': 'Nossa Senhora do Socorro - SE'}, '55793281':{'en': 'Laranjeiras - SE', 'pt': 'Laranjeiras - SE'}, '55793288':{'en': 'Areia Branca - SE', 'pt': 'Areia Branca - SE'}, '55793297':{'en': 'Siriri - SE', 'pt': 'Siriri - SE'}, '55793302':{'en': 'Aracaju - SE', 'pt': 'Aracaju - SE'}, '55793304':{'en': 'Aracaju - SE', 'pt': 'Aracaju - SE'}, '55793313':{'en': 'Feira Nova - SE', 'pt': 'Feira Nova - SE'}, '55793314':{'en': 'Itabi - SE', 'pt': 'Itabi - SE'}, '55793316':{'en': 'Nossa Senhora de Lourdes - SE', 'pt': 'Nossa Senhora de Lourdes - SE'}, '55793318':{'en': 'Monte Alegre de Sergipe - SE', 'pt': 'Monte Alegre de Sergipe - SE'}, '55793319':{'en': 'Gracho Cardoso - SE', 'pt': 'Gracho Cardoso - SE'}, '55793322':{'en': u('Propri\u00e1 - SE'), 'pt': u('Propri\u00e1 - SE')}, '55793337':{'en': u('Po\u00e7o Redondo - SE'), 'pt': u('Po\u00e7o Redondo - SE')}, '55793339':{'en': u('Santana do S\u00e3o Francisco - SE'), 'pt': u('Santana do S\u00e3o Francisco - SE')}, '55793341':{'en': u('Aquidab\u00e3 - SE'), 'pt': u('Aquidab\u00e3 - SE')}, '55793342':{'en': 'Muribeca - SE', 'pt': 'Muribeca - SE'}, '55793343':{'en': 'Pacatuba - SE', 'pt': 'Pacatuba - SE'}, '55793344':{'en': u('Ne\u00f3polis - SE'), 'pt': u('Ne\u00f3polis - SE')}, '55793346':{'en': u('Canind\u00e9 de S\u00e3o Francisco - SE'), 'pt': u('Canind\u00e9 de S\u00e3o Francisco - SE')}, '55793347':{'en': u('Cedro de S\u00e3o Jo\u00e3o - SE'), 'pt': u('Cedro de S\u00e3o Jo\u00e3o - SE')}, '55793348':{'en': u('Japoat\u00e3 - SE'), 'pt': u('Japoat\u00e3 - SE')}, '55793349':{'en': 'Porto da Folha - SE', 'pt': 'Porto da Folha - SE'}, '55793351':{'en': 'Porto da
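# ----------------------------------------------------------------------------
# The mapping above associates dialling prefixes (country code 55 + area code
# + local prefix) with a geographic description per language.  A minimal
# sketch of how such a table is typically consumed: strip the number to
# digits, then look up progressively shorter prefixes.  The helper name
# describe_number and the two-entry sample table are illustrative only; they
# are not part of the geocoding data files themselves.

SAMPLE_PREFIXES = {
    '5579': {'en': 'Sergipe', 'pt': 'Sergipe'},
    '55793322': {'en': 'Propri\u00e1 - SE', 'pt': 'Propri\u00e1 - SE'},
}

def describe_number(number, table, lang='en'):
    digits = ''.join(ch for ch in number if ch.isdigit())
    # try the longest prefix first, then fall back to shorter ones
    for length in range(len(digits), 0, -1):
        entry = table.get(digits[:length])
        if entry:
            return entry.get(lang)
    return None

if __name__ == '__main__':
    print(describe_number('+55 79 3322 0000', SAMPLE_PREFIXES))  # Propria - SE
    print(describe_number('+55 79 9999 0000', SAMPLE_PREFIXES))  # Sergipe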
and var2 samples are given, wants samples of observation return self.sample_observations(var1, var2) def unif_to_sample(self, u: np.ndarray, var1: Union[np.ndarray, None] = None, var2: Union[np.ndarray, None] = None ) -> np.ndarray: # for R2 this u.shape is (2,) assert len(u) == 2 u_for_dist = u[0] u_for_angle = u[1] normal_var = scistats.norm.ppf(u_for_dist) # convert to standard normal dist_sample = (self._cov_sqrt * normal_var + self._observation)[0] angle_sample = (u_for_angle - 0.5) * _TWO_PI if var1 is None: # var2 samples are given, wants samples of var1 if var2 is None: raise ValueError("Samples of at least one variable must be " "specified") return var2 + np.array([dist_sample * np.cos(angle_sample), dist_sample * np.sin(angle_sample)]) elif var2 is None: # var1 samples are given, wants samples of var2 return var1 + np.array([dist_sample * np.cos(angle_sample), dist_sample * np.sin(angle_sample)]) else: # var1 and var2 samples are given, wants samples of observation raise ValueError("Both of var1 and var2 are given.") def evaluate_loglike(self, x): var1 = x[0:self._unary_dim] var2 = x[self._unary_dim:] delta = np.linalg.norm(var1 - var2) - self._observation[0] return -0.5 * (delta ** 2 / self._variance) + self._lnorm def pdf(self, x: np.ndarray) -> np.ndarray: return np.exp(self.log_pdf(x)) def log_pdf(self, x: np.ndarray) -> np.ndarray: var1_sample = x[:, 0:self.var1.dim] var2_sample = x[:, self.var1.dim:] delta = np.linalg.norm(var1_sample[:, self.var1.t_dim_indices] - var2_sample[:, self.var2.t_dim_indices], axis=1, keepdims=True) - \ self._observation[0] return self._noise_distribution.log_pdf(delta) def grad_x_log_pdf(self, x: np.ndarray) -> np.ndarray: var1_sample = x[:, 0:self.var1.dim] var2_sample = x[:, self.var1.dim:] distance = np.linalg.norm(var1_sample[:, self.var1.t_dim_indices] - var2_sample[:, self.var2.t_dim_indices], axis=1, keepdims=True) diff = var1_sample[:, self.var1.t_dim_indices] - var2_sample[:, self.var2.t_dim_indices] delta = distance - self._observation[0] res = np.zeros_like(x) res[:, 0:self.var1.dim][:, self.var1.t_dim_indices] = diff res[:, self.var1.dim:][:, self.var2.t_dim_indices] = -diff low_dist_idx = np.where(distance < 1e-8)[0] others = np.array([idx for idx in range(x.shape[0]) if idx not in low_dist_idx]) if len(low_dist_idx) > 0: res[low_dist_idx] = (res[low_dist_idx] / 1e-8) * self._noise_distribution.grad_x_log_pdf(delta[low_dist_idx]).reshape((-1, 1)) if len(others) > 0: res[others] = (res[others] / distance[others]) * self._noise_distribution.grad_x_log_pdf(delta[others]).reshape((-1, 1)) return res @property def is_gaussian(self): return False class UnaryR2RangeGaussianPriorFactor(ExplicitPriorFactor, UnaryFactor, metaclass=ABCMeta): measurement_variable_type = R1Variable def __init__(self, var: R2Variable, center: np.ndarray, mu: float, sigma: float) -> None: """ Params: sigma: float """ self._distribution = dists.GaussianRangeDistribution( center=center, mu=mu, sigma=sigma ** 2) super().__init__([var], distribution=self._distribution) self._covariance = sigma ** 2 self._precision = 1.0 / self._covariance self._cov_sqrt = sigma self._lnorm = -0.5 * (np.log(_TWO_PI) + np.log(self._covariance)) # ln(normalization) @property def vars(self) -> List[R2Variable]: return self._vars @property def mu(self) -> float: return self._distribution.mean @property def covariance(self) -> float: return self._distribution.sigma @property def center(self) -> np.ndarray: return self._distribution.center def __str__(self) -> str: line = ["Factor", 
self.__class__.__name__, str(self.vars[0].name), "center:", str(self.center[0]), str(self.center[1]), "mu:", str(self.mu), "sigma", str(self.covariance)] return " ".join(line) @classmethod def construct_from_text(cls, line: str, variables ) -> "UnaryR2RangeGaussianPriorFactor": line = line.strip().split() name_to_var = {var.name: var for var in variables} if line[0] == cls.__name__: var = name_to_var[line[1]] center = np.array([float(line[2]), float(line[3])]) mu = float(line[4]) variance = float(line[5]) factor = cls( var=var, mu=mu, covariance=variance) else: raise ValueError("The factor name is incorrect") return factor def unif_to_sample(self, u) -> np.array: # u is a (2,))numpy array # return a sample on R2 # for R2 this u.shape is (2,) assert len(u) == 2 u_for_dist = u[0] u_for_angle = u[1] normal_var = scistats.norm.ppf(u_for_dist) # convert to standard normal dist_sample = (self._cov_sqrt * normal_var + self.mu) angle_sample = (u_for_angle - 0.5) * _TWO_PI return self.center + np.array([dist_sample * np.cos(angle_sample), dist_sample * np.sin(angle_sample)]) @property def observation(self): return self.mu def evaluate_loglike(self, x): delta = (np.linalg.norm(x - self.center.flatten() - self.observation)) return -0.5 * np.dot(delta, np.dot(self._precision, delta)) + self._lnorm @property def is_gaussian(self): return False class UncertainR2RangeGaussianLikelihoodFactor(ExplicitLikelihoodFactor, LikelihoodFactor, BinaryFactor): """ Likelihood factor on R(2) Inspired by the SNL example in https://arxiv.org/abs/1812.02609 The likelihood model for this factor can be found in the paper """ measurement_dim = 1 measurement_type = R1Variable def __init__(self, var1: Variable, var2: Variable, observation: Union[np.ndarray, float], sigma: float = 1.0, observed_flag: bool = False, unobserved_sigma: float = .3 ) -> None: """ :param var1 :param var2 :param observation: observed distance from var1 to var2 :param sigma: standard deviation of Gaussian distribution for the range measurement :param observed_flag: true if the observation is valid :param unobserved_sigma: standard deviation of Gaussian distribution for the observability """ super().__init__(vars=[var1, var2], log_likelihood=None) self._unary_dim = 2 self._observation = observation if isinstance(observation, np.ndarray) \ else np.array([observation]) self._sigma = sigma # this is for evaluating log-likelihood self._observation_var = type(self). 
\ measurement_type(name="O" + var1.name + var2.name, variable_type=VariableType.Measurement) self._observed_flag = observed_flag self._unobserved_sigma = unobserved_sigma self._new_var = self._sigma **2 * self._unobserved_sigma **2 / (self._sigma **2 + self._unobserved_sigma **2) self._new_mu = self._unobserved_sigma **2 * self._observation[0] / (self._sigma **2 + self._unobserved_sigma **2) self._noise_distribution = dist.GaussianDistribution( mu=np.zeros(1), sigma=np.array([[self._new_var]])) self._variance = self._new_var self._cov_sqrt = np.sqrt(self._new_var) @property def observation_var(self): return self._observation_var @property def circular_dim_list(self): return self._observation_var.circular_dim_list @classmethod def construct_from_text(cls, line: str, variables: Iterable[Variable] ) -> "UncertainR2RangeGaussianLikelihoodFactor": line = line.strip().split() name_to_var = {var.name: var for var in variables} if line[0] == cls.__name__: var1 = name_to_var[line[1]] var2 = name_to_var[line[2]] obs = float(line[3]) sigma = float(line[4]) flag = bool(int(line[5])) obs_sigma = int(line[6]) factor = cls(var1=var1, var2=var2, observation=obs, sigma=sigma, observed_flag=flag, unobserved_sigma=obs_sigma) else: raise ValueError("The factor name is incorrect") return factor def __str__(self) -> str: line = ["Factor", self.__class__.__name__, str(self.vars[0].name), str(self.vars[1].name), str(self.observation[0]), str(self.sigma), str(int(self.observed_flag)), str(self.unobserved_sigma)] return " ".join(line) @property def observed_flag(self) -> bool: return self._observed_flag @property def unobserved_sigma(self) -> float: return self._unobserved_sigma @property def observation(self) -> np.ndarray: return self._observation def sample_var2_from_var1(self, var1_samples: np.ndarray) -> np.ndarray: if (len(var1_samples.shape) != 2 or var1_samples.shape[0] == 0 or var1_samples.shape[1] != self._unary_dim): raise ValueError("The dimensionality of variable 1 is wrong") num_samples = var1_samples.shape[0] noise_samples = self._noise_distribution.rvs(num_samples) dist_samples = (np.zeros((num_samples, 1)) + self._new_mu + noise_samples) angle_samples = np.random.uniform(-np.pi, np.pi, num_samples).reshape( (num_samples, 1)) return var1_samples + np.hstack((dist_samples * np.cos(angle_samples), dist_samples * np.sin(angle_samples))) def sample_var1_from_var2(self, var2_samples: np.ndarray) -> np.ndarray: if (len(var2_samples.shape) != 2 or var2_samples.shape[0] == 0 or var2_samples.shape[1] != self._unary_dim): raise ValueError("The dimensionality of variable 2 is wrong") num_samples = var2_samples.shape[0] # Generate noise samples in range noise_samples = self._noise_distribution.rvs(num_samples) dist_samples = (np.zeros((num_samples, 1)) + self._new_mu + noise_samples) angle_samples = np.random.uniform(-np.pi, np.pi, num_samples).reshape( num_samples, 1) return var2_samples + np.hstack((dist_samples * np.cos(angle_samples), dist_samples * np.sin(angle_samples))) def sample_observations(self, var1_samples: np.ndarray, var2_samples: np.ndarray ) -> np.ndarray: if not (len(var1_samples.shape) == len(var2_samples.shape) == 2 and var1_samples.shape[0] == var2_samples.shape[0] and var1_samples.shape[1] == var2_samples.shape[1] == self._unary_dim): raise ValueError("Dimensionality of variable 1 or variable 2 is" " wrong") num_samples = var1_samples.shape[0] noise_samples = self._noise_distribution.rvs(num_samples) res = np.sqrt(np.sum((var2_samples - var1_samples) ** 2, axis=1 )).reshape((num_samples, 
1)) + noise_samples return res @property def sigma(self) -> float: return self._sigma def sample(self, var1: Union[np.ndarray, None] = None, var2: Union[np.ndarray, None] = None ) -> np.ndarray: """ Generate samples with given samples When var1 samples are var2 samples are given, generate observation samples When var2 samples are given, generate var1 samples When var1 samples are given, generate var2 samples :param var1: samples of var1 :param var2: samples of var2 :return: generated samples """ assert self._observed_flag == True if var1 is None: # var2 samples are given, wants samples of var1 if var2 is None: raise ValueError("Samples of at least one variable must be " "specified") return self.sample_var1_from_var2(var2) elif var2 is None: # var1 samples are given, wants samples of var2 return self.sample_var2_from_var1(var1) else: # var1 and var2 samples are given, wants samples of observation return self.sample_observations(var1, var2) def unif_to_sample(self, u: np.ndarray, var1: Union[np.ndarray, None] = None, var2: Union[np.ndarray, None] = None ) -> np.ndarray: # for R2 this u.shape is (2,) assert self._observed_flag == True assert len(u) == 2 u_for_dist = u[0] u_for_angle = u[1] normal_var = scistats.norm.ppf(u_for_dist) # convert to standard normal dist_sample = (self._cov_sqrt * normal_var + self._new_mu) angle_sample = (u_for_angle - 0.5) * _TWO_PI if var1 is None: # var2 samples are given, wants samples of var1 if var2 is None: raise ValueError("Samples of at least one variable must be " "specified") return var2 + np.array([dist_sample * np.cos(angle_sample), dist_sample * np.sin(angle_sample)]) elif var2 is None: # var1 samples are given, wants samples of var2 return var1 + np.array([dist_sample * np.cos(angle_sample), dist_sample * np.sin(angle_sample)]) else: # var1 and var2 samples are given, wants samples of observation raise ValueError("Both of var1
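# ----------------------------------------------------------------------------
# A standalone numeric sketch of the uniform-to-sample transform used by the
# unif_to_sample() methods above: u[0] is pushed through the inverse normal
# CDF to give a range sample (sigma * z + mu), u[1] is stretched to a bearing
# in (-pi, pi), and the second endpoint is placed on the resulting circle
# around the given endpoint.  The concrete numbers (mu, sigma, anchor point)
# are made up for illustration.

import numpy as np
import scipy.stats as scistats

_TWO_PI = 2.0 * np.pi

def unif_to_r2_sample(u, anchor, mu, sigma):
    assert len(u) == 2
    z = scistats.norm.ppf(u[0])             # standard-normal quantile
    dist_sample = sigma * z + mu            # range measurement sample
    angle_sample = (u[1] - 0.5) * _TWO_PI   # uniform bearing in (-pi, pi)
    return anchor + np.array([dist_sample * np.cos(angle_sample),
                              dist_sample * np.sin(angle_sample)])

if __name__ == '__main__':
    anchor = np.array([1.0, 2.0])
    print(unif_to_r2_sample(np.array([0.5, 0.75]), anchor, mu=3.0, sigma=0.1))
    # u[0]=0.5 -> z=0, so the range is exactly mu=3; u[1]=0.75 -> angle=pi/2,
    # so the sample lands at anchor + (0, 3), i.e. approximately [1, 5]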
<reponame>skmezanul/seahub # encoding: utf-8 import hashlib import os import stat import json import mimetypes import urllib2 import logging from math import ceil import posixpath from django.core.cache import cache from django.core.urlresolvers import reverse from django.contrib import messages from django.http import HttpResponse, HttpResponseBadRequest, Http404, \ HttpResponseRedirect from django.shortcuts import render_to_response, redirect from django.template import RequestContext from django.utils.translation import ugettext as _ from django.utils import timezone from django.utils.http import urlquote from django.views.decorators.http import condition import seaserv from seaserv import get_repo, get_commits, is_valid_filename, \ seafserv_threaded_rpc, seafserv_rpc, is_repo_owner, check_permission, \ is_passwd_set, get_file_size, edit_repo, \ get_session_info, set_repo_history_limit, get_commit, \ MAX_DOWNLOAD_DIR_SIZE, send_message from seaserv import seafile_api from pysearpc import SearpcError from seahub.avatar.util import get_avatar_file_storage from seahub.auth.decorators import login_required, login_required_ajax from seahub.auth import login as auth_login from seahub.auth import get_backends from seahub.base.accounts import User from seahub.base.decorators import user_mods_check from seahub.base.models import UserStarredFiles, DirFilesLastModifiedInfo from seahub.contacts.models import Contact from seahub.options.models import UserOptions, CryptoOptionNotSetError from seahub.profile.models import Profile from seahub.share.models import FileShare, PrivateFileDirShare, \ UploadLinkShare from seahub.forms import RepoPassowrdForm, RepoSettingForm from seahub.utils import render_permission_error, render_error, list_to_string, \ get_fileserver_root, gen_shared_upload_link, \ gen_dir_share_link, gen_file_share_link, get_repo_last_modify, \ calculate_repos_last_modify, get_file_type_and_ext, get_user_repos, \ EMPTY_SHA1, normalize_file_path, is_valid_username, \ get_file_revision_id_size, get_ccnet_server_addr_port, \ gen_file_get_url, string2list, MAX_INT, IS_EMAIL_CONFIGURED, \ gen_file_upload_url, \ EVENTS_ENABLED, get_user_events, get_org_user_events, show_delete_days, \ TRAFFIC_STATS_ENABLED, get_user_traffic_stat, new_merge_with_no_conflict, \ user_traffic_over_limit, is_org_context from seahub.utils.paginator import get_page_range from seahub.utils.star import get_dir_starred_files from seahub.views.modules import MOD_PERSONAL_WIKI, enable_mod_for_user, \ disable_mod_for_user from seahub.utils.devices import get_user_devices, do_unlink_device import seahub.settings as settings from seahub.settings import FILE_PREVIEW_MAX_SIZE, INIT_PASSWD, USE_PDFJS, \ FILE_ENCODING_LIST, FILE_ENCODING_TRY_LIST, AVATAR_FILE_STORAGE, \ SEND_EMAIL_ON_ADDING_SYSTEM_MEMBER, SEND_EMAIL_ON_RESETTING_USER_PASSWD, \ ENABLE_SUB_LIBRARY, ENABLE_REPO_HISTORY_SETTING, REPO_PASSWORD_MIN_LENGTH # Get an instance of a logger logger = logging.getLogger(__name__) def validate_owner(request, repo_id): """ Check whether user in the request owns the repo. """ ret = is_repo_owner(request.user.username, repo_id) return True if ret else False def is_registered_user(email): """ Check whether user is registerd. 
""" try: user = User.objects.get(email=email) except User.DoesNotExist: user = None return True if user else False def get_system_default_repo_id(): try: return seaserv.seafserv_threaded_rpc.get_system_default_repo_id() except SearpcError as e: logger.error(e) return None def check_repo_access_permission(repo_id, user): """Check repo access permission of a user, always return 'rw' when repo is system repo and user is admin. Arguments: - `repo_id`: - `user`: """ if get_system_default_repo_id() == repo_id and user.is_staff: return 'rw' else: return seafile_api.check_repo_access_permission(repo_id, user.username) def get_file_access_permission(repo_id, path, username): """Check user has permission to view the file. 1. check whether this file is private shared. 2. if failed, check whether the parent of this directory is private shared. """ pfs = PrivateFileDirShare.objects.get_private_share_in_file(username, repo_id, path) if pfs is None: dirs = PrivateFileDirShare.objects.list_private_share_in_dirs_by_user_and_repo(username, repo_id) for e in dirs: if path.startswith(e.path): return e.permission return None else: return pfs.permission def gen_path_link(path, repo_name): """ Generate navigate paths and links in repo page. """ if path and path[-1] != '/': path += '/' paths = [] links = [] if path and path != '/': paths = path[1:-1].split('/') i = 1 for name in paths: link = '/' + '/'.join(paths[:i]) i = i + 1 links.append(link) if repo_name: paths.insert(0, repo_name) links.insert(0, '/') zipped = zip(paths, links) return zipped def get_file_download_link(repo_id, obj_id, path): """Generate file download link. Arguments: - `repo_id`: - `obj_id`: - `filename`: """ return reverse('download_file', args=[repo_id, obj_id]) + '?p=' + \ urlquote(path) def get_repo_dirents(request, repo, commit, path, offset=-1, limit=-1): dir_list = [] file_list = [] dirent_more = False if commit.root_id == EMPTY_SHA1: return ([], []) if limit == -1 else ([], [], False) else: try: if limit == -1: dirs = seafile_api.list_dir_by_commit_and_path(commit.repo_id, commit.id, path, offset, limit) else: dirs = seafile_api.list_dir_by_commit_and_path(commit.repo_id, commit.id, path, offset, limit + 1) if len(dirs) == limit + 1: dirs = dirs[:limit] dirent_more = True except SearpcError, e: raise Http404 # return render_error(self.request, e.msg) org_id = -1 starred_files = get_dir_starred_files(request.user.username, repo.id, path, org_id) if repo.version == 0: last_modified_info = DirFilesLastModifiedInfo.objects.get_dir_files_last_modified(repo.id, path) fileshares = FileShare.objects.filter(repo_id=repo.id).filter(username=request.user.username) uploadlinks = UploadLinkShare.objects.filter(repo_id=repo.id).filter(username=request.user.username) view_dir_base = reverse('repo', args=[repo.id]) dl_dir_base = reverse('repo_download_dir', args=[repo.id]) view_file_base = reverse('repo_view_file', args=[repo.id]) file_history_base = reverse('file_revisions', args=[repo.id]) for dirent in dirs: if repo.version == 0: dirent.last_modified = last_modified_info.get(dirent.obj_name, 0) else: dirent.last_modified = dirent.mtime dirent.sharelink = '' dirent.uploadlink = '' if stat.S_ISDIR(dirent.props.mode): dpath = os.path.join(path, dirent.obj_name) if dpath[-1] != '/': dpath += '/' for share in fileshares: if dpath == share.path: dirent.sharelink = gen_dir_share_link(share.token) dirent.sharetoken = share.token break for link in uploadlinks: if dpath == link.path: dirent.uploadlink = gen_shared_upload_link(link.token) 
dirent.uploadtoken = link.token break p_dpath = posixpath.join(path, dirent.obj_name) dirent.view_link = view_dir_base + '?p=' + urlquote(p_dpath) dirent.dl_link = dl_dir_base + '?p=' + urlquote(p_dpath) dir_list.append(dirent) else: file_list.append(dirent) if repo.version == 0: dirent.file_size = get_file_size(repo.store_id, repo.version, dirent.obj_id) else: dirent.file_size = dirent.size dirent.starred = False fpath = os.path.join(path, dirent.obj_name) p_fpath = posixpath.join(path, dirent.obj_name) dirent.view_link = view_file_base + '?p=' + urlquote(p_fpath) dirent.dl_link = get_file_download_link(repo.id, dirent.obj_id, p_fpath) dirent.history_link = file_history_base + '?p=' + urlquote(p_fpath) if fpath in starred_files: dirent.starred = True for share in fileshares: if fpath == share.path: dirent.sharelink = gen_file_share_link(share.token) dirent.sharetoken = share.token break dir_list.sort(lambda x, y : cmp(x.obj_name.lower(), y.obj_name.lower())) file_list.sort(lambda x, y : cmp(x.obj_name.lower(), y.obj_name.lower())) if limit == -1: return (file_list, dir_list) else: return (file_list, dir_list, dirent_more) def get_unencry_rw_repos_by_user(request): """Get all unencrypted repos the user can read and write. """ username = request.user.username def has_repo(repos, repo): for r in repos: if repo.id == r.id: return True return False org_id = request.user.org.org_id if is_org_context(request) else None owned_repos, shared_repos, groups_repos, public_repos = get_user_repos( username, org_id=org_id) accessible_repos = [] for r in owned_repos: if not has_repo(accessible_repos, r) and not r.encrypted: accessible_repos.append(r) for r in shared_repos + groups_repos + public_repos: if not has_repo(accessible_repos, r) and not r.encrypted: if seafile_api.check_repo_access_permission(r.id, username) == 'rw': accessible_repos.append(r) return accessible_repos def render_recycle_root(request, repo_id): repo = get_repo(repo_id) if not repo: raise Http404 days = show_delete_days(request) try: deleted_entries = seafserv_threaded_rpc.get_deleted(repo_id, days) except: deleted_entries = [] dir_list = [] file_list = [] for dirent in deleted_entries: if stat.S_ISDIR(dirent.mode): dir_list.append(dirent) else: file_list.append(dirent) # Entries sort by deletion time in descending order. 
dir_list.sort(lambda x, y : cmp(y.delete_time, x.delete_time)) file_list.sort(lambda x, y : cmp(y.delete_time, x.delete_time)) username = request.user.username if is_org_context(request): repo_owner = seafile_api.get_org_repo_owner(repo.id) else: repo_owner = seafile_api.get_repo_owner(repo.id) is_repo_owner = True if repo_owner == username else False enable_clean = False if is_repo_owner: enable_clean = True return render_to_response('repo_recycle_view.html', { 'show_recycle_root': True, 'repo': repo, 'dir_list': dir_list, 'file_list': file_list, 'days': days, 'enable_clean': enable_clean, }, context_instance=RequestContext(request)) def render_recycle_dir(request, repo_id, commit_id): basedir = request.GET.get('base', '') path = request.GET.get('p', '') if not basedir or not path: return render_recycle_root(request, repo_id) if basedir[0] != '/': basedir = '/' + basedir if path[-1] != '/': path += '/' repo = get_repo(repo_id) if not repo: raise Http404 commit = seafserv_threaded_rpc.get_commit(repo.id, repo.version, commit_id) if not commit: raise Http404 zipped = gen_path_link(path, '') file_list, dir_list = get_repo_dirents(request, repo, commit, basedir + path) days = show_delete_days(request) username = request.user.username if is_org_context(request): repo_owner = seafile_api.get_org_repo_owner(repo.id) else: repo_owner = seafile_api.get_repo_owner(repo.id) is_repo_owner = True if repo_owner == username else False enable_clean = False if is_repo_owner: enable_clean = True return render_to_response('repo_recycle_view.html', { 'show_recycle_root': False, 'repo': repo, 'zipped': zipped, 'dir_list': dir_list, 'file_list': file_list, 'commit_id': commit_id, 'basedir': basedir, 'path': path, 'days': days, 'enable_clean': enable_clean, }, context_instance=RequestContext(request)) @login_required def repo_recycle_view(request, repo_id): if check_repo_access_permission(repo_id, request.user) != 'rw': return render_permission_error(request, _(u'Unable to view recycle page')) commit_id = request.GET.get('commit_id', '') if not commit_id: return render_recycle_root(request, repo_id) else: return render_recycle_dir(request, repo_id, commit_id) @login_required def repo_online_gc(request, repo_id): if request.method != 'POST': raise Http404 repo = get_repo(repo_id) if not repo: raise Http404 referer = request.META.get('HTTP_REFERER', None) next = settings.SITE_ROOT if referer is None else referer username = request.user.username if is_org_context(request): repo_owner = seafile_api.get_org_repo_owner(repo.id) else: repo_owner = seafile_api.get_repo_owner(repo.id) is_repo_owner = True if repo_owner == username else False if not is_repo_owner: messages.error(request, _('Permission denied')) return HttpResponseRedirect(next) day = int(request.POST.get('day')) try: seafile_api.clean_up_repo_history(repo.id, day) except SearpcError, e: messages.error(request, _('Internal server error')) return HttpResponseRedirect(next) return HttpResponseRedirect(next) @login_required def repo_settings(request, repo_id): """List and change library settings. 
""" username = request.user.username repo = seafile_api.get_repo(repo_id) if not repo: raise Http404 # no settings for virtual repo if ENABLE_SUB_LIBRARY and repo.is_virtual: raise Http404 # check permission if is_org_context(request): repo_owner = seafile_api.get_org_repo_owner(repo.id) else: repo_owner = seafile_api.get_repo_owner(repo.id) is_owner = True if username == repo_owner else False if not is_owner: raise Http404 history_limit = seaserv.get_repo_history_limit(repo.id) full_history_checked = no_history_checked = partial_history_checked = False if history_limit > 0: partial_history_checked = True elif history_limit == 0: no_history_checked = True else: full_history_checked = True full_history_enabled = no_history_enabled = partial_history_enabled = True days_enabled = True if not ENABLE_REPO_HISTORY_SETTING: full_history_enabled = no_history_enabled = partial_history_enabled = False days_enabled = False if history_limit <= 0: days_enabled = False return render_to_response('repo_settings.html', { 'repo': repo, 'repo_owner': repo_owner,
object. >>> w.histogram[-1] (8, 3) """ # if the a vertex-to-vertex network distance matrix is # not present in the `network.Network` object; calculate # one at this point if not hasattr(self, "distance_matrix"): self.full_distance_matrix(n_processes, gen_tree=gen_tree) # identify all network vertices which are within the # `threshold` parameter neighbor_query = numpy.where(self.distance_matrix < threshold) # create an instance for recording neighbors which # inserts a new key if not present in object neighbors = defaultdict(list) # iterate over neighbors within the `threshold` # and record all network vertices as neighbors # if the vertex is not being compared to itself for i, n in enumerate(neighbor_query[0]): neigh = neighbor_query[1][i] if n != neigh: neighbors[n].append(neigh) # call libpysal for `W` instance w = weights.W(neighbors) return w def snapobservations(self, in_data, name, idvariable=None, attribute=False): """Snap a point pattern shapefile to a network object. The point pattern is stored in the ``network.pointpattern`` attribute of the network object. Parameters ---------- in_data : {geopandas.GeoDataFrame, str} The input geographic data. Either (1) a path to a shapefile (str); or (2) a ``geopandas.GeoDataFrame``. name : str Name to be assigned to the point dataset. idvariable : str Column name to be used as the ID variable. attribute : bool Defines whether attributes should be extracted. ``True`` for attribute extraction. ``False`` for no attribute extraction. Default is ``False``. Notes ----- See :cite:`doi:10.1111/gean.12211` for a detailed discussion on the modeling consequences of snapping points to spatial networks. Examples -------- Instantiate a network. >>> import spaghetti >>> from libpysal import examples >>> streets_file = examples.get_path("streets.shp") >>> ntw = spaghetti.Network(in_data=streets_file) Snap observations to the network. >>> pt_str = "crimes" >>> in_data = examples.get_path(pt_str+".shp") >>> ntw.snapobservations(in_data, pt_str, attribute=True) Isolate the number of points in the dataset. >>> ntw.pointpatterns[pt_str].npoints 287 """ # create attribute of `pointpattern` but instantiating a # `network.PointPattern` class self.pointpatterns[name] = PointPattern( in_data=in_data, idvariable=idvariable, attribute=attribute ) # allocate the point observations to the nework self._snap_to_link(self.pointpatterns[name]) def compute_distance_to_vertices(self, x, y, arc): """Given an observation on a network arc, return the distance to the two vertices that bound that end. Parameters ---------- x : float The x-coordinate of the snapped point. y : float The y-coordinate of the snapped point. arc : tuple The (vtx0, vtx1) representation of the network arc. Returns ------- d1 : float The distance to vtx0. Always the vertex with the lesser ID. d2 : float The distance to vtx1. Always the vertex with the greater ID. """ # distance to vertex 1 d1 = util.compute_length((x, y), self.vertex_coords[arc[0]]) # distance to vertex 2 d2 = util.compute_length((x, y), self.vertex_coords[arc[1]]) return d1, d2 def compute_snap_dist(self, pattern, idx): """Given an observation snapped to a network arc, calculate the distance from the original location to the snapped location. Parameters ----------- pattern : spaghetti.PointPattern The point pattern object. idx : int The point ID. Returns ------- dist : float The euclidean distance from original location to the snapped location. 
""" # set of original (x,y) point coordinates loc = pattern.points[idx]["coordinates"] # set of snapped (x,y) point coordinate snp = pattern.snapped_coordinates[idx] # distance from the original location to # the snapped location along the network dist = util.compute_length(loc, snp) return dist def _snap_to_link(self, pointpattern): """Used internally to snap point observations to network arcs. Parameters ----------- pointpattern : spaghetti.PointPattern The point pattern object. Returns ------- obs_to_arc : dict Dictionary with arcs as keys and lists of points as values. arc_to_obs : dict Dictionary with point IDs as keys and arc tuples as values. dist_to_vertex : dict Dictionary with point IDs as keys and values as dictionaries with keys for vertex IDs and values as distances from point to vertex. dist_snapped : dict Dictionary with point IDs as keys and distance from point to the network arc that it is snapped. """ # instantiate observations snapped coordinates lookup pointpattern.snapped_coordinates = {} # record throw-away arcs (pysal.cg.Chain) enumerator arcs_ = [] # snapped(point)-to-arc lookup s2a = {} # iterate over network arc IDs for arc in self.arcs: # record the start and end of the arc head = self.vertex_coords[arc[0]] tail = self.vertex_coords[arc[1]] # create a pysal.cg.Chain object of the arc # and add it to the arcs enumerator arcs_.append(util._chain_constr(None, [head, tail])) # add the arc into the snapped(point)-to-arc lookup s2a[(head, tail)] = arc # instantiate crosswalks points = {} # point ID to coordinates lookup obs_to_arc = {} # observations to arcs lookup dist_to_vertex = {} # distance to vertices lookup dist_snapped = {} # snapped distance lookup # fetch and records point coordinates keyed by ID for point_idx, point in pointpattern.points.items(): points[point_idx] = point["coordinates"] # snap point observations to the network snapped = util.snap_points_to_links(points, arcs_) # record obs_to_arc, dist_to_vertex, and dist_snapped # -- iterate over the snapped observation points for point_idx, snap_info in snapped.items(): # fetch the x and y coordinate x, y = snap_info[1].tolist() # look up the arc from snapped(point)-to-arc arc = s2a[tuple(snap_info[0])] # add the arc key to observations to arcs lookup if arc not in obs_to_arc: obs_to_arc[arc] = {} # add the (x,y) coordinates of the original observation # point location to the observations to arcs lookup obs_to_arc[arc][point_idx] = (x, y) # add the (x,y) coordinates of the snapped observation # point location to the snapped coordinates lookup pointpattern.snapped_coordinates[point_idx] = (x, y) # calculate the distance to the left and right vertex # along the network link from the snapped point location d1, d2 = self.compute_distance_to_vertices(x, y, arc) # record the distances in the distance to vertices lookup dist_to_vertex[point_idx] = {arc[0]: d1, arc[1]: d2} # record the snapped distance dist_snapped[point_idx] = self.compute_snap_dist(pointpattern, point_idx) # instantiate observations to network vertex lookup obs_to_vertex = defaultdict(list) # iterate over the observations to arcs lookup for k, v in obs_to_arc.items(): # record the left and right vertex ids keys = v.keys() obs_to_vertex[k[0]] = keys obs_to_vertex[k[1]] = keys # iterate over components and assign observations component_to_obs = {} for comp, _arcids in self.network_component2arc.items(): component_to_obs[comp] = [] for lk, odict in obs_to_arc.items(): if lk in _arcids: component_to_obs[comp].extend(list(odict.keys())) # set 
crosswalks as attributes of the `pointpattern` class pointpattern.obs_to_arc = obs_to_arc pointpattern.component_to_obs = component_to_obs pointpattern.dist_to_vertex = dist_to_vertex pointpattern.dist_snapped = dist_snapped pointpattern.obs_to_vertex = list(obs_to_vertex) def count_per_link(self, obs_on, graph=False): """Compute the counts per arc or edge (link). Parameters ---------- obs_on : dict Dictionary of observations on the network. Either in the form ``{(<LINK>):{<POINT_ID>:(<COORDS>)}}`` or ``{<LINK>:[(<COORD>),(<COORD>)]}``. graph : bool Count observations on graph edges (``True``) or network arcs (``False``). Default is ``False``. Returns ------- counts : dict Counts per network link in the form ``{(<LINK>):<COUNT>}``. Examples -------- Note that this passes the ``obs_to_arc`` or ``obs_to_edge`` attribute of a point pattern snapped to the network. >>> import spaghetti >>> from libpysal import examples >>> ntw = spaghetti.Network(examples.get_path("streets.shp")) Snap observations to the network. >>> ntw.snapobservations( ... examples.get_path("crimes.shp"), "crimes", attribute=True ... ) >>> counts = ntw.count_per_link( ... ntw.pointpatterns["crimes"].obs_to_arc, graph=False ... ) >>> counts[(140, 142)] 10 >>> s = sum([v for v in list(counts.values())]) >>> s 287 """ # instantiate observation counts by link lookup counts = {} # graph-theoretic object of nodes and edges if graph: # iterate the links-to-observations lookup for key, observations in obs_on.items(): # isolate observation count for the link cnt = len(observations) # extract link (edges) key if key in self.arcs_to_edges.keys(): key = self.arcs_to_edges[key] # either add to current count or a dictionary # entry or create new dictionary entry try: counts[key] += cnt except KeyError: counts[key] = cnt # network object of arcs and vertices else: # simplified version of the above process for key in obs_on.keys(): counts[key] = len(obs_on[key]) return counts def _newpoint_coords(self, arc, distance): """Used internally to compute new point coordinates during snapping.""" # extract coordinates for vertex 1 of arc x1 = self.vertex_coords[arc[0]][0] y1 = self.vertex_coords[arc[0]][1] # extract coordinates for vertex 2 of arc x2 = self.vertex_coords[arc[1]][0] y2 = self.vertex_coords[arc[1]][1] # if the network arc is vertical set the (x) coordinate # and
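# Illustrative sketch -- not spaghetti's own code. It shows the geometry behind
# the snapping step described above: project a point onto a line segment, then
# report the snapped coordinates, the distances to the two bounding vertices
# (the quantities compute_distance_to_vertices() returns) and the snapping
# distance (what compute_snap_dist() returns). Function names are hypothetical.
import math

def snap_point_to_segment(pt, v0, v1):
    (px, py), (x0, y0), (x1, y1) = pt, v0, v1
    dx, dy = x1 - x0, y1 - y0
    seg_len_sq = dx * dx + dy * dy
    # degenerate arc: both vertices coincide, so snap to that single point
    t = 0.0 if seg_len_sq == 0 else ((px - x0) * dx + (py - y0) * dy) / seg_len_sq
    t = max(0.0, min(1.0, t))                  # clamp onto the segment
    sx, sy = x0 + t * dx, y0 + t * dy          # snapped location
    d1 = math.hypot(sx - x0, sy - y0)          # distance to first vertex
    d2 = math.hypot(sx - x1, sy - y1)          # distance to second vertex
    snap_dist = math.hypot(px - sx, py - sy)   # original -> snapped distance
    return (sx, sy), d1, d2, snap_dist

if __name__ == '__main__':
    print(snap_point_to_segment((2.0, 3.0), (0.0, 0.0), (10.0, 0.0)))
    # ((2.0, 0.0), 2.0, 8.0, 3.0)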
# -*- coding: utf-8 -*- ''' Integration tests for git_pillar The base classes for all of these tests are in tests/support/gitfs.py. Repositories for the tests are generated on the fly (look for the "make_repo" function). Where possible, a test case in this module should be reproduced in the following ways: 1. GitPython over SSH (TestGitPythonSSH) 2. GitPython over HTTP (TestGitPythonHTTP) 3. GitPython over HTTP w/basic auth (TestGitPythonAuthenticatedHTTP) 4. pygit2 over SSH (TestPygit2SSH) 5. pygit2 over HTTP (TestPygit2HTTP) 6. pygit2 over HTTP w/basic auth (TestPygit2AuthenticatedHTTP) For GitPython, this is easy, since it does not support the authentication configuration parameters that pygit2 does. Therefore, this test module includes a GitPythonMixin class which can be reused for all three GitPython test classes. The only thing we vary for these tests is the URL that we use. For pygit2 this is more complicated, since it supports A) both passphraseless and passphrase-protected SSH keys, and B) both global and per-remote credential parameters. So, for SSH tests we need to run each GitPython test case in 4 different ways to cover pygit2: 1. Passphraseless key, global credential options 2. Passphraseless key, per-repo credential options 3. Passphrase-protected key, global credential options 4. Passphrase-protected key, per-repo credential options For HTTP tests, we need to run each GitPython test case in 2 different ways to cover pygit2 with authentication: 1. Global credential options 2. Per-repo credential options For unauthenticated HTTP, we can just run a single case just like for a GitPython test function, with the only change being to the git_pillar_provider config option. The way we accomplish the extra test cases for pygit2 is not by writing more test functions, but to keep the same test function names both in the GitPython test classes and the pygit2 test classes, and just perform multiple pillar compilations and asserts in each pygit2 test function. For SSH tests, a system user is added and a temporary sshd instance is started on a randomized port. The user and sshd server are torn down after the tests are run. For HTTP tests, nginx + uWSGI + git-http-backend handles serving the repo. However, there was a change in git 2.4.4 which causes a fetch to hang when using uWSGI. This was worked around in uWSGI 2.0.13 by adding an additional setting. However, Ubuntu 16.04 LTS ships with uWSGI 2.0.12 in their official repos, so to work around this we pip install a newer uWSGI (with CGI support baked in) within a virtualenv the test suite creates, and then uses that uwsgi binary to start the uWSGI daemon. 
More info on the git issue and the uWSGI workaround can be found in the below two links: https://github.com/git/git/commit/6bc0cb5 https://github.com/unbit/uwsgi/commit/ac1e354 ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import random import string # Import Salt Testing libs from tests.support.gitfs import ( USERNAME, PASSWORD, GitPillarSSHTestBase, GitPillarHTTPTestBase, ) from tests.support.helpers import ( destructiveTest, requires_system_grains, skip_if_not_root ) from tests.support.mock import NO_MOCK, NO_MOCK_REASON from tests.support.unit import skipIf # Import Salt libs import salt.utils.path import salt.utils.platform from salt.modules.virtualenv_mod import KNOWN_BINARY_NAMES as VIRTUALENV_NAMES from salt.ext.six.moves import range # pylint: disable=redefined-builtin from salt.utils.gitfs import ( GITPYTHON_VERSION, GITPYTHON_MINVER, PYGIT2_VERSION, PYGIT2_MINVER, LIBGIT2_VERSION, LIBGIT2_MINVER ) # Check for requisite components try: HAS_GITPYTHON = GITPYTHON_VERSION >= GITPYTHON_MINVER except Exception: HAS_GITPYTHON = False try: HAS_PYGIT2 = PYGIT2_VERSION >= PYGIT2_MINVER \ and LIBGIT2_VERSION >= LIBGIT2_MINVER except Exception: HAS_PYGIT2 = False HAS_SSHD = bool(salt.utils.path.which('sshd')) HAS_NGINX = bool(salt.utils.path.which('nginx')) HAS_VIRTUALENV = bool(salt.utils.path.which_bin(VIRTUALENV_NAMES)) def _rand_key_name(length): return 'id_rsa_{0}'.format( ''.join(random.choice(string.ascii_letters) for _ in range(length)) ) def _windows_or_mac(): return salt.utils.platform.is_windows() or salt.utils.platform.is_darwin() class GitPythonMixin(object): ''' GitPython doesn't support anything fancy in terms of authentication options, so all of the tests for GitPython can be re-used via this mixin. ''' def test_single_source(self): ''' Test using a single ext_pillar repo ''' ret = self.get_pillar('''\ file_ignore_regex: [] file_ignore_glob: [] git_pillar_provider: gitpython cachedir: {cachedir} extension_modules: {extmods} ext_pillar: - git: - master {url} ''') self.assertEqual( ret, {'branch': 'master', 'mylist': ['master'], 'mydict': {'master': True, 'nested_list': ['master'], 'nested_dict': {'master': True}}} ) def test_multiple_sources_master_dev_no_merge_lists(self): ''' Test using two ext_pillar dirs. Since all git_pillar repos are merged into a single dictionary, ordering matters. This tests with the master branch followed by dev, and with pillar_merge_lists disabled. ''' ret = self.get_pillar('''\ file_ignore_regex: [] file_ignore_glob: [] git_pillar_provider: gitpython cachedir: {cachedir} extension_modules: {extmods} pillar_merge_lists: False ext_pillar: - git: - master {url} - dev {url} ''') self.assertEqual( ret, {'branch': 'dev', 'mylist': ['dev'], 'mydict': {'master': True, 'dev': True, 'nested_list': ['dev'], 'nested_dict': {'master': True, 'dev': True}}} ) def test_multiple_sources_dev_master_no_merge_lists(self): ''' Test using two ext_pillar dirs. Since all git_pillar repos are merged into a single dictionary, ordering matters. This tests with the dev branch followed by master, and with pillar_merge_lists disabled. 
''' ret = self.get_pillar('''\ file_ignore_regex: [] file_ignore_glob: [] git_pillar_provider: gitpython cachedir: {cachedir} extension_modules: {extmods} pillar_merge_lists: False ext_pillar: - git: - dev {url} - master {url} ''') self.assertEqual( ret, {'branch': 'master', 'mylist': ['master'], 'mydict': {'master': True, 'dev': True, 'nested_list': ['master'], 'nested_dict': {'master': True, 'dev': True}}} ) def test_multiple_sources_master_dev_merge_lists(self): ''' Test using two ext_pillar dirs. Since all git_pillar repos are merged into a single dictionary, ordering matters. This tests with the master branch followed by dev, and with pillar_merge_lists enabled. ''' ret = self.get_pillar('''\ file_ignore_regex: [] file_ignore_glob: [] git_pillar_provider: gitpython cachedir: {cachedir} extension_modules: {extmods} pillar_merge_lists: True ext_pillar: - git: - master {url} - dev {url} ''') self.assertEqual( ret, {'branch': 'dev', 'mylist': ['master', 'dev'], 'mydict': {'master': True, 'dev': True, 'nested_list': ['master', 'dev'], 'nested_dict': {'master': True, 'dev': True}}} ) def test_multiple_sources_dev_master_merge_lists(self): ''' Test using two ext_pillar dirs. Since all git_pillar repos are merged into a single dictionary, ordering matters. This tests with the dev branch followed by master, and with pillar_merge_lists enabled. ''' ret = self.get_pillar('''\ file_ignore_regex: [] file_ignore_glob: [] git_pillar_provider: gitpython cachedir: {cachedir} extension_modules: {extmods} pillar_merge_lists: True ext_pillar: - git: - dev {url} - master {url} ''') self.assertEqual( ret, {'branch': 'master', 'mylist': ['dev', 'master'], 'mydict': {'master': True, 'dev': True, 'nested_list': ['dev', 'master'], 'nested_dict': {'master': True, 'dev': True}}} ) def test_multiple_sources_with_pillarenv(self): ''' Test using pillarenv to restrict results to those from a single branch ''' ret = self.get_pillar('''\ file_ignore_regex: [] file_ignore_glob: [] git_pillar_provider: gitpython cachedir: {cachedir} extension_modules: {extmods} pillarenv: base ext_pillar: - git: - master {url} - dev {url} ''') self.assertEqual( ret, {'branch': 'master', 'mylist': ['master'], 'mydict': {'master': True, 'nested_list': ['master'], 'nested_dict': {'master': True}}} ) def test_includes_enabled(self): ''' Test with git_pillar_includes enabled. The top_only branch references an SLS file from the master branch, so we should see the key from that SLS file (included_pillar) in the compiled pillar data. ''' ret = self.get_pillar('''\ file_ignore_regex: [] file_ignore_glob: [] git_pillar_provider: gitpython cachedir: {cachedir} extension_modules: {extmods} ext_pillar: - git: - master {url} - top_only {url}: - env: base ''') self.assertEqual( ret, {'branch': 'master', 'mylist': ['master'], 'mydict': {'master': True, 'nested_list': ['master'], 'nested_dict': {'master': True}}, 'included_pillar': True} ) def test_includes_disabled(self): ''' Test with git_pillar_includes enabled. The top_only branch references an SLS file from the master branch, but since includes are disabled it will not find the SLS file and the "included_pillar" key should not be present in the compiled pillar data. We should instead see an error message in the compiled data. 
''' ret = self.get_pillar('''\ file_ignore_regex: [] file_ignore_glob: [] git_pillar_provider: gitpython git_pillar_includes: False cachedir: {cachedir} extension_modules: {extmods} ext_pillar: - git: - master {url} - top_only {url}: - env: base ''') self.assertEqual( ret, {'branch': 'master', 'mylist': ['master'], 'mydict': {'master': True, 'nested_list': ['master'], 'nested_dict': {'master': True}}, '_errors': ["Specified SLS 'bar' in environment 'base' is not " "available on the salt master"]} ) @destructiveTest @skipIf(NO_MOCK, NO_MOCK_REASON) @skipIf(_windows_or_mac(), 'minion is windows or mac') @skip_if_not_root @skipIf(not HAS_GITPYTHON, 'GitPython >= {0} required'.format(GITPYTHON_MINVER)) @skipIf(not HAS_SSHD, 'sshd not present') class TestGitPythonSSH(GitPillarSSHTestBase, GitPythonMixin): ''' Test git_pillar with GitPython using SSH authentication ''' id_rsa_nopass = _rand_key_name(8) id_rsa_withpass = _rand_key_name(8) username = USERNAME passphrase = PASSWORD @skipIf(NO_MOCK, NO_MOCK_REASON) @skipIf(_windows_or_mac(), 'minion is windows or mac') @skip_if_not_root @skipIf(not HAS_GITPYTHON, 'GitPython >= {0} required'.format(GITPYTHON_MINVER)) @skipIf(not HAS_NGINX, 'nginx not present') @skipIf(not HAS_VIRTUALENV, 'virtualenv not present') class TestGitPythonHTTP(GitPillarHTTPTestBase, GitPythonMixin): ''' Test git_pillar with GitPython using unauthenticated HTTP ''' pass @skipIf(NO_MOCK, NO_MOCK_REASON) @skipIf(_windows_or_mac(), 'minion is windows or mac') @skip_if_not_root @skipIf(not HAS_GITPYTHON, 'GitPython >= {0} required'.format(GITPYTHON_MINVER)) @skipIf(not HAS_NGINX, 'nginx not present') @skipIf(not HAS_VIRTUALENV, 'virtualenv not present') class TestGitPythonAuthenticatedHTTP(TestGitPythonHTTP, GitPythonMixin): ''' Test git_pillar with GitPython using authenticated HTTP ''' username = USERNAME password = PASSWORD @classmethod def setUpClass(cls): ''' Create start the webserver ''' super(TestGitPythonAuthenticatedHTTP, cls).setUpClass() # Override the URL set up in the parent class to encode the # username/password into it. cls.url = 'http://{username}:{password}@127.0.0.1:{port}/repo.git'.format( username=cls.username, password=<PASSWORD>, port=cls.nginx_port) cls.ext_opts['url'] = cls.url cls.ext_opts['username'] = cls.username cls.ext_opts['password']
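# Illustrative sketch -- not Salt's merge implementation. It mimics the
# behaviour these git_pillar tests assert: later ext_pillar sources are merged
# over earlier ones, and pillar_merge_lists decides whether lists are
# concatenated or replaced. merge_pillar is a hypothetical helper name.
def merge_pillar(base, override, merge_lists=False):
    out = dict(base)
    for key, val in override.items():
        if key in out and isinstance(out[key], dict) and isinstance(val, dict):
            out[key] = merge_pillar(out[key], val, merge_lists)
        elif key in out and isinstance(out[key], list) and isinstance(val, list) and merge_lists:
            out[key] = out[key] + val
        else:
            out[key] = val
    return out

if __name__ == '__main__':
    master = {'branch': 'master', 'mylist': ['master'], 'mydict': {'master': True}}
    dev = {'branch': 'dev', 'mylist': ['dev'], 'mydict': {'dev': True}}
    # master followed by dev with pillar_merge_lists disabled: list replaced
    print(merge_pillar(master, dev, merge_lists=False))
    # master followed by dev with pillar_merge_lists enabled: lists concatenated
    print(merge_pillar(master, dev, merge_lists=True))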
<reponame>Rohith04MVK/Neutron-Bot import typing as t from abc import abstractmethod from collections import defaultdict from contextlib import suppress from dataclasses import field, make_dataclass from importlib import import_module import asyncpg from loguru import logger if t.TYPE_CHECKING: from bot.core.bot import Bot class Singleton(type): """ This is Singleton Design Pattern. It makes sure that classes with this metaclass will only ever have one single instance, when they're initiated for the first time this instance is created, every next initiation will simply result in returning the stored single instace. """ _instance = None def __call__(cls, *args, **kwargs): """If instance already exists, return it.""" if not cls._instance: cls._instance = super(Singleton, cls).__call__(*args, **kwargs) return cls._instance class DBTable(metaclass=Singleton): """ This is a basic database table structure model. This class automatically creates the initial database tables accordingly to `columns` dict which is a mandantory class parameter defined in the top-level class, it should look like this: columns = { "column_name": "SQL creation syntax", "example": "NUMERIC(40) UNIQUE NOT NULL" ... } After the table is populated, caching will be automatically set up based on the `caching` dict which is an optional class parameter defined in the top-level class, if this parameter isn't defined, caching will be skipped. Example for caching: caching = { "key": "table_name", # This will be the key for the stored `cache` dict # These will be the entries for the cache "column_name": (python datatype, default_value), "column_name2": python datatype # default_value is optional } There are also multiple methods which serves as an abstraction layer for for executing raw SQL code. There is also a special `reference` classmethod which will return the running instance (from the singleton model). """ def __init__(self, db: "Database", table_name: str): self.database = db self.table = table_name self.pool = self.database.pool self.timeout = self.database.timeout self.cache = {} @abstractmethod async def __async_init__(self) -> None: """ This is asynchronous initialization function which will get automatically called by `Database` when the table is added. (Calling this method is handeled by the `_populate` function). """ raise NotImplementedError async def _init(self) -> None: """ This method calls `_populate` and `_make_cache` to make all the db tables and create the table cache. After that, `__async_init__` method is called which refers to top-level async initialization, if this method isn't defined, nothing will happen. This also makes sure that `columns` dictionary is defined properly in the top-level class.. """ if not hasattr(self, "columns") or not isinstance(self.columns, dict): raise RuntimeError(f"Table {self.__class__} doesn't have a `columns` dict defined properly.") await self._populate() await self._make_cache() with suppress(NotImplementedError): await self.__async_init__() async def _populate(self) -> None: """ This method is used to create the initial table structure and define it's structure and columns. This method also calls `__async_init__` method on top level table (if there is one). 
""" table_structure = ",\n".join(f"{column} {sql_details}" for column, sql_details in self.columns.items()) populate_command = f"CREATE TABLE IF NOT EXISTS {self.table} (\n{table_structure}\n)" logger.trace(f"Populating {self.__class__}") async with self.pool.acquire(timeout=self.timeout) as db: await db.execute(populate_command) async def _make_cache(self) -> None: """ Crate and populate basic caching model from top-level `self.caching`. This function creates `self.cache_columns` which stores the cached columns and their type together with `self.cache` which stores the actual cache. """ if not hasattr(self, "caching") or not isinstance(self.caching, dict): logger.trace(f"Skipping defining cache for {self.__class__}, `caching` dict wasn't specified") return self.cache_columns = {} cache_key_type, self._cache_key = self.caching.pop("key") self.cache_columns[self._cache_key] = cache_key_type # Create cache model field_list = [] for column, specification in self.caching.items(): if isinstance(specification, tuple): val = (column, specification[0], field(default=specification[1])) _type = specification[0] elif specification is None: val = column _type = None else: val = (column, specification) _type = specification field_list.append(val) self.cache_columns[column] = _type self._cache_model = make_dataclass("Entry", field_list) # Create and populate the cache self.cache = defaultdict(self._cache_model) columns = list(self.columns.keys()) entries = await self.db_get(columns) # Get db entries to store for entry in entries: db_entry = {} for col_name, record in zip(columns, entry): # Convert to specified type with suppress(IndexError, TypeError): _type = self.cache_columns[col_name] record = _type(record) db_entry[col_name] = record # Store the cache model into the cache key = db_entry.pop(self._cache_key) cache_entry = self._cache_model(**db_entry) self.cache[key] = cache_entry def cache_update(self, key: str, column: str, value: t.Any) -> None: """ Update the stored cache value for `update_key` on `primary_value` to given `update_value`. """ setattr(self.cache[key], column, value) def cache_get(self, key: str, column: str) -> t.Any: """ Obtain the value of `attribute` stored in cache for `primary_value` """ return getattr(self.cache[key], column) @classmethod def reference(cls) -> "DBTable": """ This is a method which returns the running instance of given class. This works based on the singleton single instance model and it was added as a substitution for calling __init__ from the top level class directly, since that requires passing arguments which won't be used due to the single instance model, using the `reference` function allows you to retrieve this instance without the need of passing any additional arguments. It should be noted that using this will return the instance of the top-level class, but the editor will only see it as an instance of this class (`DBTable`) due to the return type being set to it. To circumvent this you should statically define the type of the variable which will be used to store this instance. """ return cls._instance async def db_execute(self, sql: str, sql_args: t.Optional[list] = None) -> None: """ This method serves as an abstraction layer from using context manager and executing the sql command directly from there. 
""" if not sql_args: sql_args = [] async with self.pool.acquire(timeout=self.timeout) as db: await db.execute(sql, *sql_args) async def db_fetchone(self, sql: str, sql_args: t.Optional[list] = None) -> asyncpg.Record: """ This method serves as an abstraction layer from using context manager and fetching the sql query directly from there. """ if not sql_args: sql_args = [] async with self.pool.acquire(timeout=self.timeout) as db: return await db.fetchrow(sql, *sql_args) async def db_fetch(self, sql: str, sql_args: t.Optional[list] = None) -> t.List[asyncpg.Record]: """ This method serves as an abstraction layer from using context manager and fetching the sql query directly from there. """ if not sql_args: sql_args = [] async with self.pool.acquire(timeout=self.timeout) as db: return await db.fetch(sql, *sql_args) async def db_get( self, columns: t.List[str], specification: t.Optional[str] = None, sql_args: t.Optional[list] = None ) -> t.Union[asyncpg.Record, t.List[asyncpg.Record]]: """ This method serves as an abstraction layer from using SQL syntax in the top-level database table class, it runs the basic selection (get) query without needing to use SQL syntax at all. """ sql = f"SELECT {' ,'.join(columns)} FROM {self.table}" if specification: sql += f" WHERE {specification}" if len(columns) == 1: return await self.db_fetchone(sql, sql_args) return await self.db_fetch(sql, sql_args) async def db_set(self, columns: t.List[str], values: t.List[str]) -> None: """ This method serves as an abstraction layer from using SQL syntax in the top-level database table class, it runs the basic insertion (set) command without needing to use SQL syntax at all. """ sql_columns = ", ".join(columns) sql_values = ", ".join(f"${n + 1}" for n in range(len(values))) sql = f""" INSERT INTO {self.table} ({sql_columns}) VALUES ({sql_values}) """ await self.db_execute(sql, values) async def db_upsert(self, columns: t.List[str], values: t.List[str], conflict_column: str) -> None: """ This method serves as an abstraction layer from using SQL syntax in the top-level database table class, it runs the basic insert/update (upsert) command without needing to use SQL syntax at all. """ sql_columns = ", ".join(columns) sql_values = ", ".join(f"${n + 1}" for n in range(len(values))) sql_update = "" for index, column in enumerate(columns): if column != conflict_column: sql_update += f"{column}=${index + 1}" sql = f""" INSERT INTO {self.table} ({sql_columns}) VALUES ({sql_values}) ON CONFLICT ({conflict_column}) DO UPDATE SET {sql_update} """ await self.db_execute(sql, values) class Database(metaclass=Singleton): """ This is the main connection class with the postgres database. This class is here to ensure the ease of connecting and disconnecting from the database and loading the top-level database table classes. """ def __init__(self, db_parameters: dict, timeout: int = 5): required_parameters = set(["host", "database", "user", "password"]) # Make sure db_parameters contains all required keys by checking # if
#!/usr/bin/env python """Autogenerated python functions to serialize/deserialize binary messages. Generated by: ../../scripts/aisxmlbinmsg2py.py Need to then wrap these functions with the outer AIS packet and then convert the whole binary blob to a NMEA string. Those functions are not currently provided in this file. serialize: python to ais binary deserialize: ais binary to python The generated code uses translators.py, binary.py, and aisstring.py which should be packaged with the resulting files. TODO(schwehr):FIX: put in a description of the message here with fields and types. """ import doctest import sys from decimal import Decimal import unittest from aisutils.BitVector import BitVector from aisutils import aisstring from aisutils import binary from aisutils import sqlhelp from aisutils import uscg # FIX: check to see if these will be needed TrueBV = BitVector(bitstring="1") "Why always rebuild the True bit? This should speed things up a bunch" FalseBV = BitVector(bitstring="0") "Why always rebuild the False bit? This should speed things up a bunch" fieldList = ( 'vessel', 'direction', 'ETA_month', 'ETA_day', 'ETA_hour', 'ETA_min', 'reserved', ) fieldListPostgres = ( 'vessel', 'direction', 'ETA_month', 'ETA_day', 'ETA_hour', 'ETA_min', 'reserved', ) toPgFields = { } """ Go to the Postgis field names from the straight field name """ fromPgFields = { } """ Go from the Postgis field names to the straight field name """ pgTypes = { } """ Lookup table for each postgis field name to get its type. """ def encode(params, validate=False): '''Create a sls_lockschedule binary message payload to pack into an AIS Msg sls_lockschedule. Fields in params: - vessel(aisstr6): Vessel Name - direction(bool): Up bound/Down bound - ETA_month(uint): Estimated time of arrival month 1..12 - ETA_day(uint): Estimated time of arrival day of the month 1..31 - ETA_hour(uint): Estimated time of arrival UTC hours 0..23 - ETA_min(uint): Estimated time of arrival minutes - reserved(uint): Reserved bits for future use (field automatically set to "0") @param params: Dictionary of field names/values. Throws a ValueError exception if required is missing @param validate: Set to true to cause checking to occur. Runs slower. FIX: not implemented. @rtype: BitVector @return: encoded binary message (for binary messages, this needs to be wrapped in a msg 8 @note: The returned bits may not be 6 bit aligned. It is up to you to pad out the bits. ''' bvList = [] if 'vessel' in params: bvList.append(aisstring.encode(params['vessel'],90)) else: bvList.append(aisstring.encode('@@@@@@@@@@@@@@@',90)) if params["direction"]: bvList.append(TrueBV) else: bvList.append(FalseBV) bvList.append(binary.setBitVectorSize(BitVector(intVal=params['ETA_month']),4)) bvList.append(binary.setBitVectorSize(BitVector(intVal=params['ETA_day']),5)) bvList.append(binary.setBitVectorSize(BitVector(intVal=params['ETA_hour']),5)) bvList.append(binary.setBitVectorSize(BitVector(intVal=params['ETA_min']),6)) bvList.append(binary.setBitVectorSize(BitVector(intVal=0),19)) return binary.joinBV(bvList) def decode(bv, validate=False): '''Unpack a sls_lockschedule message. 
Fields in params: - vessel(aisstr6): Vessel Name - direction(bool): Up bound/Down bound - ETA_month(uint): Estimated time of arrival month 1..12 - ETA_day(uint): Estimated time of arrival day of the month 1..31 - ETA_hour(uint): Estimated time of arrival UTC hours 0..23 - ETA_min(uint): Estimated time of arrival minutes - reserved(uint): Reserved bits for future use (field automatically set to "0") @type bv: BitVector @param bv: Bits defining a message @param validate: Set to true to cause checking to occur. Runs slower. FIX: not implemented. @rtype: dict @return: params ''' #Would be nice to check the bit count here.. #if validate: # assert (len(bv)==FIX: SOME NUMBER) r = {} r['vessel']=aisstring.decode(bv[0:90]) r['direction']=bool(int(bv[90:91])) r['ETA_month']=int(bv[91:95]) r['ETA_day']=int(bv[95:100]) r['ETA_hour']=int(bv[100:105]) r['ETA_min']=int(bv[105:111]) r['reserved']=0 return r def decodevessel(bv, validate=False): return aisstring.decode(bv[0:90]) def decodedirection(bv, validate=False): return bool(int(bv[90:91])) def decodeETA_month(bv, validate=False): return int(bv[91:95]) def decodeETA_day(bv, validate=False): return int(bv[95:100]) def decodeETA_hour(bv, validate=False): return int(bv[100:105]) def decodeETA_min(bv, validate=False): return int(bv[105:111]) def decodereserved(bv, validate=False): return 0 def printHtml(params, out=sys.stdout): out.write("<h3>sls_lockschedule</h3>\n") out.write("<table border=\"1\">\n") out.write("<tr bgcolor=\"orange\">\n") out.write("<th align=\"left\">Field Name</th>\n") out.write("<th align=\"left\">Type</th>\n") out.write("<th align=\"left\">Value</th>\n") out.write("<th align=\"left\">Value in Lookup Table</th>\n") out.write("<th align=\"left\">Units</th>\n") out.write("</tr>\n") out.write("\n") out.write("<tr>\n") out.write("<td>vessel</td>\n") out.write("<td>aisstr6</td>\n") if 'vessel' in params: out.write(" <td>"+str(params['vessel'])+"</td>\n") out.write(" <td>"+str(params['vessel'])+"</td>\n") out.write("</tr>\n") out.write("\n") out.write("<tr>\n") out.write("<td>direction</td>\n") out.write("<td>bool</td>\n") if 'direction' in params: out.write(" <td>"+str(params['direction'])+"</td>\n") if str(params['direction']) in directionDecodeLut: out.write("<td>"+directionDecodeLut[str(params['direction'])]+"</td>") else: out.write("<td><i>Missing LUT entry</i></td>") out.write("</tr>\n") out.write("\n") out.write("<tr>\n") out.write("<td>ETA_month</td>\n") out.write("<td>uint</td>\n") if 'ETA_month' in params: out.write(" <td>"+str(params['ETA_month'])+"</td>\n") out.write(" <td>"+str(params['ETA_month'])+"</td>\n") out.write("</tr>\n") out.write("\n") out.write("<tr>\n") out.write("<td>ETA_day</td>\n") out.write("<td>uint</td>\n") if 'ETA_day' in params: out.write(" <td>"+str(params['ETA_day'])+"</td>\n") out.write(" <td>"+str(params['ETA_day'])+"</td>\n") out.write("</tr>\n") out.write("\n") out.write("<tr>\n") out.write("<td>ETA_hour</td>\n") out.write("<td>uint</td>\n") if 'ETA_hour' in params: out.write(" <td>"+str(params['ETA_hour'])+"</td>\n") out.write(" <td>"+str(params['ETA_hour'])+"</td>\n") out.write("</tr>\n") out.write("\n") out.write("<tr>\n") out.write("<td>ETA_min</td>\n") out.write("<td>uint</td>\n") if 'ETA_min' in params: out.write(" <td>"+str(params['ETA_min'])+"</td>\n") out.write(" <td>"+str(params['ETA_min'])+"</td>\n") out.write("</tr>\n") out.write("\n") out.write("<tr>\n") out.write("<td>reserved</td>\n") out.write("<td>uint</td>\n") if 'reserved' in params: out.write(" 
<td>"+str(params['reserved'])+"</td>\n") out.write(" <td>"+str(params['reserved'])+"</td>\n") out.write("</tr>\n") out.write("</table>\n") def printFields(params, out=sys.stdout, format='std', fieldList=None, dbType='postgres'): '''Print a sls_lockschedule message to stdout. Fields in params: - vessel(aisstr6): Vessel Name - direction(bool): Up bound/Down bound - ETA_month(uint): Estimated time of arrival month 1..12 - ETA_day(uint): Estimated time of arrival day of the month 1..31 - ETA_hour(uint): Estimated time of arrival UTC hours 0..23 - ETA_min(uint): Estimated time of arrival minutes - reserved(uint): Reserved bits for future use (field automatically set to "0") @param params: Dictionary of field names/values. @param out: File like object to write to. @rtype: stdout @return: text to out ''' if 'std'==format: out.write("sls_lockschedule:\n") if 'vessel' in params: out.write(" vessel: "+str(params['vessel'])+"\n") if 'direction' in params: out.write(" direction: "+str(params['direction'])+"\n") if 'ETA_month' in params: out.write(" ETA_month: "+str(params['ETA_month'])+"\n") if 'ETA_day' in params: out.write(" ETA_day: "+str(params['ETA_day'])+"\n") if 'ETA_hour' in params: out.write(" ETA_hour: "+str(params['ETA_hour'])+"\n") if 'ETA_min' in params: out.write(" ETA_min: "+str(params['ETA_min'])+"\n") if 'reserved' in params: out.write(" reserved: "+str(params['reserved'])+"\n") elif 'csv'==format: if None == options.fieldList: options.fieldList = fieldList needComma = False; for field in fieldList: if needComma: out.write(',') needComma = True if field in params: out.write(str(params[field])) # else: leave it empty out.write("\n") elif 'html'==format: printHtml(params,out) elif 'sql'==format: sqlInsertStr(params,out,dbType=dbType) else: print "ERROR: unknown format:",format assert False return # Nothing to return directionEncodeLut = { 'Down bound':'0', 'Up bound':'1', } #directionEncodeLut directionDecodeLut = { '0':'Down bound', '1':'Up bound', } # directionEncodeLut ###################################################################### # SQL SUPPORT ###################################################################### dbTableName='sls_lockschedule' 'Database table name' def sqlCreateStr(outfile=sys.stdout, fields=None, extraFields=None ,addCoastGuardFields=True ,dbType='postgres' ): """ Return the SQL CREATE command for this message type @param outfile: file like object to print to. @param fields: which fields to put in the create. Defaults to all. @param extraFields: A sequence of tuples containing (name,sql type) for additional fields @param addCoastGuardFields: Add the extra fields that come after the NMEA check some from the USCG N-AIS format @param dbType: Which flavor of database we are using so that the create is tailored ('sqlite' or 'postgres') @type addCoastGuardFields: bool @return: sql create string @rtype: str @see: sqlCreate """ # FIX: should this sqlCreate be the same as in LaTeX (createFuncName) rather than hard coded? outfile.write(str(sqlCreate(fields,extraFields,addCoastGuardFields,dbType=dbType))) def sqlCreate(fields=None, extraFields=None, addCoastGuardFields=True, dbType='postgres'): """Return the sqlhelp object to create the table. @param fields: which fields to put in the create. Defaults to all. 
@param extraFields: A sequence of tuples containing (name,sql type) for additional fields @param addCoastGuardFields: Add the extra fields that come after the NMEA check some from the USCG N-AIS format @type addCoastGuardFields: bool @param dbType: Which flavor of database we are using so that the create is tailored ('sqlite' or 'postgres') @return: An object that can be used to generate a return @rtype: sqlhelp.create """ if fields is None: fields = fieldList c = sqlhelp.create('sls_lockschedule',dbType=dbType) c.addPrimaryKey() if 'vessel' in fields: c.addVarChar('vessel',15) if 'direction' in fields: c.addBool('direction') if 'ETA_month' in fields: c.addInt ('ETA_month') if 'ETA_day' in fields: c.addInt ('ETA_day') if 'ETA_hour' in fields: c.addInt ('ETA_hour') if 'ETA_min' in fields: c.addInt ('ETA_min') if 'reserved' in fields: c.addInt ('reserved') if addCoastGuardFields: # c.addInt('cg_s_rssi') # Relative signal strength indicator # c.addInt('cg_d_strength') # dBm receive strength # c.addVarChar('cg_x',10) # Idonno c.addInt('cg_t_arrival') # Receive timestamp from the AIS equipment 'T' c.addInt('cg_s_slotnum') # Slot received in c.addVarChar('cg_r',15) # Receiver station ID - should usually be an MMSI, but sometimes is a string c.addInt('cg_sec') # UTC seconds since the epoch c.addTimestamp('cg_timestamp') # UTC decoded cg_sec - not actually in the data stream return c def sqlInsertStr(params, outfile=sys.stdout, extraParams=None, dbType='postgres'): """ Return the SQL INSERT command for this message type @param params: dictionary of values keyed by field name @param outfile: file like object to print to. @param extraParams: A sequence of tuples containing (name,sql type) for additional fields @return: sql create string @rtype: str @see: sqlCreate """ outfile.write(str(sqlInsert(params,extraParams,dbType=dbType))) def sqlInsert(params,extraParams=None,dbType='postgres'): """ Give the SQL INSERT statement @param params: dict keyed by field name of values @param extraParams: any extra fields that you have created beyond the normal ais message fields @rtype: sqlhelp.insert @return: insert class instance TODO(schwehr):allow optional type checking of params? @warning: this will take invalid keys happily and do what??? """ i = sqlhelp.insert('sls_lockschedule',dbType=dbType) if dbType=='postgres': finished = [] for key in params: if key in
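# Illustrative sketch -- not the generated aisutils code. It restates the
# fixed-width layout documented above for the unsigned fields that follow the
# 90-bit vessel name (direction 1 bit, ETA_month 4, ETA_day 5, ETA_hour 5,
# ETA_min 6, reserved 19 -> 40 bits, 130 bits in total with the name), using
# plain integers instead of BitVector. Helper names are hypothetical.
UINT_FIELDS = (
    ('direction', 1),
    ('ETA_month', 4),
    ('ETA_day', 5),
    ('ETA_hour', 5),
    ('ETA_min', 6),
    ('reserved', 19),
)

def pack_uint_fields(params):
    bits = 0
    for name, width in UINT_FIELDS:
        value = params.get(name, 0)
        assert 0 <= value < (1 << width), name
        bits = (bits << width) | value
    return bits

def unpack_uint_fields(bits):
    out = {}
    for name, width in reversed(UINT_FIELDS):
        out[name] = bits & ((1 << width) - 1)
        bits >>= width
    return out

if __name__ == '__main__':
    packed = pack_uint_fields({'direction': 1, 'ETA_month': 7, 'ETA_day': 21,
                               'ETA_hour': 13, 'ETA_min': 45, 'reserved': 0})
    print(unpack_uint_fields(packed))   # round-trips to the same values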
# -*- coding: utf-8 -*- """ @Module SARibbonBar @Author ROOT @brief SARibbonBar继承于QMenuBar,在SARibbonMainWindow中直接替换了原来的QMenuBar 通过setRibbonStyle()函数设置ribbon的风格 SARibbonBar参考office和wps,提供了四种风格的Ribbon模式,@ref SARibbonBar.RibbonStyle 如果想ribbon占用的空间足够小,WpsLiteStyleTwoRow模式能比OfficeStyle节省35%的高度空间 传统的Menu/ToolBar主要通过QMenu的addMenu添加菜单,通过addToolBar生成QToolBar, 再把QAction设置进QMenu和QToolBar中 SARibbonBar和传统方法相似,不过相对于传统的Menu/ToolBar QMenu和QToolBar是平级的, Ribbon是有明显的层级关系: SARibbonBar下面是 @ref SARibbonCategory, SARibbonCategory下面是@ref SARibbonPannel, SARibbonPannel下面是@ref SARibbonToolButton, SARibbonToolButton管理着QAction 生成一个ribbon只需以下几个函数: SARibbonBar.addCategoryPage(title: str) -> SARibbonCategory SARibbonCategory.addPannel(title: str) -> SARibbonPannel SARibbonPannel.addLargeAction(action: QAction) -> SARibbonToolButton SARibbonPannel.addSmallAction(action: QAction) -> SARibbonToolButton 因此生成步骤如下: @code de setupRibbonUi(): ...... # ribbonwindow为SARibbonMainWindow categoryMain = SARibbonCategory() filePannel = SARibbonPannel() ribbon = ribbonwindow.ribbonBar() ribbon.setRibbonStyle(SARibbonBar::WpsLiteStyle) # 添加一个Main标签 categoryMain = ribbon.addCategoryPage("Main") # Main标签下添加一个FilePannel filePannel = categoryMain.addPannel("FilePannel") # 开始为File Pannel添加action filePannel.addLargeAction(actionNew) filePannel.addLargeAction(actionOpen) filePannel.addLargeAction(actionSave) filePannel.addSmallAction(actionImportMesh) filePannel.addSmallAction(actionImportGeometry) ...... @endcode """ import PySARibbon.resource_rc from typing import List, Union from PyQt5.QtCore import Qt, QSize, pyqtSignal, QObject, QEvent, QRect, QPoint, QMargins from PyQt5.QtGui import QIcon, QPainter, QColor, QResizeEvent, QMouseEvent, QPen, QHoverEvent, QCursor from PyQt5.QtWidgets import QMenuBar, QAbstractButton, QApplication, QAction, QFrame, QStyle from .SAWidgets.SARibbonStackedWidget import SARibbonStackedWidget from .SAWidgets.SARibbonTabBar import SARibbonTabBar from .SATools.SARibbonElementManager import RibbonSubElementStyleOpt, RibbonSubElementDelegate from .SARibbonButtonGroupWidget import SARibbonButtonGroupWidget from .SARibbonQuickAccessBar import SARibbonQuickAccessBar from .SARibbonPannel import SARibbonPannel from .SARibbonCategory import SARibbonCategory from .SARibbonContextCategory import SARibbonContextCategory class _SAContextCategoryManagerData: def __init__(self): self.contextCategory: SARibbonContextCategory = None self.tabPageIndex: List[int] = list() def compare(self, context: SARibbonContextCategory): return self.contextCategory == context class _SARibbonTabData: def __init__(self): self.category: SARibbonCategory = None self.index = -1 class SARibbonBarPrivate: def __init__(self, parent): self.mainClass = parent self.iconRightBorderPosition = 1 self.mContextCategoryColorListIndex = -1 # 记录contextCategory色系索引 self.ribbonStyle: int = SARibbonBar.OfficeStyle self.lastShowStyle: int = SARibbonBar.OfficeStyle self.currentRibbonMode: int = SARibbonBar.NormalRibbonMode self.applitionButton: QAbstractButton = RibbonSubElementDelegate.createRibbonApplicationButton(self.mainClass) self.ribbonTabBar: SARibbonTabBar = RibbonSubElementDelegate.createRibbonTabBar(self.mainClass) self.stackedContainerWidget: SARibbonStackedWidget = RibbonSubElementDelegate.createRibbonStackedWidget(self.mainClass) self.quickAccessBar: SARibbonQuickAccessBar = SARibbonQuickAccessBar(self.mainClass) self.minimumCaterogyAction: QAction = None self.tabBarRightSizeButtonGroupWidget: SARibbonButtonGroupWidget = None self.windowButtonSize = 
QSize(100, RibbonSubElementStyleOpt.titleBarHeight) self.mHidedCategoryList: List[_SARibbonTabData] = list() self.mContextCategoryList: List[SARibbonContextCategory] = list() # 存放所有的上下文标签 self.currentShowingContextCategory: List[_SAContextCategoryManagerData] = list() # contextCategory的色系 self.mContextCategoryColorList: List[QColor] = [ QColor(201, 89, 156), # 玫红 QColor(242, 203, 29), # 黄 QColor(255, 157, 0), # 橙 QColor(14, 81, 167), # 蓝 QColor(228, 0, 69), # 红 QColor(67, 148, 0), # 绿 ] def init(self): """初始化ApplicationButton, RibbonTabBar, StackedContainerWidget, QuickAccessBar相关设置""" self.applitionButton.setObjectName("objSAApplicationButton") self.applitionButton.clicked.connect(self.mainClass.applitionButtonClicked) self.ribbonTabBar.setObjectName("objSARibbonTabBar") # self.ribbonTabBar.setDrawBase(False) self.ribbonTabBar.setDrawBase(True) self.ribbonTabBar.tabBarClicked.connect(self.mainClass.onCurrentRibbonTabClicked) self.ribbonTabBar.tabBarDoubleClicked.connect(self.mainClass.onCurrentRibbonTabDoubleClicked) self.ribbonTabBar.tabMoved.connect(self.mainClass.onTabMoved) self.ribbonTabBar.currentChanged.connect(self.mainClass.onCurrentRibbonTabChanged) self.stackedContainerWidget.setObjectName("objSAStackedContainerWidget") self.stackedContainerWidget.hidWindow.connect(self.mainClass.onStackWidgetHided) self.stackedContainerWidget.installEventFilter(self.mainClass) self.quickAccessBar.setObjectName("objSARibbonQuickAccessBar") self.setNormalMode() def setApplicationButton(self, btn: QAbstractButton): if not btn: return if btn.parent() != self.mainClass: btn.setParent(self.mainClass) if not btn.objectName(): btn.setObjectName('objSAApplicationButton') btn.setVisible(True) btn.move(0, RibbonSubElementStyleOpt.titleBarHeight) self.applitionButton = btn self.applitionButton.clicked.connect(self.mainClass.applitionButtonClicked) def isContainContextCategoryInList(self, contextCategory: SARibbonContextCategory) -> bool: for iContextCategory in self.currentShowingContextCategory: if iContextCategory.compare(contextCategory): return True return False def setHideMode(self): self.currentRibbonMode = SARibbonBar.MinimumRibbonMode self.stackedContainerWidget.setPopupMode() self.stackedContainerWidget.setFocusPolicy(Qt.NoFocus) self.stackedContainerWidget.clearFocus() self.ribbonTabBar.setFocus() self.stackedContainerWidget.hide() self.mainClass.setFixedHeight(self.ribbonTabBar.geometry().bottom()) def setNormalMode(self): self.currentRibbonMode = SARibbonBar.NormalRibbonMode self.stackedContainerWidget.setNormalMode() self.stackedContainerWidget.setFocus() self.stackedContainerWidget.show() def getContextCategoryColor(self) -> QColor: if not self.mContextCategoryColorList: self.mContextCategoryColorListIndex = -1 return QColor() self.mContextCategoryColorListIndex += 1 if self.mContextCategoryColorListIndex >= len(self.mContextCategoryColorList) or self.mContextCategoryColorListIndex < 0: self.mContextCategoryColorListIndex = 0 return self.mContextCategoryColorList[self.mContextCategoryColorListIndex] class SARibbonBar(QMenuBar): def __init__(self, parent=None): super().__init__(parent) self.m_d = SARibbonBarPrivate(self) self.m_d.init() self.setRibbonStyle(SARibbonBar.OfficeStyle) if parent: parent.windowTitleChanged.connect(self.onWindowTitleChanged) parent.windowIconChanged.connect(self.onWindowIconChanged) # 重绘,若不执行此,会出现刚开始applicationButton尺寸异常 QApplication.postEvent(self, QResizeEvent(self.size(), self.size())) @staticmethod def checkTwoRowStyle(style) -> bool: # C++为isTwoRowStyle 
"""判断RibbonStyle是否为2行模式""" return style & 0xFF00 > 0 @staticmethod def checkOfficeStyle(style) -> bool: # C++为isOfficeStyle """判断是否是office样式""" return style & 0xFF == 0 def setTitle(self, title: str): self.applicationButton().setText(title) def applicationButton(self) -> QAbstractButton: """ 获取applicationButton """ return self.m_d.applitionButton def setApplicationButton(self, btn: QAbstractButton): """ 设置applicationButton """ self.m_d.setApplicationButton(btn) # 无论设置什么都触发resize QApplication.postEvent(self, QResizeEvent(self.size(), self.size())) def ribbonTabBar(self) -> SARibbonTabBar: """ 获取tabbar """ return self.m_d.ribbonTabBar def addCategoryPage(self, *__args): """ 添加一个标签 addCategoryPage(self, str) -> SARibbonCategory addCategoryPage(self, SARibbonCategory) """ if len(__args) < 1: raise Exception('parameters length < 1') if isinstance(__args[0], str): title: str = __args[0] category = SARibbonCategory(self) category.setObjectName(title) category.setWindowTitle(title) self.addCategoryPage(category) return category else: category: SARibbonCategory = __args[0] category.setRibbonBar(self) mode = SARibbonPannel.TwoRowMode if self.isTwoRowStyle() else SARibbonPannel.ThreeRowMode category.setRibbonPannelLayoutMode(mode) index = self.m_d.ribbonTabBar.addTab(category.windowTitle()) tabdata = _SARibbonTabData() tabdata.category = category tabdata.index = index self.m_d.ribbonTabBar.setTabData(index, tabdata) self.m_d.stackedContainerWidget.insertWidget(index, category) category.windowTitleChanged.connect(self.onCategoryWindowTitleChanged) def insertCategoryPage(self, *__args): """在index添加一个category,如果当前category数量少于index,则插入到最后 insertCategoryPage(self, str, int) -> SARibbonCategory insertCategoryPage(self, SARibbonCategory, int) """ if len(__args) < 2: raise Exception('parameters length < 2') if isinstance(__args[0], str): title = __args[0] index = __args[1] category = SARibbonCategory(self) category.setObjectName(title) category.setWindowTitle(title) self.insertCategoryPage(category, index) return category else: category: SARibbonCategory = __args[0] index = __args[1] i = self.m_d.ribbonTabBar.insertTab(index, category.windowTitle()) mode = SARibbonPannel.TwoRowMode if self.isTwoRowStyle() else SARibbonPannel.ThreeRowMode category.setRibbonPannelLayoutMode(mode) tabdata = _SARibbonTabData() tabdata.category = category tabdata.index = i self.m_d.ribbonTabBar.setTabData(i, tabdata) self.m_d.stackedContainerWidget.insertWidget(index, category) category.windowTitleChanged.connect(self.onCategoryWindowTitleChanged) QApplication.postEvent(self, QResizeEvent(self.size(), self.size())) def categoryByName(self, title: str) -> Union[SARibbonCategory, None]: """通过名字查找Category""" c = self.m_d.stackedContainerWidget.count() for i in range(c): w = self.m_d.stackedContainerWidget.widget(i) if w and w.windowTitle() == title: return w return None def categoryByObjectName(self, objname: str) -> Union[SARibbonCategory, None]: """通过ObjectName查找Category""" c = self.m_d.stackedContainerWidget.count() for i in range(c): w = self.m_d.stackedContainerWidget.widget(i) if w and w.objectName() == objname: return w return None def categoryByIndex(self, index: int) -> Union[SARibbonCategory, None]: """ 通过索引找到category,如果超过索引范围,会返回None """ var: _SARibbonTabData = self.m_d.ribbonTabBar.tabData(index) if var: return var.category return None def hideCategory(self, category: SARibbonCategory): """隐藏category,并不会删除或者取走,只是隐藏""" c = self.m_d.ribbonTabBar.count() for i in range(c): var: _SARibbonTabData = 
self.m_d.ribbonTabBar.tabData(i) if var.category == category: self.m_d.mHidedCategoryList.append(var) self.m_d.ribbonTabBar.removeTab(i) # 仅仅把tab移除 def showCategory(self, category: SARibbonCategory): """显示被隐藏的category""" for i, c in enumerate(self.m_d.mHidedCategoryList): if category == c.category: index = self.m_d.ribbonTabBar.insertTab(c.index, c.category.windowTitle()) c.index = index self.m_d.ribbonTabBar.setTabData(index, c) self.m_d.mHidedCategoryList.pop(i) return self.raiseCategory(category) def isCategoryVisible(self, category: SARibbonCategory) -> bool: """ 判断这个category是否在显示状态,也就是tabbar有这个category """ return self.categoryIndex(category) >= 0 def categoryIndex(self, category: SARibbonCategory) -> int: """ 获取category的索引 """ cnt = self.m_d.ribbonTabBar.count() for i in range(cnt): c: _SARibbonTabData = self.m_d.ribbonTabBar.tabData(i) if category == c.category: return i return -1 def moveCategory(self, fr: int, to: int): """移动一个Category从fr index到to index""" self.m_d.ribbonTabBar.moveTab(fr, to) # 这时要刷新所有tabdata的index信息 cnt = self.m_d.ribbonTabBar.count() for i in range(cnt): c: _SARibbonTabData = self.m_d.ribbonTabBar.tabData(i) c.index = i self.m_d.ribbonTabBar.setTabData(i, c) # 这里会触发tabMoved信号,在tabMoved信号中调整stacked里窗口的位置 def categoryPages(self, allGet: bool=True) -> List[SARibbonCategory]: """ 获取当前显示的所有的SARibbonCategory """ res = list() cnt = self.m_d.stackedContainerWidget.count() for i in range(cnt): w = self.m_d.stackedContainerWidget.widget(i) if not allGet and w.isContextCategory(): continue res.append(w) return res def removeCategory(self, category: SARibbonCategory): """移除SARibbonCategory,表现在tabbar会移除,面板会移除""" index = self.tabIndex(category) if index >= 0: self.m_d.ribbonTabBar.removeTab(index) self.m_d.stackedContainerWidget.removeWidget(category) # 同时验证这个category是否是contexcategory里的 for c in self.m_d.mContextCategoryList: c.takeCategory(category) self.updateContextCategoryManagerData() # 移除完后需要重绘 self.repaint() QApplication.postEvent(self, QResizeEvent(self.size(), self.size())) def addContextCategory(self, *_args) -> Union[None, SARibbonContextCategory]: """添加一个上下文标签 addContextCategory(self, str, color=None, id=None) -> SARibbonContextCategory addContextCategory(self, SARibbonContextCategory) """ if len(_args) < 1: raise Exception('parameters length < 1') if isinstance(_args[0], str): title: str = _args[0] color: QColor = None if len(_args) < 2 else _args[1] index = None if len(_args) < 3 else _args[2] if not(color and color.isValid()): color = self.m_d.getContextCategoryColor() context = SARibbonContextCategory(self) context.setObjectName(title) context.setContextTitle(title) context.setContextColor(color) context.setId(index) self.addContextCategory(context) return context else: context: SARibbonContextCategory = _args[0] context.categoryPageAdded.connect(self.onContextsCategoryPageAdded) # remove并没有绑定,主要是remove后在stacked里也不会显示,remove且delete的话,stacked里也会删除 if self.currentRibbonStyle() == SARibbonBar.WpsLiteStyle: self.resizeInWpsLiteStyle() self.m_d.mContextCategoryList.append(context) def showContextCategory(self, context: SARibbonContextCategory): """显示上下文标签""" if self.isContextCategoryVisible(context): return contextCategoryData = _SAContextCategoryManagerData() contextCategoryData.contextCategory = context for i in range(context.categoryCount()): category = context.categoryPage(i) mode = SARibbonPannel.TwoRowMode if self.isTwoRowStyle() else SARibbonPannel.ThreeRowMode category.setRibbonPannelLayoutMode(mode) index = 
self.m_d.ribbonTabBar.addTab(category.windowTitle()) contextCategoryData.tabPageIndex.append(index) tabdata = _SARibbonTabData() tabdata.category = category tabdata.index = index self.m_d.ribbonTabBar.setTabData(index, tabdata) self.m_d.currentShowingContextCategory.append(contextCategoryData) QApplication.postEvent(self, QResizeEvent(self.size(), self.size())) self.repaint() def hideContextCategory(self, context: SARibbonContextCategory): """隐藏上下文标签""" ishide = False for i, category in enumerate(self.m_d.currentShowingContextCategory): if context.compare(category.contextCategory): indexs = category.tabPageIndex for index in reversed(indexs): self.m_d.ribbonTabBar.removeTab(index) self.m_d.currentShowingContextCategory.pop(i) ishide = True if ishide: QApplication.postEvent(self, QResizeEvent(self.size(), self.size())) self.repaint() def isContextCategoryVisible(self, context: SARibbonContextCategory) -> bool: """ 判断上下文是否在显示状态 """ return self.m_d.isContainContextCategoryInList(context) def setContextCategoryVisible(self, context: SARibbonContextCategory, visible: bool): """设置上下文标签的显示状态""" if visible: self.showContextCategory(context) else: self.hideContextCategory(context) def contextCategoryList(self) -> List[SARibbonContextCategory]: """ 获取所有的上下文标签 """ return self.m_d.mContextCategoryList def destroyContextCategory(self, context: SARibbonContextCategory): """销毁上下文标签,上下文标签的SARibbonCategory也会随之销毁""" # 如果上下文标签显示中,先隐藏 if self.isContextCategoryVisible(context): self.hideContextCategory(context) # 删除上下文标签的相关内容 self.m_d.mContextCategoryList.remove(context) # C++是removeAll # self.m_d.mContextCategoryList = [c for c in self.m_d.mContextCategoryList if c != context] res = context.categoryList() for c in res: c.hide() c.deleteLater() context.deleteLater() QApplication.postEvent(self, QResizeEvent(self.size(), self.size())) def setMinimumMode(self, isMinimum: bool): """设置为最小/正常模式 默认下双击tabbar会切换隐藏显示模式,如果想禁用此功能, 可重载onCurrentRibbonTabDoubleClicked() 函数,不对函数进行任何处理即可 """ if isMinimum: self.m_d.setHideMode() else: self.m_d.setNormalMode() QApplication.sendEvent(self, QResizeEvent(self.size(), self.size())) def isMinimumMode(self) -> bool: """当前ribbon是否是隐藏模式""" return self.m_d.stackedContainerWidget.isPopupMode() def showMinimumModeButton(self, isShow=True): """设置显示隐藏ribbon按钮""" if isShow: rightBar = self.activeTabBarRightButtonGroup() if not self.m_d.minimumCaterogyAction: panBtn = RibbonSubElementDelegate.createHidePannelButton(self) panBtn.ensurePolished() # 载入样式图标 icon = QStyle.SP_TitleBarShadeButton if self.isMinimumMode() else QStyle.SP_TitleBarUnshadeButton action = QAction(self.style().standardIcon(icon), 'Hide', panBtn) action.setCheckable(True) action.triggered.connect(lambda on: self.setMinimumMode(on)) panBtn.setDefaultAction(action) self.m_d.minimumCaterogyAction = rightBar.addWidget(panBtn) self.update() else: if self.m_d.minimumCaterogyAction: self.m_d.tabBarRightSizeButtonGroupWidget.hideWidget(self.m_d.minimumCaterogyAction) self.m_d.minimumCaterogyAction = None QApplication.sendEvent(self, QResizeEvent(self.size(), self.size())) def haveShowMinimumModeButton(self) -> bool: """是否显示隐藏ribbon按钮""" ret = self.m_d.minimumCaterogyAction is None return not ret def tabBarHeight(self) -> int: """ribbon tab的高度""" return RibbonSubElementStyleOpt.tabBarHeight def titleBarHeight(self) -> int: """ribbon title的高度""" return RibbonSubElementStyleOpt.titleBarHeight def activeTabBarRightButtonGroup(self) -> SARibbonButtonGroupWidget: """激活tabbar右边的按钮群""" if not self.m_d.tabBarRightSizeButtonGroupWidget: 
self.m_d.tabBarRightSizeButtonGroupWidget = SARibbonButtonGroupWidget(self) self.m_d.tabBarRightSizeButtonGroupWidget.setFrameShape(QFrame.NoFrame) self.m_d.tabBarRightSizeButtonGroupWidget.show() if not self.m_d.tabBarRightSizeButtonGroupWidget.isVisible(): self.m_d.tabBarRightSizeButtonGroupWidget.setVisible(True) return self.m_d.tabBarRightSizeButtonGroupWidget def quickAccessBar(self) -> SARibbonQuickAccessBar: """快速响应栏""" return self.m_d.quickAccessBar def setRibbonStyle(self, style): """设置ribbonbar的风格,此函数会重新设置所有元素""" self.m_d.ribbonStyle = style self.m_d.lastShowStyle = style self.updateRibbonElementGeometry() oldSize = self.size() newSize = QSize(oldSize.width(), self.mainBarHeight()) QApplication.sendEvent(self, QResizeEvent(newSize, oldSize)) if SARibbonBar.MinimumRibbonMode == self.currentRibbonState(): # 处于最小模式下时,bar的高度为tabbar的bottom,这个调整必须在resize event之后 self.setFixedHeight(self.m_d.ribbonTabBar.geometry().bottom()) def currentRibbonStyle(self) -> int: """返回当前ribbon的风格""" return self.m_d.ribbonStyle def currentRibbonState(self) -> int: """当前的模式""" return self.m_d.currentRibbonMode def setCurrentIndex(self, index: int): """设置当前ribbon的index""" self.m_d.ribbonTabBar.setCurrentIndex(index) def currentIndex(self) -> int: """返回当前的tab索引""" return self.m_d.ribbonTabBar.currentIndex() def raiseCategory(self, category: SARibbonCategory): """确保标签显示出来,tab并切换到对应页""" index = self.m_d.stackedContainerWidget.indexOf(category) if index >= 0: self.setCurrentIndex(index) def isTwoRowStyle(self) ->
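# --------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): the snippet below
# only illustrates how the SARibbonBar API defined above could be driven.
# It assumes PyQt5 as the Qt binding and that SARibbonBar is importable from
# this module; adjust names and imports to the real package layout. Pannels
# and actions would normally be added on the returned category objects via
# APIs not shown in this excerpt.
def _ribbon_usage_sketch():
    import sys
    from PyQt5.QtWidgets import QApplication, QMainWindow

    app = QApplication(sys.argv)
    win = QMainWindow()

    ribbon = SARibbonBar(win)          # parent wiring syncs window title/icon
    win.setMenuBar(ribbon)             # SARibbonBar subclasses QMenuBar

    # A regular tab: addCategoryPage(str) creates and returns the category.
    home = ribbon.addCategoryPage("Home")

    # A context tab stays hidden until explicitly shown.
    edit_ctx = ribbon.addContextCategory("Edit Tools")
    ribbon.setContextCategoryVisible(edit_ctx, True)

    ribbon.setMinimumMode(False)       # keep the pannel area expanded
    win.show()
    return app.exec_()
# --------------------------------------------------------------------------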
# Copyright 2020 StrongDM Inc # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # This file was generated by protogen. DO NOT EDIT. import collections class CreateResponseMetadata: """CreateResponseMetadata is reserved for future use.""" __slots__ = [] def __init__(self, ): pass def __repr__(self): return '<sdm.CreateResponseMetadata ' + \ '>' def to_dict(self): return {} @classmethod def from_dict(cls, d): return cls() class GetResponseMetadata: """GetResponseMetadata is reserved for future use.""" __slots__ = [] def __init__(self, ): pass def __repr__(self): return '<sdm.GetResponseMetadata ' + \ '>' def to_dict(self): return {} @classmethod def from_dict(cls, d): return cls() class UpdateResponseMetadata: """UpdateResponseMetadata is reserved for future use.""" __slots__ = [] def __init__(self, ): pass def __repr__(self): return '<sdm.UpdateResponseMetadata ' + \ '>' def to_dict(self): return {} @classmethod def from_dict(cls, d): return cls() class DeleteResponseMetadata: """DeleteResponseMetadata is reserved for future use.""" __slots__ = [] def __init__(self, ): pass def __repr__(self): return '<sdm.DeleteResponseMetadata ' + \ '>' def to_dict(self): return {} @classmethod def from_dict(cls, d): return cls() class RateLimitMetadata: """RateLimitMetadata contains information about remaining requests avaialable to the user over some timeframe. :param limit: How many total requests the user/token is authorized to make before being rate limited. :param remaining: How many remaining requests out of the limit are still avaialable. :param reset_at: The time when remaining will be reset to limit. :param bucket: The bucket this user/token is associated with, which may be shared between multiple users/tokens. 
""" __slots__ = [ 'limit', 'remaining', 'reset_at', 'bucket', ] def __init__( self, limit=None, remaining=None, reset_at=None, bucket=None, ): self.limit = limit self.remaining = remaining self.reset_at = reset_at self.bucket = bucket def __repr__(self): return '<sdm.RateLimitMetadata ' + \ 'limit: ' + repr(self.limit) + ' ' +\ 'remaining: ' + repr(self.remaining) + ' ' +\ 'reset_at: ' + repr(self.reset_at) + ' ' +\ 'bucket: ' + repr(self.bucket) + ' ' +\ '>' def to_dict(self): return { 'limit': self.limit, 'remaining': self.remaining, 'reset_at': self.reset_at, 'bucket': self.bucket, } @classmethod def from_dict(cls, d): return cls( limit=d.get('limit'), remaining=d.get('remaining'), reset_at=d.get('reset_at'), bucket=d.get('bucket'), ) class Tag: """ :param name: :param value: """ __slots__ = [ 'name', 'value', ] def __init__( self, name=None, value=None, ): self.name = name self.value = value def __repr__(self): return '<sdm.Tag ' + \ 'name: ' + repr(self.name) + ' ' +\ 'value: ' + repr(self.value) + ' ' +\ '>' def to_dict(self): return { 'name': self.name, 'value': self.value, } @classmethod def from_dict(cls, d): return cls( name=d.get('name'), value=d.get('value'), ) class AccountAttachmentCreateResponse: """AccountAttachmentCreateResponse reports how the AccountAttachments were created in the system. :param meta: Reserved for future use. :param account_attachment: The created AccountAttachment. :param rate_limit: Rate limit information. """ __slots__ = [ 'meta', 'account_attachment', 'rate_limit', ] def __init__( self, meta=None, account_attachment=None, rate_limit=None, ): self.meta = meta self.account_attachment = account_attachment self.rate_limit = rate_limit def __repr__(self): return '<sdm.AccountAttachmentCreateResponse ' + \ 'meta: ' + repr(self.meta) + ' ' +\ 'account_attachment: ' + repr(self.account_attachment) + ' ' +\ 'rate_limit: ' + repr(self.rate_limit) + ' ' +\ '>' def to_dict(self): return { 'meta': self.meta, 'account_attachment': self.account_attachment, 'rate_limit': self.rate_limit, } @classmethod def from_dict(cls, d): return cls( meta=d.get('meta'), account_attachment=d.get('account_attachment'), rate_limit=d.get('rate_limit'), ) class AccountAttachmentGetResponse: """AccountAttachmentGetResponse returns a requested AccountAttachment. :param meta: Reserved for future use. :param account_attachment: The requested AccountAttachment. :param rate_limit: Rate limit information. """ __slots__ = [ 'meta', 'account_attachment', 'rate_limit', ] def __init__( self, meta=None, account_attachment=None, rate_limit=None, ): self.meta = meta self.account_attachment = account_attachment self.rate_limit = rate_limit def __repr__(self): return '<sdm.AccountAttachmentGetResponse ' + \ 'meta: ' + repr(self.meta) + ' ' +\ 'account_attachment: ' + repr(self.account_attachment) + ' ' +\ 'rate_limit: ' + repr(self.rate_limit) + ' ' +\ '>' def to_dict(self): return { 'meta': self.meta, 'account_attachment': self.account_attachment, 'rate_limit': self.rate_limit, } @classmethod def from_dict(cls, d): return cls( meta=d.get('meta'), account_attachment=d.get('account_attachment'), rate_limit=d.get('rate_limit'), ) class AccountAttachmentDeleteResponse: """AccountAttachmentDeleteResponse returns information about a AccountAttachment that was deleted. :param meta: Reserved for future use. :param rate_limit: Rate limit information. 
""" __slots__ = [ 'meta', 'rate_limit', ] def __init__( self, meta=None, rate_limit=None, ): self.meta = meta self.rate_limit = rate_limit def __repr__(self): return '<sdm.AccountAttachmentDeleteResponse ' + \ 'meta: ' + repr(self.meta) + ' ' +\ 'rate_limit: ' + repr(self.rate_limit) + ' ' +\ '>' def to_dict(self): return { 'meta': self.meta, 'rate_limit': self.rate_limit, } @classmethod def from_dict(cls, d): return cls( meta=d.get('meta'), rate_limit=d.get('rate_limit'), ) class AccountAttachment: """AccountAttachments assign an account to a role or composite role. :param id: Unique identifier of the AccountAttachment. :param account_id: The id of the account of this AccountAttachment. :param role_id: The id of the attached role of this AccountAttachment. """ __slots__ = [ 'id', 'account_id', 'role_id', ] def __init__( self, id=None, account_id=None, role_id=None, ): self.id = id self.account_id = account_id self.role_id = role_id def __repr__(self): return '<sdm.AccountAttachment ' + \ 'id: ' + repr(self.id) + ' ' +\ 'account_id: ' + repr(self.account_id) + ' ' +\ 'role_id: ' + repr(self.role_id) + ' ' +\ '>' def to_dict(self): return { 'id': self.id, 'account_id': self.account_id, 'role_id': self.role_id, } @classmethod def from_dict(cls, d): return cls( id=d.get('id'), account_id=d.get('account_id'), role_id=d.get('role_id'), ) class AccountGrantCreateResponse: """AccountGrantCreateResponse reports how the AccountGrants were created in the system. :param meta: Reserved for future use. :param account_grant: The created AccountGrant. :param rate_limit: Rate limit information. """ __slots__ = [ 'meta', 'account_grant', 'rate_limit', ] def __init__( self, meta=None, account_grant=None, rate_limit=None, ): self.meta = meta self.account_grant = account_grant self.rate_limit = rate_limit def __repr__(self): return '<sdm.AccountGrantCreateResponse ' + \ 'meta: ' + repr(self.meta) + ' ' +\ 'account_grant: ' + repr(self.account_grant) + ' ' +\ 'rate_limit: ' + repr(self.rate_limit) + ' ' +\ '>' def to_dict(self): return { 'meta': self.meta, 'account_grant': self.account_grant, 'rate_limit': self.rate_limit, } @classmethod def from_dict(cls, d): return cls( meta=d.get('meta'), account_grant=d.get('account_grant'), rate_limit=d.get('rate_limit'), ) class AccountGrantGetResponse: """AccountGrantGetResponse returns a requested AccountGrant. :param meta: Reserved for future use. :param account_grant: The requested AccountGrant. :param rate_limit: Rate limit information. """ __slots__ = [ 'meta', 'account_grant', 'rate_limit', ] def __init__( self, meta=None, account_grant=None, rate_limit=None, ): self.meta = meta self.account_grant = account_grant self.rate_limit = rate_limit def __repr__(self): return '<sdm.AccountGrantGetResponse ' + \ 'meta: ' + repr(self.meta) + ' ' +\ 'account_grant: ' + repr(self.account_grant) + ' ' +\ 'rate_limit: ' + repr(self.rate_limit) + ' ' +\ '>' def to_dict(self): return { 'meta': self.meta, 'account_grant': self.account_grant, 'rate_limit': self.rate_limit, } @classmethod def from_dict(cls, d): return cls( meta=d.get('meta'), account_grant=d.get('account_grant'), rate_limit=d.get('rate_limit'), ) class AccountGrantDeleteResponse: """AccountGrantDeleteResponse returns information about a AccountGrant that was deleted. :param meta: Reserved for future use. :param rate_limit: Rate limit information. 
""" __slots__ = [ 'meta', 'rate_limit', ] def __init__( self, meta=None, rate_limit=None, ): self.meta = meta self.rate_limit = rate_limit def __repr__(self): return '<sdm.AccountGrantDeleteResponse ' + \ 'meta: ' + repr(self.meta) + ' ' +\ 'rate_limit: ' + repr(self.rate_limit) + ' ' +\ '>' def to_dict(self): return { 'meta': self.meta, 'rate_limit': self.rate_limit, } @classmethod def from_dict(cls, d): return cls( meta=d.get('meta'), rate_limit=d.get('rate_limit'), ) class AccountGrant: """AccountGrants connect a resource directly to an account, giving the account the permission to connect to that resource. :param id: Unique identifier of the AccountGrant. :param resource_id: The id of the composite role of this AccountGrant. :param account_id: The id of the attached role of this AccountGrant. :param start_from: The timestamp when the resource will be granted. Optional. Both start_at and end_at must be defined together, or not defined at all. :param valid_until: The timestamp when the resource grant will expire. Optional. Both start_at and end_at must be defined together, or not defined at all. """ __slots__ = [ 'id', 'resource_id', 'account_id', 'start_from', 'valid_until', ] def __init__( self, id=None, resource_id=None, account_id=None, start_from=None, valid_until=None, ): self.id = id self.resource_id = resource_id self.account_id = account_id self.start_from = start_from self.valid_until = valid_until def __repr__(self): return '<sdm.AccountGrant ' + \ 'id: ' + repr(self.id)
volume_nodes.GetNextItemAsObject() while volume_node: if 'M_Y4' in volume_node.GetName(): VolumeNode = volume_node break volume_node = volume_nodes.GetNextItemAsObject() elif 8 < ageLabel <= 15: VolumePath = os.path.join(ModuleDir, 'Templates', 'M_Y9.nrrd') slicer.util.loadVolume(VolumePath) volume_nodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLVolumeNode') volume_nodes.UnRegister(slicer.mrmlScene) volume_nodes.InitTraversal() volume_node = volume_nodes.GetNextItemAsObject() while volume_node: if 'M_Y9' in volume_node.GetName(): VolumeNode = volume_node break volume_node = volume_nodes.GetNextItemAsObject() else: VolumePath = os.path.join(ModuleDir, 'Templates', 'M_Y15.nrrd') slicer.util.loadVolume(VolumePath) volume_nodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLVolumeNode') volume_nodes.UnRegister(slicer.mrmlScene) volume_nodes.InitTraversal() volume_node = volume_nodes.GetNextItemAsObject() while volume_node: if 'M_Y15' in volume_node.GetName(): VolumeNode = volume_node break volume_node = volume_nodes.GetNextItemAsObject() else: # Female patient if 0 <= ageLabel <= 1: VolumePath = os.path.join(ModuleDir, 'Templates', 'F_M7.nrrd') slicer.util.loadVolume(VolumePath) volume_nodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLVolumeNode') volume_nodes.UnRegister(slicer.mrmlScene) volume_nodes.InitTraversal() volume_node = volume_nodes.GetNextItemAsObject() while volume_node: if 'F_M7' in volume_node.GetName(): VolumeNode = volume_node break volume_node = volume_nodes.GetNextItemAsObject() elif 1 < ageLabel <= 3: VolumePath = os.path.join(ModuleDir, 'Templates', 'F_Y2.nrrd') slicer.util.loadVolume(VolumePath) volume_nodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLVolumeNode') volume_nodes.UnRegister(slicer.mrmlScene) volume_nodes.InitTraversal() volume_node = volume_nodes.GetNextItemAsObject() while volume_node: if 'F_Y2' in volume_node.GetName(): VolumeNode = volume_node break volume_node = volume_nodes.GetNextItemAsObject() elif 3 < ageLabel <= 8: VolumePath = os.path.join(ModuleDir, 'Templates', 'F_Y4.nrrd') slicer.util.loadVolume(VolumePath) volume_nodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLVolumeNode') volume_nodes.UnRegister(slicer.mrmlScene) volume_nodes.InitTraversal() volume_node = volume_nodes.GetNextItemAsObject() while volume_node: if 'F_Y4' in volume_node.GetName(): VolumeNode = volume_node break volume_node = volume_nodes.GetNextItemAsObject() elif 8 < ageLabel <= 13: VolumePath = os.path.join(ModuleDir, 'Templates', 'F_Y9.nrrd') slicer.util.loadVolume(VolumePath) volume_nodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLVolumeNode') volume_nodes.UnRegister(slicer.mrmlScene) volume_nodes.InitTraversal() volume_node = volume_nodes.GetNextItemAsObject() while volume_node: if 'F_Y9' in volume_node.GetName(): VolumeNode = volume_node break volume_node = volume_nodes.GetNextItemAsObject() else: VolumePath = os.path.join(ModuleDir, 'Templates', 'F_Y15.nrrd') slicer.util.loadVolume(VolumePath) volume_nodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLVolumeNode') volume_nodes.UnRegister(slicer.mrmlScene) volume_nodes.InitTraversal() volume_node = volume_nodes.GetNextItemAsObject() while volume_node: if 'F_Y15' in volume_node.GetName(): VolumeNode = volume_node break volume_node = volume_nodes.GetNextItemAsObject() # # # fixedID = self.volumeDialogSelectors['Fixed'].currentNodeID # movingID = self.volumeDialogSelectors['Moving'].currentNodeID movingID = VolumeNode.GetID() if fixedID and movingID: self.volumeSelectors['Fixed'].setCurrentNodeID(fixedID) 
self.volumeSelectors['Moving'].setCurrentNodeID(movingID) fix = self.volumeSelectors['Fixed'].currentNode() self.FixOrigin = fix.GetOrigin() # extraction of the origin of the original MRI mov = self.volumeSelectors['Moving'].currentNode() # IMAG2: center fixed and moving volumes VolumesLogic = slicer.modules.volumes.logic() try: temp = slicer.util.getNode(fix.GetName()) except slicer.util.MRMLNodeNotFoundException: temp = None VolumesLogic.CenterVolume(temp) try: temp = slicer.util.getNode(mov.GetName()) except slicer.util.MRMLNodeNotFoundException: temp = None VolumesLogic.CenterVolume(temp) # create transform and transformed if needed transform = self.transformSelector.currentNode() if not transform: self.transformSelector.addNode() transform = self.transformSelector.currentNode() transformed = self.volumeSelectors['Transformed'].currentNode() if not transformed: volumesLogic = slicer.modules.volumes.logic() moving = self.volumeSelectors['Moving'].currentNode() transformedName = "%s-transformed" % moving.GetName() try: transformed = slicer.util.getNode(transformedName) except slicer.util.MRMLNodeNotFoundException: transformed = None if not transformed: transformed = volumesLogic.CloneVolume(slicer.mrmlScene, moving, transformedName) transformed.SetAndObserveTransformNodeID(transform.GetID()) self.volumeSelectors['Transformed'].setCurrentNode(transformed) self.onLayout() self.interfaceFrame.enabled = True # IMAG2: Set red lookup table and change contrast of the transformed image displayNode = transformed.GetDisplayNode() displayNode.SetAndObserveColorNodeID('vtkMRMLColorTableNodeRed') displayNode.AutoWindowLevelOff() # displayNode.SetThreshold(1,1) displayNode.SetWindowLevel(0.2, 1) def cleanup(self): self.removeObservers() self.landmarksWidget.removeLandmarkObservers() def addObservers(self): """Observe the mrml scene for changes that we wish to respond to. scene observer: - whenever a new node is added, check if it was a new fiducial. if so, transform it into a landmark by creating a matching fiducial for other volumes fiducial obserers: - when fiducials are manipulated, perform (or schedule) an update to the currently active registration method. 
""" tag = slicer.mrmlScene.AddObserver(slicer.mrmlScene.NodeAddedEvent, self.landmarksWidget.requestNodeAddedUpdate) self.observerTags.append((slicer.mrmlScene, tag)) tag = slicer.mrmlScene.AddObserver(slicer.mrmlScene.NodeRemovedEvent, self.landmarksWidget.requestNodeAddedUpdate) self.observerTags.append((slicer.mrmlScene, tag)) def removeObservers(self): """Remove observers and any other cleanup needed to disconnect from the scene""" for obj, tag in self.observerTags: obj.RemoveObserver(tag) self.observerTags = [] def registationState(self): """Return an instance of RegistrationState populated with current gui parameters""" state = RegistrationLib.RegistrationState() state.logic = self.logic state.fixed = self.volumeSelectors["Fixed"].currentNode() state.moving = self.volumeSelectors["Moving"].currentNode() state.transformed = self.volumeSelectors["Transformed"].currentNode() state.fixedFiducials = self.logic.volumeFiducialList(state.fixed) state.movingFiducials = self.logic.volumeFiducialList(state.moving) state.transformedFiducials = self.logic.volumeFiducialList(state.transformed) state.transform = self.transformSelector.currentNode() state.currentLandmarkName = self.landmarksWidget.selectedLandmark return (state) def currentVolumeNodes(self): """List of currently selected volume nodes""" volumeNodes = [] for selector in self.volumeSelectors.values(): volumeNode = selector.currentNode() if volumeNode: volumeNodes.append(volumeNode) return (volumeNodes) def onVolumeNodeSelect(self): """When one of the volume selectors is changed""" volumeNodes = self.currentVolumeNodes() self.landmarksWidget.setVolumeNodes(volumeNodes) fixed = self.volumeSelectors['Fixed'].currentNode() moving = self.volumeSelectors['Moving'].currentNode() transformed = self.volumeSelectors['Transformed'].currentNode() self.registrationCollapsibleButton.enabled = bool(fixed and moving) self.logic.hiddenFiducialVolumes = (transformed,) def onLayout(self, layoutMode="Axi/Sag/Cor", volumesToShow=None): """When the layout is changed by the VisualizationWidget volumesToShow: list of the volumes to include, None means include all """ volumeNodes = [] activeViewNames = [] for viewName in self.viewNames: volumeNode = self.volumeSelectors[viewName].currentNode() if volumeNode and not (volumesToShow and viewName not in volumesToShow): volumeNodes.append(volumeNode) activeViewNames.append(viewName) import CompareVolumes compareLogic = CompareVolumes.CompareVolumesLogic() oneViewModes = ('Axial', 'Sagittal', 'Coronal',) if layoutMode in oneViewModes: self.sliceNodesByViewName = compareLogic.viewerPerVolume(volumeNodes, viewNames=activeViewNames, orientation=layoutMode) elif layoutMode == 'Axi/Sag/Cor': self.sliceNodesByViewName = compareLogic.viewersPerVolume(volumeNodes) self.overlayFixedOnTransformed() self.updateSliceNodesByVolumeID() self.onLandmarkPicked(self.landmarksWidget.selectedLandmark) def overlayFixedOnTransformed(self): """If there are viewers showing the transformed volume in the background, make the foreground volume be the fixed volume and set opacity to 0.5""" fixedNode = self.volumeSelectors['Fixed'].currentNode() transformedNode = self.volumeSelectors['Transformed'].currentNode() if transformedNode: compositeNodes = slicer.util.getNodes('vtkMRMLSliceCompositeNode*') for compositeNode in compositeNodes.values(): if compositeNode.GetBackgroundVolumeID() == transformedNode.GetID(): compositeNode.SetForegroundVolumeID(fixedNode.GetID()) compositeNode.SetForegroundOpacity(0.5) def onRegistrationType(self, 
pickedRegistrationType): """Pick which registration type to display""" if self.currentRegistrationInterface: self.currentRegistrationInterface.destroy() interfaceClass = slicer.modules.registrationPlugins[pickedRegistrationType] self.currentRegistrationInterface = interfaceClass(self.registrationCollapsibleButton) # argument registationState is a callable that gets current state self.currentRegistrationInterface.create(self.registationState) self.currentRegistrationInterface.onLandmarkEndMoving(self.registationState) def onLocalRefinementMethod(self, pickedLocalRefinementMethod): """Pick which local refinement method to display""" if self.currentLocalRefinementInterface: self.currentLocalRefinementInterface.destroy() interfaceClass = slicer.modules.registrationPlugins[pickedLocalRefinementMethod] self.currentLocalRefinementInterface = interfaceClass(self.localRefinementCollapsibleButton) # argument registrationState is a callable that gets current state, current same instance is shared for registration and local refinement self.currentLocalRefinementInterface.create(self.registationState) def updateSliceNodesByVolumeID(self): """Build a mapping to a list of slice nodes node that are currently displaying a given volumeID""" compositeNodes = slicer.util.getNodes('vtkMRMLSliceCompositeNode*') self.sliceNodesByVolumeID = {} if self.sliceNodesByViewName: for sliceNode in self.sliceNodesByViewName.values(): for compositeNode in compositeNodes.values(): if compositeNode.GetLayoutName() == sliceNode.GetLayoutName(): volumeID = compositeNode.GetBackgroundVolumeID() if self.sliceNodesByVolumeID.has_key(volumeID): self.sliceNodesByVolumeID[volumeID].append(sliceNode) else: self.sliceNodesByVolumeID[volumeID] = [sliceNode, ] def restrictLandmarksToViews(self): """Set fiducials so they only show up in the view for the volume on which they were defined. 
Also turn off other fiducial lists, since leaving them visible can interfere with picking.""" volumeNodes = self.currentVolumeNodes() if self.sliceNodesByViewName: landmarks = self.logic.landmarksForVolumes(volumeNodes) activeFiducialLists = [] for landmarkName in landmarks: for fiducialList, index in landmarks[landmarkName]: activeFiducialLists.append(fiducialList) displayNode = fiducialList.GetDisplayNode() displayNode.RemoveAllViewNodeIDs() volumeNodeID = fiducialList.GetAttribute("AssociatedNodeID") if volumeNodeID: if self.sliceNodesByVolumeID.has_key(volumeNodeID): for sliceNode in self.sliceNodesByVolumeID[volumeNodeID]: displayNode.AddViewNodeID(sliceNode.GetID()) for hiddenVolume in self.logic.hiddenFiducialVolumes: if hiddenVolume and volumeNodeID == hiddenVolume.GetID(): displayNode.SetVisibility(False) allFiducialLists = slicer.util.getNodes('vtkMRMLMarkupsFiducialNode').values() for fiducialList in allFiducialLists: if fiducialList not in activeFiducialLists: displayNode = fiducialList.GetDisplayNode() if displayNode: displayNode.SetVisibility(False) displayNode.RemoveAllViewNodeIDs() displayNode.AddViewNodeID("__invalid_view_id__") def onLocalRefineClicked(self): """Refine the selected landmark""" timing = True slicer.mrmlScene.StartState(slicer.mrmlScene.BatchProcessState) if self.landmarksWidget.selectedLandmark != None: if self.currentLocalRefinementInterface: state = self.registationState() self.currentLocalRefinementInterface.refineLandmark(state) if timing: onLandmarkPickedStart = time.time() self.onLandmarkPicked(self.landmarksWidget.selectedLandmark) if timing: onLandmarkPickedEnd = time.time() if timing: print('Time to update visualization ' + str( onLandmarkPickedEnd - onLandmarkPickedStart) + ' seconds') slicer.mrmlScene.EndState(slicer.mrmlScene.BatchProcessState) def onLandmarkPicked(self, landmarkName): """Jump all slice views such that the selected landmark is visible""" if not self.landmarksWidget.movingView: # only change the fiducials if they are not being manipulated self.restrictLandmarksToViews() self.updateSliceNodesByVolumeID() volumeNodes = self.currentVolumeNodes() landmarksByName = self.logic.landmarksForVolumes(volumeNodes) if landmarksByName.has_key(landmarkName): for fiducialList, index in landmarksByName[landmarkName]: volumeNodeID = fiducialList.GetAttribute("AssociatedNodeID") if self.sliceNodesByVolumeID.has_key(volumeNodeID): point = [0, ] * 3 fiducialList.GetNthFiducialPosition(index, point) for sliceNode in self.sliceNodesByVolumeID[volumeNodeID]: if sliceNode.GetLayoutName() != self.landmarksWidget.movingView: sliceNode.JumpSliceByCentering(*point) # if landmarkName != None : # self.localRefineButton.text = 'Refine landmark ' + landmarkName # else: # self.localRefineButton.text = 'No landmark selected for refinement' def onLandmarkMoved(self, landmarkName): """Called when a landmark is moved (probably through manipulation of the widget in the slice view). This updates the active registration""" if self.currentRegistrationInterface: state = self.registationState() self.currentRegistrationInterface.onLandmarkMoved(state) def onLandmarkEndMoving(self, landmarkName): """Called when a landmark is done being moved (e.g. when mouse button released)""" if self.currentRegistrationInterface: state = self.registationState() self.currentRegistrationInterface.onLandmarkEndMoving(state) def onReload(self, moduleName="BonesSegmentation"): """Generic reload method for any scripted module. ModuleWizard will subsitute correct default moduleName. 
Note: customized for use in BonesSegmentation """ import imp, sys, os, slicer # first, destroy the current plugin, since it will # contain subclasses of the RegistrationLib modules if self.currentRegistrationInterface: self.currentRegistrationInterface.destroy() if self.currentLocalRefinementInterface: self.currentLocalRefinementInterface.destroy() # now reload the RegistrationLib source code # - set source file path # - load the module to the global space filePath = eval('slicer.modules.%s.path' % moduleName.lower()) p = os.path.dirname(filePath) if not sys.path.__contains__(p): sys.path.insert(0, p) for subModuleName in ("pqWidget", "Visualization", "Landmarks",): fp = open(filePath, "r") globals()[subModuleName] = imp.load_module( subModuleName, fp, filePath, ('.py', 'r', imp.PY_SOURCE)) fp.close() # now reload all the support code and have the plugins # re-register themselves with slicer oldPlugins = slicer.modules.registrationPlugins slicer.modules.registrationPlugins = {} for plugin in oldPlugins.values(): pluginModuleName = plugin.__module__.lower() if hasattr(slicer.modules, pluginModuleName): # for a plugin from an extension, need to get the source path # from the module module = getattr(slicer.modules, pluginModuleName) sourceFile
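# --------------------------------------------------------------------------
# Hedged refactoring sketch (not part of the original module): the age/sex
# branches earlier in this file repeat the same load-then-find-by-name
# pattern for every template volume. A single helper (hypothetical name
# _load_template_volume) using only the Slicer calls already present above
# could replace them; it is meant to run inside Slicer's Python. The
# threshold table mirrors the branches shown, but treat the exact male
# cut-offs as an assumption to verify against the full source.
def _load_template_volume(module_dir, is_male, age_label):
    import os
    import slicer

    thresholds = ((1, 'M7'), (3, 'Y2'), (8, 'Y4'), (15 if is_male else 13, 'Y9'))
    suffix = 'Y15'
    for upper, name in thresholds:
        if age_label <= upper:
            suffix = name
            break
    template = ('M_' if is_male else 'F_') + suffix

    # Load the template, then locate the resulting node by name, exactly as
    # the original branches do.
    slicer.util.loadVolume(os.path.join(module_dir, 'Templates', template + '.nrrd'))
    nodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLVolumeNode')
    nodes.UnRegister(slicer.mrmlScene)
    nodes.InitTraversal()
    node = nodes.GetNextItemAsObject()
    while node:
        if template in node.GetName():
            return node
        node = nodes.GetNextItemAsObject()
    return None
# --------------------------------------------------------------------------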
class IceFeature(object): def __init__(self): self.gmlId = None self.geometry = None def get_geometry(self): return self.__geometry def set_geometry(self, value): self.__geometry = value def get_gml_id(self): return self.__gmlId def set_gml_id(self, value): self.__gmlId = value gmlId = property(get_gml_id, set_gml_id, None, None) class Seaice(IceFeature): def __init__(self): IceFeature.__init__(self) self.iceact = None self.iceapc = None self.icesod = None self.iceflz = None self.icespc = None self.icelvl = None self.icecst = None self.icefty = None self.icedsp = None self.iceddr = None self.icercn = None self.icerfq = None self.icermh = None self.icerxh = None self.icerdv = None self.icekcn = None self.icekfq = None self.icekmd = None self.icekxd = None self.icefcn = None self.icetck = None self.icemax = None self.icemin = None self.icetty = None self.icemlt = None self.icescn = None self.icesct = None self.icedos = None self.icelst = None self.icelfq = None self.icelor = None self.icelwd = None self.ia_sfa = None self.ia_sfb = None self.ia_sfc = None self.ia_ffa = None self.ia_ffb = None self.ia_ffc = None self.ia_sng = None self.ia_mlt = None self.ia_plg = None self.ia_hlg = None self.ia_dug = None def get_iceact(self): return self.__iceact def get_iceapc(self): return self.__iceapc def get_icesod(self): return self.__icesod def get_iceflz(self): return self.__iceflz def get_icespc(self): return self.__icespc def get_icelvl(self): return self.__icelvl def get_icecst(self): return self.__icecst def get_icefty(self): return self.__icefty def get_icedsp(self): return self.__icedsp def get_iceddr(self): return self.__iceddr def get_icercn(self): return self.__icercn def get_icerfq(self): return self.__icerfq def get_icermh(self): return self.__icermh def get_icerxh(self): return self.__icerxh def get_icerdv(self): return self.__icerdv def get_icekcn(self): return self.__icekcn def get_icekfq(self): return self.__icekfq def get_icekmd(self): return self.__icekmd def get_icekxd(self): return self.__icekxd def get_icefcn(self): return self.__icefcn def get_icetck(self): return self.__icetck def get_icemax(self): return self.__icemax def get_icemin(self): return self.__icemin def get_icetty(self): return self.__icetty def get_icemlt(self): return self.__icemlt def get_icescn(self): return self.__icescn def get_icesct(self): return self.__icesct def get_icedos(self): return self.__icedos def get_icelst(self): return self.__icelst def get_icelfq(self): return self.__icelfq def get_icelor(self): return self.__icelor def get_icelwd(self): return self.__icelwd def get_ia_sfa(self): return self.__ia_sfa def get_ia_sfb(self): return self.__ia_sfb def get_ia_sfc(self): return self.__ia_sfc def get_ia_ffa(self): return self.__ia_ffa def get_ia_ffb(self): return self.__ia_ffb def get_ia_ffc(self): return self.__ia_ffc def get_ia_sng(self): return self.__ia_sng def get_ia_mlt(self): return self.__ia_mlt def get_ia_plg(self): return self.__ia_plg def get_ia_hlg(self): return self.__ia_hlg def get_ia_dug(self): return self.__ia_dug def set_iceact(self, value): self.__iceact = value def set_iceapc(self, value): self.__iceapc = value def set_icesod(self, value): self.__icesod = value def set_iceflz(self, value): self.__iceflz = value def set_icespc(self, value): self.__icespc = value def set_icelvl(self, value): self.__icelvl = value def set_icecst(self, value): self.__icecst = value def set_icefty(self, value): self.__icefty = value def set_icedsp(self, value): self.__icedsp = value def set_iceddr(self, 
value): self.__iceddr = value def set_icercn(self, value): self.__icercn = value def set_icerfq(self, value): self.__icerfq = value def set_icermh(self, value): self.__icermh = value def set_icerxh(self, value): self.__icerxh = value def set_icerdv(self, value): self.__icerdv = value def set_icekcn(self, value): self.__icekcn = value def set_icekfq(self, value): self.__icekfq = value def set_icekmd(self, value): self.__icekmd = value def set_icekxd(self, value): self.__icekxd = value def set_icefcn(self, value): self.__icefcn = value def set_icetck(self, value): self.__icetck = value def set_icemax(self, value): self.__icemax = value def set_icemin(self, value): self.__icemin = value def set_icetty(self, value): self.__icetty = value def set_icemlt(self, value): self.__icemlt = value def set_icescn(self, value): self.__icescn = value def set_icesct(self, value): self.__icesct = value def set_icedos(self, value): self.__icedos = value def set_icelst(self, value): self.__icelst = value def set_icelfq(self, value): self.__icelfq = value def set_icelor(self, value): self.__icelor = value def set_icelwd(self, value): self.__icelwd = value def set_ia_sfa(self, value): self.__ia_sfa = value def set_ia_sfb(self, value): self.__ia_sfb = value def set_ia_sfc(self, value): self.__ia_sfc = value def set_ia_ffa(self, value): self.__ia_ffa = value def set_ia_ffb(self, value): self.__ia_ffb = value def set_ia_ffc(self, value): self.__ia_ffc = value def set_ia_sng(self, value): self.__ia_sng = value def set_ia_mlt(self, value): self.__ia_mlt = value def set_ia_plg(self, value): self.__ia_plg = value def set_ia_hlg(self, value): self.__ia_hlg = value def set_ia_dug(self, value): self.__ia_dug = value class Lacice(IceFeature): def __init__(self): IceFeature.__init__(self) self.iceact = None self.iceapc = None self.icelso = None self.iceflz = None self.icespc = None self.icelvl = None self.icecst = None self.icefty = None self.icedsp = None self.iceddr = None self.icercn = None self.icerfq = None self.icermh = None self.icerxh = None self.icerdv = None self.icekcn = None self.icekfq = None self.icekmd = None self.icekxd = None self.icefcn = None self.icetck = None self.icemax = None self.icemin = None self.icetty = None self.icemlt = None self.icescn = None self.icesct = None self.icedos = None self.icelst = None self.icelfq = None self.icelor = None self.icelwd = None def get_iceact(self): return self.__iceact def get_iceapc(self): return self.__iceapc def get_icelso(self): return self.__icelso def get_iceflz(self): return self.__iceflz def get_icespc(self): return self.__icespc def get_icelvl(self): return self.__icelvl def get_icecst(self): return self.__icecst def get_icefty(self): return self.__icefty def get_icedsp(self): return self.__icedsp def get_iceddr(self): return self.__iceddr def get_icercn(self): return self.__icercn def get_icerfq(self): return self.__icerfq def get_icermh(self): return self.__icermh def get_icerxh(self): return self.__icerxh def get_icerdv(self): return self.__icerdv def get_icekcn(self): return self.__icekcn def get_icekfq(self): return self.__icekfq def get_icekmd(self): return self.__icekmd def get_icekxd(self): return self.__icekxd def get_icefcn(self): return self.__icefcn def get_icetck(self): return self.__icetck def get_icemax(self): return self.__icemax def get_icemin(self): return self.__icemin def get_icetty(self): return self.__icetty def get_icemlt(self): return self.__icemlt def get_icescn(self): return self.__icescn def get_icesct(self): return self.__icesct 
def get_icedos(self): return self.__icedos def get_icelst(self): return self.__icelst def get_icelfq(self): return self.__icelfq def get_icelor(self): return self.__icelor def get_icelwd(self): return self.__icelwd def set_iceact(self, value): self.__iceact = value def set_iceapc(self, value): self.__iceapc = value def set_icelso(self, value): self.__icelso = value def set_iceflz(self, value): self.__iceflz = value def set_icespc(self, value): self.__icespc = value def set_icelvl(self, value): self.__icelvl = value def set_icecst(self, value): self.__icecst = value def set_icefty(self, value): self.__icefty = value def set_icedsp(self, value): self.__icedsp = value def set_iceddr(self, value): self.__iceddr = value def set_icercn(self, value): self.__icercn = value def set_icerfq(self, value): self.__icerfq = value def set_icermh(self, value): self.__icermh = value def set_icerxh(self, value): self.__icerxh = value def set_icerdv(self, value): self.__icerdv = value def set_icekcn(self, value): self.__icekcn = value def set_icekfq(self, value): self.__icekfq = value def set_icekmd(self, value): self.__icekmd = value def set_icekxd(self, value): self.__icekxd = value def set_icefcn(self, value): self.__icefcn = value def set_icetck(self, value): self.__icetck = value def set_icemax(self, value): self.__icemax = value def set_icemin(self, value): self.__icemin = value def set_icetty(self, value): self.__icetty = value def set_icemlt(self, value): self.__icemlt = value def set_icescn(self, value): self.__icescn = value def set_icesct(self, value): self.__icesct = value def set_icedos(self, value): self.__icedos = value def set_icelst(self, value): self.__icelst = value def set_icelfq(self, value): self.__icelfq = value def set_icelor(self, value): self.__icelor = value def set_icelwd(self, value): self.__icelwd = value class Brgare(IceFeature): def __init__(self): IceFeature.__init__(self) self.icebnm = None self.icebsz = None self.ia_bcn = None self.ia_bfm = None self.ia_buh = None def get_icebnm(self): return self.__icebnm def get_icebsz(self): return self.__icebsz def get_ia_bcn(self): return self.__ia_bcn def get_ia_bfm(self): return self.__ia_bfm def get_ia_buh(self): return self.__ia_buh def set_icebnm(self, value): self.__icebnm = value def set_icebsz(self, value): self.__icebsz = value def set_ia_bcn(self, value): self.__ia_bcn = value def set_ia_bfm(self, value): self.__ia_bfm = value def set_ia_buh(self, value): self.__ia_buh = value class Icelne(IceFeature): def __init__(self): IceFeature.__init__(self) class Brglne(IceFeature): def __init__(self): IceFeature.__init__(self) class Opnlne(IceFeature): def __init__(self): IceFeature.__init__(self) class Lkilne(IceFeature): def __init__(self): IceFeature.__init__(self) class I_Ridg(IceFeature): def __init__(self): IceFeature.__init__(self) self.icerdv = None self.icermh = None self.icerxh = None def get_icerdv(self): return self.__icerdv def get_icermh(self): return self.__icermh def get_icerxh(self): return self.__icerxh def set_icerdv(self, value): self.__icerdv = value def set_icermh(self, value): self.__icermh = value def set_icerxh(self, value): self.__icerxh = value class I_Lead(IceFeature): def __init__(self): IceFeature.__init__(self) self.icesod = None self.ia_obn = None self.icedvw = None self.ia_dmw = None self.ia_dxw = None def get_icesod(self): return self.__icesod def get_ia_obn(self): return self.__ia_obn def get_icedvw(self): return self.__icedvw def get_ia_dmw(self): return self.__ia_dmw def get_ia_dxw(self): return 
self.__ia_dxw def set_icesod(self, value): self.__icesod = value def set_ia_obn(self, value): self.__ia_obn = value def set_icedvw(self, value): self.__icedvw = value def set_ia_dmw(self, value): self.__ia_dmw = value def set_ia_dxw(self, value): self.__ia_dxw = value class I_Fral(IceFeature): def __init__(self): IceFeature.__init__(self) self.icesod = None self.ia_obn = None self.icedvw = None self.ia_dmw = None self.ia_dxw = None def get_icesod(self): return self.__icesod def get_ia_obn(self): return self.__ia_obn def get_icedvw(self): return self.__icedvw def get_ia_dmw(self): return
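# --------------------------------------------------------------------------
# Hedged usage note (not part of the original module): in the excerpt above
# only gmlId is wired up via property(); the other get_*/set_* pairs read
# and write name-mangled attributes (e.g. self.__iceact -> _Seaice__iceact),
# while __init__ assigns the plain names (self.iceact = None). If the full
# source defines matching property() bindings, those assignments route
# through the setters; as excerpted, a getter called before its setter would
# raise AttributeError. Going through the setters first is safe either way,
# as sketched here with illustrative values.
def _seaice_usage_sketch():
    ice = Seaice()
    ice.gmlId = "ICE.0001"       # property defined in IceFeature

    ice.set_iceact(True)         # populate the mangled backing attribute...
    ice.set_icetck(1.2)
    return ice.get_iceact(), ice.get_icetck()   # ...before reading it back
# --------------------------------------------------------------------------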
<reponame>Y0mingZhang/pytorch-lightning # Copyright The PyTorch Lightning team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from collections import OrderedDict from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import torch import torch.nn as nn from torch import Tensor from torch.utils.hooks import RemovableHandle import pytorch_lightning as pl from pytorch_lightning.utilities import AMPType, DeviceType, ModelSummaryMode, rank_zero_deprecation from pytorch_lightning.utilities.exceptions import MisconfigurationException from pytorch_lightning.utilities.imports import _TORCH_GREATER_EQUAL_1_8 from pytorch_lightning.utilities.warnings import WarningCache log = logging.getLogger(__name__) warning_cache = WarningCache() PARAMETER_NUM_UNITS = [" ", "K", "M", "B", "T"] UNKNOWN_SIZE = "?" class LayerSummary: """Summary class for a single layer in a :class:`~pytorch_lightning.core.lightning.LightningModule`. It collects the following information: - Type of the layer (e.g. Linear, BatchNorm1d, ...) - Input shape - Output shape - Number of parameters The input and output shapes are only known after the example input array was passed through the model. Example:: >>> model = torch.nn.Conv2d(3, 8, 3) >>> summary = LayerSummary(model) >>> summary.num_parameters 224 >>> summary.layer_type 'Conv2d' >>> output = model(torch.rand(1, 3, 5, 5)) >>> summary.in_size [1, 3, 5, 5] >>> summary.out_size [1, 8, 3, 3] Args: module: A module to summarize """ def __init__(self, module: nn.Module) -> None: super().__init__() self._module = module self._hook_handle = self._register_hook() self._in_size: Optional[Union[str, List]] = None self._out_size: Optional[Union[str, List]] = None def __del__(self) -> None: self.detach_hook() def _register_hook(self) -> Optional[RemovableHandle]: """Registers a hook on the module that computes the input- and output size(s) on the first forward pass. If the hook is called, it will remove itself from the from the module, meaning that recursive models will only record their input- and output shapes once. Registering hooks on :class:`~torch.jit.ScriptModule` is not supported. Return: A handle for the installed hook, or ``None`` if registering the hook is not possible. """ def hook(_: nn.Module, inp: Any, out: Any) -> None: if len(inp) == 1: inp = inp[0] self._in_size = parse_batch_shape(inp) self._out_size = parse_batch_shape(out) assert self._hook_handle is not None self._hook_handle.remove() handle = None if not isinstance(self._module, torch.jit.ScriptModule): handle = self._module.register_forward_hook(hook) return handle def detach_hook(self) -> None: """Removes the forward hook if it was not already removed in the forward pass. Will be called after the summary is created. 
""" if self._hook_handle is not None: self._hook_handle.remove() @property def in_size(self) -> Union[str, List]: return self._in_size or UNKNOWN_SIZE @property def out_size(self) -> Union[str, List]: return self._out_size or UNKNOWN_SIZE @property def layer_type(self) -> str: """Returns the class name of the module.""" return str(self._module.__class__.__name__) @property def num_parameters(self) -> int: """Returns the number of parameters in this module.""" return sum(np.prod(p.shape) if not _is_lazy_weight_tensor(p) else 0 for p in self._module.parameters()) class ModelSummary: """Generates a summary of all layers in a :class:`~pytorch_lightning.core.lightning.LightningModule`. Args: model: The model to summarize (also referred to as the root module). mode: Can be one of - `top` (default): only the top-level modules will be recorded (the children of the root module) - `full`: summarizes all layers and their submodules in the root module .. deprecated:: v1.4 This parameter was deprecated in v1.4 in favor of `max_depth` and will be removed in v1.6. max_depth: Maximum depth of modules to show. Use -1 to show all modules or 0 to show no summary. Defaults to 1. The string representation of this summary prints a table with columns containing the name, type and number of parameters for each layer. The root module may also have an attribute ``example_input_array`` as shown in the example below. If present, the root module will be called with it as input to determine the intermediate input- and output shapes of all layers. Supported are tensors and nested lists and tuples of tensors. All other types of inputs will be skipped and show as `?` in the summary table. The summary will also display `?` for layers not used in the forward pass. Example:: >>> import pytorch_lightning as pl >>> class LitModel(pl.LightningModule): ... ... def __init__(self): ... super().__init__() ... self.net = nn.Sequential(nn.Linear(256, 512), nn.BatchNorm1d(512)) ... self.example_input_array = torch.zeros(10, 256) # optional ... ... def forward(self, x): ... return self.net(x) ... >>> model = LitModel() >>> ModelSummary(model, max_depth=1) # doctest: +NORMALIZE_WHITESPACE | Name | Type | Params | In sizes | Out sizes ------------------------------------------------------------ 0 | net | Sequential | 132 K | [10, 256] | [10, 512] ------------------------------------------------------------ 132 K Trainable params 0 Non-trainable params 132 K Total params 0.530 Total estimated model params size (MB) >>> ModelSummary(model, max_depth=-1) # doctest: +NORMALIZE_WHITESPACE | Name | Type | Params | In sizes | Out sizes -------------------------------------------------------------- 0 | net | Sequential | 132 K | [10, 256] | [10, 512] 1 | net.0 | Linear | 131 K | [10, 256] | [10, 512] 2 | net.1 | BatchNorm1d | 1.0 K | [10, 512] | [10, 512] -------------------------------------------------------------- 132 K Trainable params 0 Non-trainable params 132 K Total params 0.530 Total estimated model params size (MB) """ def __init__(self, model: "pl.LightningModule", mode: Optional[str] = None, max_depth: Optional[int] = 1) -> None: self._model = model # temporary mapping from mode to max_depth if max_depth is None or mode is not None: if mode in ModelSummaryMode.supported_types(): max_depth = ModelSummaryMode.get_max_depth(mode) rank_zero_deprecation( "Argument `mode` in `ModelSummary` is deprecated in v1.4" f" and will be removed in v1.6. Use `max_depth={max_depth}` to replicate `mode={mode}` behaviour." 
) else: raise MisconfigurationException( f"`mode` can be {', '.join(ModelSummaryMode.supported_types())}, got {mode}." ) if not isinstance(max_depth, int) or max_depth < -1: raise ValueError(f"`max_depth` can be -1, 0 or > 0, got {max_depth}.") self._max_depth = max_depth self._layer_summary = self.summarize() # 1 byte -> 8 bits # TODO: how do we compute precision_megabytes in case of mixed precision? precision = self._model.precision if isinstance(self._model.precision, int) else 32 self._precision_megabytes = (precision / 8.0) * 1e-6 @property def named_modules(self) -> List[Tuple[str, nn.Module]]: mods: List[Tuple[str, nn.Module]] if self._max_depth == 0: mods = [] elif self._max_depth == 1: # the children are the top-level modules mods = list(self._model.named_children()) else: mods = self._model.named_modules() mods = list(mods)[1:] # do not include root module (LightningModule) return mods @property def layer_names(self) -> List[str]: return list(self._layer_summary.keys()) @property def layer_types(self) -> List[str]: return [layer.layer_type for layer in self._layer_summary.values()] @property def in_sizes(self) -> List: return [layer.in_size for layer in self._layer_summary.values()] @property def out_sizes(self) -> List: return [layer.out_size for layer in self._layer_summary.values()] @property def param_nums(self) -> List[int]: return [layer.num_parameters for layer in self._layer_summary.values()] @property def total_parameters(self) -> int: return sum(p.numel() if not _is_lazy_weight_tensor(p) else 0 for p in self._model.parameters()) @property def trainable_parameters(self) -> int: return sum( p.numel() if not _is_lazy_weight_tensor(p) else 0 for p in self._model.parameters() if p.requires_grad ) @property def model_size(self) -> float: # todo: seems it does not work with quantized models - it returns 0.0 return self.total_parameters * self._precision_megabytes def summarize(self) -> Dict[str, LayerSummary]: summary = OrderedDict((name, LayerSummary(module)) for name, module in self.named_modules) if self._model.example_input_array is not None: self._forward_example_input() for layer in summary.values(): layer.detach_hook() if self._max_depth >= 1: # remove summary entries with depth > max_depth for k in [k for k in summary if k.count(".") >= self._max_depth]: del summary[k] return summary def _forward_example_input(self) -> None: """Run the example input through each layer to get input- and output sizes.""" model = self._model trainer = self._model.trainer input_ = model.example_input_array input_ = model._apply_batch_transfer_handler(input_) if trainer is not None and trainer.amp_backend == AMPType.NATIVE and trainer._device_type != DeviceType.TPU: model.forward = torch.cuda.amp.autocast()(model.forward) mode = model.training model.eval() with torch.no_grad(): # let the model hooks collect the input- and output shapes if isinstance(input_, (list, tuple)): model(*input_) elif isinstance(input_, dict): model(**input_) else: model(input_) model.train(mode) # restore mode of module def _get_summary_data(self) -> List[Tuple[str, List[str]]]: """Makes a summary listing with: Layer Name, Layer Type, Number of Parameters, Input Sizes, Output Sizes, Model
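# --------------------------------------------------------------------------
# Hedged illustration (not part of the original module): model_size above is
# simply total_parameters * (precision / 8) * 1e-6, i.e. parameter count
# times bytes-per-parameter, expressed in megabytes. The figures below mirror
# the LitModel doctest (a 256->512 Linear followed by BatchNorm1d) and only
# show where the "0.530" in the summary footer comes from.
def _model_size_sketch():
    linear_params = 256 * 512 + 512   # weights + bias
    batchnorm_params = 2 * 512        # weight + bias (running stats are buffers, not parameters)
    total_parameters = linear_params + batchnorm_params

    precision = 32                               # bits per parameter
    precision_megabytes = (precision / 8.0) * 1e-6
    return round(total_parameters * precision_megabytes, 3)   # -> 0.530
# --------------------------------------------------------------------------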
# cleanup db.volume_destroy(self.context, volume_id) os.unlink(dst_path) def test_create_volume_from_image_status_available(self): """Verify that before copying image to volume, it is in available state.""" self._create_volume_from_image('available') def test_create_volume_from_image_exception(self): """Verify that create volume from image, the volume status is 'downloading'.""" dst_fd, dst_path = tempfile.mkstemp() os.close(dst_fd) self.stubs.Set(self.volume.driver, 'local_path', lambda x: dst_path) image_id = 'aaaaaaaa-0000-0000-0000-000000000000' # creating volume testdata volume_id = 1 db.volume_create(self.context, {'id': volume_id, 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'display_description': 'Test Desc', 'size': 20, 'status': 'creating', 'host': 'dummy'}) self.assertRaises(exception.ImageNotFound, self.volume.create_volume, self.context, volume_id, None, None, None, None, image_id) volume = db.volume_get(self.context, volume_id) self.assertEqual(volume['status'], "error") # cleanup db.volume_destroy(self.context, volume_id) os.unlink(dst_path) def test_copy_volume_to_image_status_available(self): dst_fd, dst_path = tempfile.mkstemp() os.close(dst_fd) def fake_local_path(volume): return dst_path self.stubs.Set(self.volume.driver, 'local_path', fake_local_path) image_meta = { 'id': '70a599e0-31e7-49b7-b260-868f441e862b', 'container_format': 'bare', 'disk_format': 'raw'} # creating volume testdata volume_id = 1 db.volume_create(self.context, {'id': volume_id, 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'display_description': 'Test Desc', 'size': 20, 'status': 'uploading', 'instance_uuid': None, 'host': 'dummy'}) try: # start test self.volume.copy_volume_to_image(self.context, volume_id, image_meta) volume = db.volume_get(self.context, volume_id) self.assertEqual(volume['status'], 'available') finally: # cleanup db.volume_destroy(self.context, volume_id) os.unlink(dst_path) def test_copy_volume_to_image_status_use(self): dst_fd, dst_path = tempfile.mkstemp() os.close(dst_fd) def fake_local_path(volume): return dst_path self.stubs.Set(self.volume.driver, 'local_path', fake_local_path) image_meta = { 'id': 'a440c04b-79fa-479c-bed1-0b816eaec379', 'container_format': 'bare', 'disk_format': 'raw'} # creating volume testdata volume_id = 1 db.volume_create( self.context, {'id': volume_id, 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'display_description': 'Test Desc', 'size': 20, 'status': 'uploading', 'instance_uuid': 'b21f957d-a72f-4b93-b5a5-45b1161abb02', 'host': 'dummy'}) try: # start test self.volume.copy_volume_to_image(self.context, volume_id, image_meta) volume = db.volume_get(self.context, volume_id) self.assertEqual(volume['status'], 'in-use') finally: # cleanup db.volume_destroy(self.context, volume_id) os.unlink(dst_path) def test_copy_volume_to_image_exception(self): dst_fd, dst_path = tempfile.mkstemp() os.close(dst_fd) def fake_local_path(volume): return dst_path self.stubs.Set(self.volume.driver, 'local_path', fake_local_path) image_meta = { 'id': 'aaaaaaaa-0000-0000-0000-000000000000', 'container_format': 'bare', 'disk_format': 'raw'} # creating volume testdata volume_id = 1 db.volume_create(self.context, {'id': volume_id, 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'display_description': 'Test Desc', 'size': 20, 'status': 'in-use', 'host': 'dummy'}) try: # start test self.assertRaises(exception.ImageNotFound, self.volume.copy_volume_to_image, self.context, volume_id, image_meta) volume = db.volume_get(self.context, volume_id) 
self.assertEqual(volume['status'], 'available') finally: # cleanup db.volume_destroy(self.context, volume_id) os.unlink(dst_path) def test_create_volume_from_exact_sized_image(self): """Verify that an image which is exactly the same size as the volume, will work correctly.""" class _FakeImageService: def __init__(self, db_driver=None, image_service=None): pass def show(self, context, image_id): return {'size': 2 * 1024 * 1024 * 1024, 'disk_format': 'raw', 'container_format': 'bare'} image_id = '70a599e0-31e7-49b7-b260-868f441e862b' try: volume_id = None volume_api = cinder.volume.api.API( image_service=_FakeImageService()) volume = volume_api.create(self.context, 2, 'name', 'description', image_id=1) volume_id = volume['id'] self.assertEqual(volume['status'], 'creating') finally: # cleanup db.volume_destroy(self.context, volume_id) def test_create_volume_from_oversized_image(self): """Verify that an image which is too big will fail correctly.""" class _FakeImageService: def __init__(self, db_driver=None, image_service=None): pass def show(self, context, image_id): return {'size': 2 * 1024 * 1024 * 1024 + 1, 'disk_format': 'raw', 'container_format': 'bare'} image_id = '70a599e0-31e7-49b7-b260-868f441e862b' volume_api = cinder.volume.api.API(image_service=_FakeImageService()) self.assertRaises(exception.InvalidInput, volume_api.create, self.context, 2, 'name', 'description', image_id=1) def _do_test_create_volume_with_size(self, size): def fake_reserve(context, expire=None, project_id=None, **deltas): return ["RESERVATION"] def fake_commit(context, reservations, project_id=None): pass def fake_rollback(context, reservations, project_id=None): pass self.stubs.Set(QUOTAS, "reserve", fake_reserve) self.stubs.Set(QUOTAS, "commit", fake_commit) self.stubs.Set(QUOTAS, "rollback", fake_rollback) volume_api = cinder.volume.api.API() volume = volume_api.create(self.context, size, 'name', 'description') self.assertEquals(volume['size'], int(size)) def test_create_volume_int_size(self): """Test volume creation with int size.""" self._do_test_create_volume_with_size(2) def test_create_volume_string_size(self): """Test volume creation with string size.""" self._do_test_create_volume_with_size('2') def test_create_volume_with_bad_size(self): def fake_reserve(context, expire=None, project_id=None, **deltas): return ["RESERVATION"] def fake_commit(context, reservations, project_id=None): pass def fake_rollback(context, reservations, project_id=None): pass self.stubs.Set(QUOTAS, "reserve", fake_reserve) self.stubs.Set(QUOTAS, "commit", fake_commit) self.stubs.Set(QUOTAS, "rollback", fake_rollback) volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidInput, volume_api.create, self.context, '2Gb', 'name', 'description') def test_create_volume_usage_notification(self): """Ensure create volume generates appropriate usage notification""" volume = self._create_volume() volume_id = volume['id'] self.assertEquals(len(test_notifier.NOTIFICATIONS), 0) self.volume.create_volume(self.context, volume_id) self.assertEquals(len(test_notifier.NOTIFICATIONS), 2) msg = test_notifier.NOTIFICATIONS[0] self.assertEquals(msg['event_type'], 'volume.create.start') msg = test_notifier.NOTIFICATIONS[1] self.assertEquals(msg['priority'], 'INFO') self.assertEquals(msg['event_type'], 'volume.create.end') payload = msg['payload'] self.assertEquals(payload['tenant_id'], volume['project_id']) self.assertEquals(payload['user_id'], volume['user_id']) self.assertEquals(payload['volume_id'], volume['id']) 
self.assertEquals(payload['status'], 'creating') self.assertEquals(payload['size'], volume['size']) self.assertTrue('display_name' in payload) self.assertTrue('snapshot_id' in payload) self.assertTrue('launched_at' in payload) self.assertTrue('created_at' in payload) self.volume.delete_volume(self.context, volume_id) def test_begin_roll_detaching_volume(self): """Test begin_detaching and roll_detaching functions.""" volume = self._create_volume() volume_api = cinder.volume.api.API() volume_api.begin_detaching(self.context, volume) volume = db.volume_get(self.context, volume['id']) self.assertEqual(volume['status'], "detaching") volume_api.roll_detaching(self.context, volume) volume = db.volume_get(self.context, volume['id']) self.assertEqual(volume['status'], "in-use") def test_volume_api_update(self): # create a raw vol volume = self._create_volume() # use volume.api to update name volume_api = cinder.volume.api.API() update_dict = {'display_name': 'test update name'} volume_api.update(self.context, volume, update_dict) # read changes from db vol = db.volume_get(context.get_admin_context(), volume['id']) self.assertEquals(vol['display_name'], 'test update name') def test_volume_api_update_snapshot(self): # create raw snapshot volume = self._create_volume() snapshot = self._create_snapshot(volume['id']) self.assertEquals(snapshot['display_name'], None) # use volume.api to update name volume_api = cinder.volume.api.API() update_dict = {'display_name': 'test update name'} volume_api.update_snapshot(self.context, snapshot, update_dict) # read changes from db snap = db.snapshot_get(context.get_admin_context(), snapshot['id']) self.assertEquals(snap['display_name'], 'test update name') def test_volume_get_active_by_window(self): # Find all all volumes valid within a timeframe window. try: # Not in window db.volume_create( self.context, { 'id': 1, 'host': 'devstack', 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'deleted': True, 'status': 'deleted', 'deleted_at': datetime.datetime(1, 2, 1, 1, 1, 1), } ) except exception.VolumeNotFound: pass try: # In - deleted in window db.volume_create( self.context, { 'id': 2, 'host': 'devstack', 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'deleted': True, 'status': 'deleted', 'deleted_at': datetime.datetime(1, 3, 10, 1, 1, 1), } ) except exception.VolumeNotFound: pass try: # In - deleted after window db.volume_create( self.context, { 'id': 3, 'host': 'devstack', 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'deleted': True, 'status': 'deleted', 'deleted_at': datetime.datetime(1, 5, 1, 1, 1, 1), } ) except exception.VolumeNotFound: pass # In - created in window db.volume_create( self.context, { 'id': 4, 'host': 'devstack', 'created_at': datetime.datetime(1, 3, 10, 1, 1, 1), } ) # Not of window. db.volume_create( self.context, { 'id': 5, 'host': 'devstack', 'created_at': datetime.datetime(1, 5, 1, 1, 1, 1), } ) volumes = db.volume_get_active_by_window( self.context, datetime.datetime(1, 3, 1, 1, 1, 1), datetime.datetime(1, 4, 1, 1, 1, 1)) self.assertEqual(len(volumes), 3) self.assertEqual(volumes[0].id, u'2') self.assertEqual(volumes[1].id, u'3') self.assertEqual(volumes[2].id, u'4') def test_snapshot_get_active_by_window(self): # Find all all snapshots valid within a timeframe window. 
vol = db.volume_create(self.context, {'id': 1}) try: # Not in window db.snapshot_create( self.context, { 'id': 1, 'host': 'devstack', 'volume_id': 1, 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'deleted': True, 'status': 'deleted', 'deleted_at': datetime.datetime(1, 2, 1, 1, 1, 1), } ) except exception.SnapshotNotFound: pass try: # In - deleted in window db.snapshot_create( self.context, { 'id': 2, 'host': 'devstack', 'volume_id': 1, 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'deleted': True, 'status': 'deleted', 'deleted_at': datetime.datetime(1, 3, 10, 1, 1, 1), } ) except exception.SnapshotNotFound: pass try: # In - deleted after window db.snapshot_create( self.context, { 'id': 3, 'host': 'devstack', 'volume_id': 1, 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'deleted': True, 'status': 'deleted', 'deleted_at': datetime.datetime(1, 5, 1, 1, 1, 1), } ) except exception.SnapshotNotFound: pass # In - created in window db.snapshot_create( self.context, { 'id': 4, 'host': 'devstack', 'volume_id': 1, 'created_at': datetime.datetime(1, 3, 10, 1, 1, 1), } ) # Not of window. db.snapshot_create( self.context, { 'id': 5, 'host': 'devstack', 'volume_id': 1, 'created_at': datetime.datetime(1, 5, 1, 1, 1, 1), } ) snapshots = db.snapshot_get_active_by_window( self.context, datetime.datetime(1, 3, 1, 1, 1, 1), datetime.datetime(1, 4, 1, 1, 1, 1)) self.assertEqual(len(snapshots), 3) self.assertEqual(snapshots[0].id, u'2') self.assertEqual(snapshots[1].id, u'3') self.assertEqual(snapshots[2].id, u'4') class DriverTestCase(test.TestCase): """Base Test class for Drivers.""" driver_name = "cinder.volume.driver.FakeBaseDriver" def setUp(self): super(DriverTestCase, self).setUp() vol_tmpdir = tempfile.mkdtemp() self.flags(volume_driver=self.driver_name, volumes_dir=vol_tmpdir) self.volume = importutils.import_object(FLAGS.volume_manager) self.context = context.get_admin_context() self.output = "" self.stubs.Set(iscsi.TgtAdm, '_get_target', self.fake_get_target) def _fake_execute(_command, *_args, **_kwargs): """Fake _execute.""" return self.output, None self.volume.driver.set_execute(_fake_execute) def tearDown(self): try: shutil.rmtree(FLAGS.volumes_dir) except OSError: pass super(DriverTestCase, self).tearDown() def fake_get_target(obj, iqn): return 1 def _attach_volume(self): """Attach volumes to an instance. """ return [] def _detach_volume(self, volume_id_list): """Detach volumes from an instance.""" for volume_id in volume_id_list: db.volume_detached(self.context, volume_id) self.volume.delete_volume(self.context, volume_id) class VolumeDriverTestCase(DriverTestCase): """Test case for VolumeDriver""" driver_name = "cinder.volume.drivers.lvm.LVMVolumeDriver" def test_delete_busy_volume(self): """Test deleting a busy volume.""" self.stubs.Set(self.volume.driver, '_volume_not_present', lambda x: False) self.stubs.Set(self.volume.driver, '_delete_volume', lambda x, y: False) # Want DriverTestCase._fake_execute to return 'o' so that # volume.driver.delete_volume() raises the VolumeIsBusy exception. self.output = 'o' self.assertRaises(exception.VolumeIsBusy, self.volume.driver.delete_volume, {'name': 'test1', 'size': 1024}) # when DriverTestCase._fake_execute returns something other than # 'o' volume.driver.delete_volume() does not raise an exception. 
self.output = 'x' self.volume.driver.delete_volume({'name': 'test1', 'size': 1024}) class ISCSITestCase(DriverTestCase): """Test Case for ISCSIDriver""" driver_name = "cinder.volume.drivers.lvm.LVMISCSIDriver" def _attach_volume(self): """Attach volumes to an instance. """ volume_id_list = [] for index in xrange(3): vol = {} vol['size'] = 0 vol_ref = db.volume_create(self.context, vol) self.volume.create_volume(self.context, vol_ref['id']) vol_ref = db.volume_get(self.context, vol_ref['id']) # each volume has a different
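The driver tests above all hinge on one pattern: the driver's shell-out hook is replaced with a fake whose canned output (self.output) steers the code path under test, e.g. an 'o' in the output makes delete_volume raise VolumeIsBusy. The following is a self-contained toy illustration of that pattern, not Cinder code; the class and exception names are invented for the demo.

import unittest

class BusyError(Exception):
    pass

class ToyLVMDriver:
    def __init__(self):
        self._execute = None

    def set_execute(self, execute):
        # same injection point as driver.set_execute() in the tests above
        self._execute = execute

    def delete_volume(self, volume):
        out, _err = self._execute('lvremove', volume['name'])
        if 'o' in out:  # pretend 'o' means the volume is still open/busy
            raise BusyError(volume['name'])

class ToyDriverTest(unittest.TestCase):
    def setUp(self):
        self.output = ''
        self.driver = ToyLVMDriver()
        self.driver.set_execute(lambda *args, **kwargs: (self.output, None))

    def test_delete_busy_volume(self):
        self.output = 'o'
        self.assertRaises(BusyError, self.driver.delete_volume,
                          {'name': 'test1', 'size': 1024})

    def test_delete_available_volume(self):
        self.output = 'x'
        self.driver.delete_volume({'name': 'test1', 'size': 1024})

if __name__ == '__main__':
    unittest.main()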
import os import glob import re import sys import socket import couchdb import logging import argparse import yaml import json import distance import operator try: import ConfigParser except ImportError: import configparser CONFIG = {} logger = logging.getLogger(__name__) def setupServer(conf): db_conf = conf['statusdb'] url="http://{0}:{1}@{2}:{3}".format(db_conf['username'], db_conf['password'], db_conf['url'], db_conf['port']) return couchdb.Server(url) def load_yaml_config(config_file): """Load YAML config file :param str config_file: The path to the configuration file. :returns: A dict of the parsed config file. :rtype: dict :raises IOError: If the config file cannot be opened. """ if type(config_file) is file: CONFIG.update(yaml.load(config_file) or {}) return CONFIG else: try: with open(config_file, 'r') as f: content = yaml.load(f) CONFIG.update(content) return content except IOError as e: e.message = "Could not open configuration file \"{}\".".format(config_file) raise e class Indexes: #indexes_by_kit looks like: #Kit_name: # i7_index1: # index_name: index_seq # ... # i5_index2: indexes_by_kit = {} #indexes looks like: #index_seq: ((index_name, index_type, kit_name), ....) indexes = {} def __init__(self, indexes_file): try: with open(indexes_file, 'r') as f: self.indexes_by_kit = yaml.load(f) except IOError as e: e.message = "Could not open configuration file \"{}\".".format(indexes_file) raise e #now create a more index centric object for kit_type in self.indexes_by_kit: #for each kit type if kit_type not in self.indexes_by_kit: print("file {} badly fomatted".format(indexes_file)) return for index_type in self.indexes_by_kit[kit_type]: # for each type of indexes for index_name, index_seq in self.indexes_by_kit[kit_type][index_type].items(): index_obj = {'name': index_name, 'index_type': index_type, 'kit_type': kit_type} self._add_index(index_seq, index_obj) #computes reverse complement def _reverse_complement(self, index): for base in index: if base not in 'ATCGNatcgn': print("Error: NOT a DNA sequence") return None complement_dict = {"A":"T", "C":"G", "G":"C", "T":"A", "N":"N", "a":"t", "c":"g", "g":"c", "t":"a", "n":"n" } return "".join([complement_dict[base] for base in reversed(index)]) #check if index exists in the indexes list def is_index(self, index): if index in self.indexes or self._reverse_complement(index) in self.indexes: return True else: return False def _add_index(self, index_seq, index_obj): index_to_modify = "" if index_seq in self.indexes: index_to_modify = index_seq elif self._reverse_complement(index_seq) in self.indexes: index_to_modify = self._reverse_complement(index_seq) else: index_to_modify = index_seq self.indexes[index_to_modify] = [] #add the information self.indexes[index_to_modify].append(index_obj) #returns all kits def return_kits(self): kits = [] for kit_type in self.indexes_by_kit: kits.append(kit_type) return kits #still to be defined def check_left_shift_conflicts(self): #checks if indexes from the same library after a left shift are conflicting for kit_type in self.indexes_by_kit: #for each lib kit type for index_type in self.indexes_by_kit[kit_type]: # for each type of indexes for index_name, index_seq in self.indexes_by_kit[kit_type][index_type].items(): fake_index = index_seq[1:] + "A" for index_name_check, index_seq_check in self.indexes_by_kit[kit_type][index_type].items(): hamming_dist = distance.hamming(index_seq_check, fake_index) if hamming_dist <= 2: print("{} {} {} {} {}".format(index_seq, index_seq_check, fake_index, 
hamming_dist, kit_type)) def get_FC_type(FCid): FC_type = "" if "ST-" in FCid: FC_type = "HiSeqX" elif "000000000-" in FCid: FC_type = "MiSeq" else: FC_type = "HiSeq2500" return FC_type import time from datetime import date def check_single_sample_lanes(instrument_type): couch=setupServer(CONFIG) flowcell_db = couch["x_flowcells"] flowcell_docs = {} for fc_doc in flowcell_db: try: undetermined = flowcell_db[fc_doc]["Undetermined"] except KeyError: continue flowcell_docs[flowcell_db[fc_doc]["RunInfo"]["Id"]] = fc_doc undet_stats = {} indexes = {} date_limit = date(16,3,1) for FCid in sorted(flowcell_docs): # first check that I have all necessary info to extract information fc_doc = flowcell_docs[FCid] FC_type = get_FC_type(FCid) #if a instrument type is specifed process only FCs run on that instrument if instrument_type is not None: if instrument_type != FC_type: continue instrument_name = flowcell_db[fc_doc]['RunInfo']['Instrument'] if instrument_name not in undet_stats: undet_stats[instrument_name] = {} #this is working only HiSeqX #only recent runs start_date_string = flowcell_db[fc_doc]['RunInfo']['Date'] year = start_date_string[0:2] month = start_date_string[2:4] day = start_date_string[4:6] fc_date = date(int(year), int(month), int(day)) if fc_date < date_limit: continue #understand which ones are the FCs with a single sample per lane single_sample_lanes = [] lanes = {} if 'samplesheet_csv' not in flowcell_db[fc_doc]: continue for sample in flowcell_db[fc_doc]['samplesheet_csv']: if sample['Lane'] not in lanes: lanes[sample['Lane']] = [] lanes[sample['Lane']].append(sample['index']) for lane in lanes: #if only one sample per lane if len(lanes[lane]) == 1: single_sample_lanes.append([lane, lanes[lane][0]]) #now I know what are the lanes with a single index #now collect stats for lane_index in single_sample_lanes: lane = lane_index[0] index = lane_index[1] #get percentage of undetermined if lane not in flowcell_db[fc_doc]["Undetermined"]: continue #it means this lane has no undetermined pc_undet = [sample['% of thelane'] for sample in flowcell_db[fc_doc]['illumina']['Demultiplex_Stats']['Barcode_lane_statistics'] if sample['Lane']==lane and sample['Barcode sequence']=='unknown'][0] try: pc_undet = float(pc_undet) except ValueError: #sometimes it is empty continue if pc_undet > 10: if index not in undet_stats[instrument_name]: undet_stats[instrument_name][index] = 0 #initialiaze this indexes[index] = 0 #mark this as seen undet_stats[instrument_name][index] += 1 # seen a lane with high amount of undetermined print(",", end=' ') for index in indexes: print("{},".format(index), end=' ') print("") for instrument in undet_stats: print("{},".format(instrument), end=' ') for index in indexes: if index in undet_stats[instrument]: print("{},".format(undet_stats[instrument][index]), end=' ') else: print("0,", end=' ') print("") print("") def find_undetermined_index_over_time(index_to_be_searched, instrument_type): couch=setupServer(CONFIG) flowcell_db = couch["x_flowcells"] flowcell_docs = {} for fc_doc in flowcell_db: try: undetermined = flowcell_db[fc_doc]["Undetermined"] except KeyError: continue flowcell_docs[flowcell_db[fc_doc]["RunInfo"]["Id"]] = fc_doc time_line = [] for FCid in sorted(flowcell_docs): # first check that I have all necessary info to extract information fc_doc = flowcell_docs[FCid] FC_type = get_FC_type(FCid) #if a instrument type is specifed process only FCs run on that instrument if instrument_type is not None: if instrument_type != FC_type: continue undetermined = 
flowcell_db[fc_doc]["Undetermined"] lanes_undet = [FCid, []] for lane in ['1','2','3','4','5','6','7','8']: if lane not in undetermined: continue index_to_be_searched_count = 0 for undetermined_index in undetermined[lane]: if index_to_be_searched in undetermined_index: index_to_be_searched_count = undetermined[lane][undetermined_index] lanes_undet[1].append([lane, index_to_be_searched_count]) if len(lanes_undet[1]) > 0: time_line.append(lanes_undet) for FC in time_line: FCid = FC[0] for lane in FC[1]: print("{}_{} {}".format(FCid, lane[0], lane[1])) def undet_index_to_projects(index_to_be_searched, instrument_type, min_occurences=0): status_db = setupServer(CONFIG) workset_db = status_db['worksets'] workset_project_view = workset_db.view('project/ws_proj') flowcell_db = status_db["x_flowcells"] flowcell_docs = {} counter = 0 projects_with_undet_in_fc_set = set() worksets_with_undet_in_fc = {} for fc_doc in flowcell_db: try: undetermined = flowcell_db[fc_doc]["Undetermined"] except KeyError: continue FCid = flowcell_db[fc_doc]["RunInfo"]["Id"] # first check that I have all necessary info to extract information FC_type = get_FC_type(FCid) #if a instrument type is specifed process only FCs run on that instrument if instrument_type is not None: if instrument_type != FC_type: continue undetermined = flowcell_db[fc_doc]["Undetermined"] for lane in ['1','2','3','4','5','6','7','8']: if lane not in undetermined: continue index_to_be_searched_count = 0 if index_to_be_searched in undetermined[lane] and undetermined[lane][index_to_be_searched] > min_occurences: name = 'SampleName' for samplesheet_entry in flowcell_db[fc_doc]["samplesheet_csv"]: if 'SampleName' not in samplesheet_entry: name = 'Sample_Name' samples_with_undet_in_lane = set([samplesheet_entry[name] for samplesheet_entry in flowcell_db[fc_doc]["samplesheet_csv"] if samplesheet_entry['Lane']==lane]) projects_with_undet_in_lane = set([samplesheet_entry[name].split("_")[0] for samplesheet_entry in flowcell_db[fc_doc]["samplesheet_csv"] if samplesheet_entry['Lane']==lane]) projects_with_undet_in_fc_set.update(projects_with_undet_in_lane) #find out which workset contains these samples for project in projects_with_undet_in_lane: #for each proejct look which workset has been involved #if len(workset_project_view[project].rows) > 1: # import pdb # pdb.set_trace() samples_with_undet_ws = {} for sample in samples_with_undet_in_lane: #now I need to figure out in which WS the samples were... 
might be more than one as samples might be pooled for row in workset_project_view[project].rows: ws_doc_id = row.id ws_id = list(row.value.keys())[0] #I am pretty sure that for each row I have a sinlge entry if sample in list(row.value[ws_id]['samples'].keys()): location = row.value[ws_id]['samples'][sample]['location'] #now I know that this sample in this lane in this FC was affected by index presence and I know the position if ws_id not in worksets_with_undet_in_fc: worksets_with_undet_in_fc[ws_id] = {} if FCid not in worksets_with_undet_in_fc[ws_id]: worksets_with_undet_in_fc[ws_id][FCid] = {} if lane not in worksets_with_undet_in_fc[ws_id][FCid]: worksets_with_undet_in_fc[ws_id][FCid][lane] = set() worksets_with_undet_in_fc[ws_id][FCid][lane].add((sample,location)) for ws_id in sorted(worksets_with_undet_in_fc): print(ws_id) for run_id in sorted(worksets_with_undet_in_fc[ws_id]): print("\t{}".format(run_id)) for lane in sorted(worksets_with_undet_in_fc[ws_id][run_id]): sys.stdout.write("\t\t{}: ".format(lane)) for sample_location in worksets_with_undet_in_fc[ws_id][run_id][lane]: sys.stdout.write("({},{}) ".format(sample_location[0], sample_location[1])) sys.stdout.write('\n') def fetch_undermined_stats(): #initialise couch=setupServer(CONFIG) flowcell_db = couch["x_flowcells"] #initialise counters for all FCs MostOccurringUndetIndexes = {} FC_num = 0 lanes_num = 0 MostOccurringUndetIndexes["Total"] = {} #for HiSeqX FCs FC_XTen_num = 0 lanes_Xten_num = 0 MostOccurringUndetIndexes["HiSeqX"] = {} #for MiSeq FCs FC_MiSeq_num = 0 lanes_MiSeq_num = 0 MostOccurringUndetIndexes["MiSeq"] = {} #for HiSeq2500 FCs FC_HiSeq_num = 0 lanes_HiSeq_num = 0 MostOccurringUndetIndexes["HiSeq2500"] = {} for fc_doc in sorted(flowcell_db): # first check that I have all necessary info to extract information try: undetermined = flowcell_db[fc_doc]["Undetermined"] except KeyError: continue FCid = flowcell_db[fc_doc]["RunInfo"]["Id"] FC_type = get_FC_type(FCid) FC_num += 1 if FC_type == "HiSeqX": FC_XTen_num += 1 elif FC_type == "HiSeq2500": FC_HiSeq_num += 1 elif FC_type == "MiSeq": FC_MiSeq_num += 1 #we can use the illumina Demultiplex_Stats Barcode_lane_statistics to fetch info about indexes for lane in undetermined: #for each lane if len(undetermined[lane]) > 1: # if there are elements
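The left-shift check in Indexes.check_left_shift_conflicts above shifts each index one base to the left, pads with "A", and flags any index in the same group within Hamming distance 2 of the shifted sequence. Here is a standalone sketch of that check with the Hamming distance written out (the real script uses the distance package); the two demo sequences are made up.

def hamming(a, b):
    """Number of mismatching positions between two equal-length strings."""
    return sum(x != y for x, y in zip(a, b))

def left_shift_conflicts(indexes, max_dist=2):
    """Yield (name, other, shifted, dist) when a 1-bp left shift collides with another index."""
    for name, seq in indexes.items():
        shifted = seq[1:] + "A"
        for other_name, other_seq in indexes.items():
            dist = hamming(other_seq, shifted)
            if dist <= max_dist:
                yield name, other_name, shifted, dist

demo = {"i7_01": "ATTACTCG", "i7_02": "TTACTCGA"}  # the second is a 1-bp left shift of the first
for conflict in left_shift_conflicts(demo):
    print(conflict)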
= { "mountTargetId": mount_target_id } path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing} for (k, v) in six.iteritems(path_params): if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0): raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k)) header_params = { "accept": "application/json", "content-type": "application/json", "opc-request-id": kwargs.get("opc_request_id", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, response_type="MountTarget") else: return self.base_client.call_api( resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, response_type="MountTarget") def get_snapshot(self, snapshot_id, **kwargs): """ Gets the specified snapshot's information. :param str snapshot_id: (required) The OCID of the snapshot. :param str opc_request_id: (optional) Unique identifier for the request. If you need to contact Oracle about a particular request, please provide the request ID. :param obj retry_strategy: (optional) A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level. This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__. To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`. 
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.file_storage.models.Snapshot` :rtype: :class:`~oci.response.Response` """ resource_path = "/snapshots/{snapshotId}" method = "GET" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "opc_request_id" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "get_snapshot got unknown kwargs: {!r}".format(extra_kwargs)) path_params = { "snapshotId": snapshot_id } path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing} for (k, v) in six.iteritems(path_params): if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0): raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k)) header_params = { "accept": "application/json", "content-type": "application/json", "opc-request-id": kwargs.get("opc_request_id", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, response_type="Snapshot") else: return self.base_client.call_api( resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, response_type="Snapshot") def list_export_sets(self, compartment_id, availability_domain, **kwargs): """ Lists the export set resources in the specified compartment. :param str compartment_id: (required) The OCID of the compartment. :param str availability_domain: (required) The name of the availability domain. Example: `Uocm:PHX-AD-1` :param int limit: (optional) For list pagination. The maximum number of results per page, or items to return in a paginated \"List\" call. 1 is the minimum, 1000 is the maximum. For important details about how pagination works, see `List Pagination`__. Example: `500` __ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine :param str page: (optional) For list pagination. The value of the `opc-next-page` response header from the previous \"List\" call. For important details about how pagination works, see `List Pagination`__. __ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine :param str display_name: (optional) A user-friendly name. It does not have to be unique, and it is changeable. Example: `My resource` :param str lifecycle_state: (optional) Filter results by the specified lifecycle state. Must be a valid state for the resource type. Allowed values are: "CREATING", "ACTIVE", "DELETING", "DELETED", "FAILED" :param str id: (optional) Filter results by OCID. Must be an OCID of the correct type for the resouce type. :param str sort_by: (optional) The field to sort by. You can provide either value, but not both. By default, when you sort by time created, results are shown in descending order. When you sort by display name, results are shown in ascending order. Allowed values are: "TIMECREATED", "DISPLAYNAME" :param str sort_order: (optional) The sort order to use, either 'asc' or 'desc', where 'asc' is ascending and 'desc' is descending. The default order is 'desc' except for numeric values. Allowed values are: "ASC", "DESC" :param str opc_request_id: (optional) Unique identifier for the request. 
If you need to contact Oracle about a particular request, please provide the request ID. :param obj retry_strategy: (optional) A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level. This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__. To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`. :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.file_storage.models.ExportSetSummary` :rtype: :class:`~oci.response.Response` """ resource_path = "/exportSets" method = "GET" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "limit", "page", "display_name", "lifecycle_state", "id", "sort_by", "sort_order", "opc_request_id" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "list_export_sets got unknown kwargs: {!r}".format(extra_kwargs)) if 'lifecycle_state' in kwargs: lifecycle_state_allowed_values = ["CREATING", "ACTIVE", "DELETING", "DELETED", "FAILED"] if kwargs['lifecycle_state'] not in lifecycle_state_allowed_values: raise ValueError( "Invalid value for `lifecycle_state`, must be one of {0}".format(lifecycle_state_allowed_values) ) if 'sort_by' in kwargs: sort_by_allowed_values = ["TIMECREATED", "DISPLAYNAME"] if kwargs['sort_by'] not in sort_by_allowed_values: raise ValueError( "Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values) ) if 'sort_order' in kwargs: sort_order_allowed_values = ["ASC", "DESC"] if kwargs['sort_order'] not in sort_order_allowed_values: raise ValueError( "Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values) ) query_params = { "compartmentId": compartment_id, "availabilityDomain": availability_domain, "limit": kwargs.get("limit", missing), "page": kwargs.get("page", missing), "displayName": kwargs.get("display_name", missing), "lifecycleState": kwargs.get("lifecycle_state", missing), "id": kwargs.get("id", missing), "sortBy": kwargs.get("sort_by", missing), "sortOrder": kwargs.get("sort_order", missing) } query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None} header_params = { "accept": "application/json", "content-type": "application/json", "opc-request-id": kwargs.get("opc_request_id", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, query_params=query_params, header_params=header_params, response_type="list[ExportSetSummary]") else: return self.base_client.call_api( resource_path=resource_path, method=method, query_params=query_params, header_params=header_params, response_type="list[ExportSetSummary]") def list_exports(self, **kwargs): """ Lists export resources by compartment, file system, or export set. You must specify an export set ID, a file system ID, and / or a compartment ID. 
:param str compartment_id: (optional) The OCID of the compartment. :param int limit: (optional) For list pagination. The maximum number of results per page, or items to return in a paginated \"List\" call. 1 is the minimum, 1000 is the maximum. For important details about how pagination works, see `List Pagination`__. Example: `500` __ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine :param str page: (optional) For list pagination. The value of the `opc-next-page` response header from the previous \"List\" call. For important details about how pagination works, see `List Pagination`__. __ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine :param str export_set_id: (optional) The OCID of the export set. :param str file_system_id: (optional) The OCID of the file system. :param str lifecycle_state: (optional) Filter results by the specified lifecycle state. Must be a valid state for the resource type. Allowed values are: "CREATING", "ACTIVE", "DELETING", "DELETED", "FAILED" :param str id: (optional) Filter results by OCID. Must be an OCID of the correct type for the resouce type. :param str sort_by: (optional) The field to sort by. You can provide either value, but not both. By default, when you sort by time created, results are shown in descending order. When you sort by path, results are shown in ascending alphanumeric order. Allowed values are: "TIMECREATED", "PATH" :param str sort_order: (optional) The sort order to use, either 'asc' or 'desc', where 'asc' is ascending and 'desc' is descending. The default order is 'desc' except for numeric values. Allowed values are: "ASC", "DESC" :param str opc_request_id: (optional) Unique identifier for the request. If you need to contact Oracle about a particular request, please provide the request ID. :param obj retry_strategy: (optional) A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level. This should be one of the strategies available in
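A hedged usage sketch for the list_export_sets operation defined above, paging manually through results via the opc-next-page response header exactly as the docstring describes. The compartment OCID is a placeholder, and oci.config.from_file() / FileStorageClient are the usual oci-python-sdk entry points; adjust to however the client is constructed in your environment.

import oci

config = oci.config.from_file()                    # reads ~/.oci/config by default
client = oci.file_storage.FileStorageClient(config)

compartment_id = "ocid1.compartment.oc1..example"  # placeholder OCID
availability_domain = "Uocm:PHX-AD-1"              # format taken from the docstring above

page = None
while True:
    response = client.list_export_sets(
        compartment_id,
        availability_domain,
        limit=100,
        page=page,
        lifecycle_state="ACTIVE",
        sort_by="TIMECREATED",
        sort_order="DESC",
    )
    for export_set in response.data:               # list of ExportSetSummary models
        print(export_set.display_name, export_set.id)
    page = response.headers.get("opc-next-page")   # pagination token, as documented above
    if not page:
        break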
<reponame>myamullaciencia/Bayesian-statistics # Bite Size Bayes Copyright 2020 <NAME> License: [Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)](https://creativecommons.org/licenses/by-nc-sa/4.0/) import numpy as np import pandas as pd import matplotlib.pyplot as plt ## Review [In the previous notebook](https://colab.research.google.com/github/AllenDowney/BiteSizeBayes/blob/master/06_pmf.ipynb) we extended the cookie problem with more bowls and the dice problem with more dice. And I introduced an alternative to the Bayes table, a probability mass function (PMF), which is a useful way to represent and do computations with distributions. Here's the function I used to create a `Pmf`, given a sequence of quantities, `xs`, and the corresponding probabilities, `ps`. def make_pmf(xs, ps, **options): """Make a Series that represents a PMF. xs: sequence of values ps: sequence of probabilities options: keyword arguments passed to Series constructor returns: Pandas Series """ pmf = pd.Series(ps, index=xs, **options) return pmf And here's the function that performs a Bayesian update, given a sequence of likelihoods: def bayes_update(pmf, likelihood): """Do a Bayesian update. pmf: Series that represents the prior likelihood: sequence of likelihoods returns: float probability of the data """ pmf *= likelihood prob_data = pmf.sum() pmf /= prob_data return prob_data We'll use these functions to solve a new problem similar to the cookie problem. ## The Euro problem Here's a problem from <NAME>Kay's book, [*Information Theory, Inference, and Learning Algorithms*](http://www.inference.org.uk/mackay/itila/p0.html), which is the book where I first learned about Bayesian statistics. MacKay writes: > A statistical statement appeared in The Guardian on Friday January 4, 2002: > > >"When spun on edge 250 times, a Belgian one-euro coin came up heads 140 times and tails 110. ‘It looks very suspicious to me’, said <NAME>, a statistics lecturer at the London School of Economics. ‘If the coin were unbiased the chance of getting a result as extreme as that would be less than 7%’." > > But [asks MacKay] do these data give evidence that the coin is biased rather than fair? To answer this question, we have to make some modeling choices. First, let's assume that if you spin a coin on edge, there is some probability that it will land heads up. I'll call that probability $x$. Second, let's assume that $x$ varies from one coin to the next, depending on how the coin is balanced and maybe some other factors. With these assumptions we can formulate MacKay's question as an inference problem: given the data --- 140 heads and 110 tails --- what do we think $x$ is for this coin? This formulation is similar to the 101 Bowls problem we saw in the previous notebook; in fact, we will use the same likelihoods. But in the 101 Bowls problem, we are told that we choose a bowl at random, which implies that all bowls have the same prior probability. For the Euro problem, we have to think harder. What values of $x$ do you think are reasonable? It seems likely that many coins are "fair", meaning that the probability of heads is close to 50%. Do you think there are coins where $x$ is 75%? How about 90%? To be honest, I don't really know. To get started, I will assume that all values of $x$, from 0% to 100%, are equally likely. Then we'll come back and try another prior. Here's a uniform prior from 0 to 100. 
xs = np.arange(101) prior = 1/101 pmf = make_pmf(xs, prior) Here are the likelihoods for heads and tails: likelihood_heads = xs / 100 likelihood_tails = 1 - xs / 100 And here are the updates for 140 heads and 110 tails. for i in range(140): bayes_update(pmf, likelihood_heads) for i in range(110): bayes_update(pmf, likelihood_tails) Here's what the results look like: pmf.plot() plt.xlabel('Possible values of x') plt.ylabel('Probability') plt.title('140 heads, 110 tails'); This curve shows the "posterior distribution" of $x$. ## Put a function on it Before we go on, let's put that update in a function, because we are going to need it again. def bayes_update_euro(pmf, data): """Do a Bayesian update. pmf: Series that represents a prior PMF data: tuple of number of heads, tails """ heads, tails = data xs = pmf.index likelihood_heads = xs / 100 likelihood_tails = 1 - likelihood_heads for i in range(heads): bayes_update(pmf, likelihood_heads) for i in range(tails): bayes_update(pmf, likelihood_tails) This function takes a PMF that represents the prior, and a tuple that contains the number of heads and tails. Here's the uniform prior again. xs = np.arange(101) prior = 1/101 uniform = make_pmf(xs, prior) Here's the update. data = 140, 110 bayes_update_euro(uniform, data) And here are the results again. uniform.plot() plt.xlabel('Possible values of x') plt.ylabel('Probability') plt.title('140 heads, 110 tails'); ## A better prior Remember that this result is based on a uniform prior, which assumes that any value of $x$ from 0 to 100 is equally likely. Given what we know about coins, that's probabily not true. I can believe that if you spin a lop-sided coin on edge, it might be somewhat more likely to land on heads or tails. But unless the coin is heavily weighted on one side, I would be surprised if $x$ were greater than 60% or less than 40%. Of course, I could be wrong, but in general I would expect to find $x$ closer to 50%, and I would be surprised to find it near 0% or 100%. I can represent that prior believe with a triangle-shaped prior. Here's an array that ramps up from 0 to 49 and ramps down from 50 to 0. ramp_up = np.arange(50) ramp_down = np.arange(50, -1, -1) ps = np.append(ramp_up, ramp_down) I'll put it in a PMF and normalize it so it adds up to 1. triangle = make_pmf(xs, ps) triangle /= triangle.sum() Here's what the triangle prior looks like. triangle.plot(color='C1') plt.xlabel('Possible values of x') plt.ylabel('Probability') plt.title('Triangle prior'); Now let's update it with the data. data = 140, 110 bayes_update_euro(triangle, data) And plot the results, along with the posterior based on a uniform prior. uniform.plot(label='Uniform') triangle.plot(label='Triangle') plt.xlabel('Possible values of x') plt.ylabel('Probability') plt.title('140 heads, 110 tails') plt.legend(); The posterior distributions are almost identical because, in this case, we have enough data to "swamp the prior"; that is, the posteriors depend strongly on the data and only weakly on the priors. This is good news, because it suggests that we can use data to resolve arguments. Suppose two people disagree about the correct prior. If neither can persuade the other, they might have to agree to disagree. But if they get new data, and each of them does a Bayesian update, they will usually find their beliefs converging. And with enough data, the remaining difference can be so small that it makes no difference in practice. 
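The "swamping the prior" claim can be checked numerically with the two posteriors just computed: since uniform and triangle are both normalized Series over the same index, their largest pointwise gap and their total variation distance (half the sum of absolute differences) are easy to print. This is a small add-on check, not part of the original notebook.

import numpy as np

diff = np.abs(uniform - triangle)
print(diff.max())       # largest pointwise difference between the two posteriors
print(diff.sum() / 2)   # total variation distance; small when the data swamp the prior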
## Summarizing the posterior distribution The posterior distribution contains all of the information we have about the value of $x$. But sometimes we want to summarize this information. We have already seen one way to summarize a posterior distribution, the Maximum Aposteori Probability, or MAP: uniform.idxmax() `idxmax` returns the value of $x$ with the highest probability. In this example, we get the same MAP with the triangle prior: triangle.idxmax() Another way to summarize the posterior distribution is the posterior mean. Given a set of values, $x_i$, and the corresponding probabilities, $p_i$, the mean of the distribution is: $\sum_i x_i p_i$ The following function takes a Pmf and computes its mean. Note that this function only works correctly if the Pmf is normalized. def pmf_mean(pmf): """Compute the mean of a PMF. pmf: Series representing a PMF return: float """ return np.sum(pmf.index * pmf) Here's the posterior mean based on the uniform prior: pmf_mean(uniform) And here's the posterior mean with the triangle prior: pmf_mean(triangle) The posterior means are not identical, but they are close enough that the difference probably doesn't matter. In this example, the posterior mean is very close to the MAP. That's true when the posterior distribution is symmetric, but it is not always true. If someone asks what we think $x$ is, the MAP or the posterior mean might be a good answer. But MacKay asked a different question: do these data give evidence that the coin is biased rather than fair? We have more work to do before we can really answer this question. But first, I want to rule out an approach that is tempting, but incorrect. ## Posterior probability If the coin is "fair", that means that $x$ is 50%. So it might be tempting to use the posterior PMF to compute the probability that $x$
<reponame>MahdadJafarzadeh/Zzzscoring<filename>Zzzscoring_v0.1.7.py # -*- coding: utf-8 -*- """ Created on Wed Jun 24 00:23:30 2020 @author: mahda # ============================================================================= # # Copyright (c) 2020 <NAME> # # Zzzscoring: A GUI-based package for sleep scoring! # ============================================================================= """ from tkinter import LabelFrame, Label, Button, filedialog, messagebox,OptionMenu, StringVar,DoubleVar from tkinter import * import mne import numpy as np from numpy import loadtxt import time from ssccoorriinngg import ssccoorriinngg import matplotlib.pyplot as plt from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import RandomizedSearchCV class Zzzscoring(): def __init__(self, master): self.master = master master.title("Zzzscoring: Automatic sleep scoring package") #### !!~~~~~~~~~~~~~~~~~ DEFINE INPUT DATAFRAME ~~~~~~~~~~~~~~~~~!!#### self.frame_import = LabelFrame(self.master, text = "Import files section", padx = 150, pady = 100, font = 'Calibri 18 bold') self.frame_import.grid(row = 0 , column = 0, padx = 200, pady = 50, columnspan = 8) #### ==================== Help pop-up button ======================#### self.popup_button = Button(self.master, text = "Help", command = self.help_pop_up_func, font = 'Calibri 13 bold', fg = 'white', bg = 'black') self.popup_button.grid(row = 1, column = 8) #### ==================== Import data EDFs ========================#### # Label: Import EDF self.label_import = Label(self.frame_import, text = "Import EDF files here:", font = 'Calibri 12 bold') self.label_import.grid(row = 0 , column = 0, padx = 15, pady = 10) # Button: Import EDF (Browse) self.button_import_browse = Button(self.frame_import, text = "Browse data", padx = 100, pady = 20,font = 'Calibri 10 bold', command = self.load_data_file_dialog, fg = 'blue', relief = RIDGE) self.button_import_browse.grid(row = 1, column = 0, padx = 15, pady = 10) #### ================== Import hypnogram files ====================#### # Show a message about hypnograms self.label_hypnos = Label(self.frame_import, text = "Import hypnogram file (.txt) here:", font = 'Calibri 12 bold') self.label_hypnos.grid(row = 0 , column = 1, padx = 15, pady = 10) # Define browse button to import hypnos self.button_hypnos_browse = Button(self.frame_import, text = "Browse labels", padx = 100, pady = 20, font = 'Calibri 10 bold', command = self.load_hypno_file_dialog,fg = 'blue', relief = RIDGE) self.button_hypnos_browse.grid(row = 1, column = 1, padx = 15, pady = 10) #### ===================== Define train size ======================#### # Define train size section self.label_train_size = Label(self.frame_import, text = "Train size portion (between 0 - 1):", font = 'Calibri 12 bold') self.label_train_size.grid(row = 0 , column = 3, padx = 15, pady = 10) # Bar to ask for user's entry self.train_size = DoubleVar() self.train_size.set(0.7) self.entry_train_size = OptionMenu(self.frame_import, self.train_size, 0.6, 0.7, 0.8, 0.9) self.entry_train_size.grid(row = 1, column = 3, padx = 15, pady = 10) self.entry_train_size.config(font= 'Calibri 10 bold', fg='black') #### =================== Push apply to load data ==================#### #Label to read data and extract features self.label_apply = Label(self.frame_import, text = "Press to Load, pre-process, and extract features!", font = 'Calibri 12 bold') self.label_apply.grid(row = 0 , column = 4) # Apply button self.button_apply = Button(self.frame_import, text = 
"Apply", padx = 100, pady=20, font = 'Calibri 10 bold', relief = RIDGE, fg = 'blue', command = self.Apply_button) self.button_apply.grid(row = 1 , column =4, padx = 15, pady = 10) #### !!~~~~~~~~~~~~~~ DEFINE ML SECTION FRAME ~~~~~~~~~~~~~~~~~~~!!#### self.frame_ML = LabelFrame(self.master, text = "Machine Learning Section", padx = 150, pady = 100, font = 'Calibri 18 bold') self.frame_ML.grid(row = 1 , column = 0, padx = 200, pady = 50, columnspan = 8) #### ================ Pick ML Algorithm of interest ===============#### # Label self.label_ML_algorithm = Label(self.frame_ML, text = "Choose the machine learning algorithm:", font = 'Calibri 12 bold') self.label_ML_algorithm.grid(row = 0, column = 0, padx = 15, pady = 10) # Dropdown menu self.selected_ML = StringVar() self.selected_ML.set("Random forest") self.drop = OptionMenu(self.frame_ML, self.selected_ML, "ANN (MLP)", "SVM", "Random forest","XGBoost","Logistic regression", "Naive bayes", "Randomized trees","GradientBoosting", "ADABoost") self.drop.grid(row = 1, column = 0) self.drop.config(font= 'Calibri 10 bold', fg='blue') # label_selec self.label_select = Label(self.frame_ML, text = "Press after choosing ML algorithm:", font = 'Calibri 12 bold') self.label_select.grid(row = 0 , column =1) # select button self.button_select = Button(self.frame_ML, text = "Select!", padx = 100, pady=20, font = 'Calibri 12 bold', relief = RIDGE, fg = 'blue', command = self.Select_ML_button) self.button_select.grid(row = 1 , column =1, padx = 15, pady = 10) # Chekbox for time-dependency self.td_var = IntVar() self.checkbox_td = Checkbutton(self.frame_ML, text = "Multi-to-one classifcation", font = 'Calibri 12 bold', variable = self.td_var) self.checkbox_td.grid(row = 2, column = 0) # Chekbox for feature selection self.feat_select_var = IntVar() self.checkbox_feat_select = Checkbutton(self.frame_ML, text = "Feature Selection", font = 'Calibri 12 bold', variable = self.feat_select_var) self.checkbox_feat_select.grid(row = 3, column = 0) #%% ################### DEFINE FUNCTIONS OF BUTTONS ####################### #%% Function: Import EDF (Browse) def load_data_file_dialog(self): global data_files_list self.filenames = filedialog.askopenfilenames(initialdir= "C:/",title = 'select data files', filetype = (("edf", "*.edf"), ("All Files", "*.*"))) # Make a list of imported file names (full path) data_files_list = self.frame_import.tk.splitlist(self.filenames) self.n_data_files = len(data_files_list) # check if the user chose somthing if not data_files_list: self.label_data = Label(self.frame_import, text = "No file has been selected!", fg = 'red', font = 'Helvetica 9 bold').grid(row = 2, column = 0) else: self.label_data = Label(self.frame_import, text = str(self.n_data_files) + " EDF files has been loaded!", fg = 'green', font = 'Helvetica 9 bold').grid(row = 2, column = 0) #%% Function: Import Hypnogram (Browse) def load_hypno_file_dialog(self): global hypno_files_list self.filenames = filedialog.askopenfilenames(initialdir= "C:/",title = 'select label files', filetype = (("txt", "*.txt"),("csv", "*.csv"), ("All Files", "*.*"))) hypno_files_list = self.frame_import.tk.splitlist(self.filenames) self.n_label_files = len(hypno_files_list) # check if the user chose somthing if not hypno_files_list: self.label_labels = Label(self.frame_import, text = "No hypnogram has been selected!", fg = 'red', font = 'Helvetica 9 bold').grid(row = 2, column = 1) else: self.label_labels = Label(self.frame_import, text = str(self.n_label_files) + " hypnogram files has been 
loaded!", fg = 'green', font = 'Helvetica 9 bold').grid(row = 2, column = 1) #%% Read EDF and hypnograms and apply feature extraction def Read_Preproc_FeatExtract(self): global subjects_dic, hyp_dic, dic_pciked_chans subjects_dic = {} hyp_dic = {} dic_pciked_chans = {} #### ======================= Create log window ====================#### self.log_win = Toplevel() self.log_win.title("Log file of current processes") # Label self.label = Label(self.log_win, text= "Process log file:",font = 'Helvetica 12 bold') self.label.pack() self.close_log_win = Button(self.log_win, text="Dismiss", command=self.log_win.destroy) self.close_log_win.pack() #### ======================= Read data files ======================#### for idx, c_subj in enumerate(data_files_list): self.log1_ = Label(self.log_win, text = "Analyzing data: " + str(c_subj[-11:-4]) + "\tPlease wait ...").pack() print (f'Analyzing data: {c_subj[-11:-4]}') ## Read in data self.file = data_files_list[idx] tic = time.time() self.data = mne.io.read_raw_edf(self.file) # Data raw EEG --> Deactive # data.plot(duration = 30, highpass = .3 , lowpass = 25 ) self.raw_data = self.data.get_data() print('Time to read EDF: {}'.format(time.time()-tic)) self.log2_ = Label(self.log_win, text = "Time to read EDF data (s): " +str(np.round(time.time()-tic))).pack() #####=================Retrieving information from data====================##### self.DataInfo = self.data.info self.AvailableChannels = self.DataInfo['ch_names'] self.fs = int(self.DataInfo['sfreq']) #####================= Find index of required channels ===================##### # ============================================================================= # for indx, c in enumerate(AvailableChannels): # if c in RequiredChannels: # Idx.append(indx) # elif c in Mastoids: # Idx_Mastoids.append(indx) # ============================================================================= #####===== Sampling rate is 200hz; thus 1 epoch(30s) is 6000 samples =====##### T = 30 #secs len_epoch = self.fs * T start_epoch = 0 n_channels = len(self.AvailableChannels) #####============ Cut tail; use modulo to find full epochs ===============##### self.raw_data = self.raw_data[:, 0:self.raw_data.shape[1] - self.raw_data.shape[1]%len_epoch] print('extra tail of data has been successfully removed!') #####========== Reshape data [n_channel, len_epoch, n_epochs] ============##### data_epoched = np.reshape(self.raw_data, (n_channels, len_epoch, int(self.raw_data.shape[1]/len_epoch)), order='F' ) # Find corresponidng indices of the required chans self.Idx = [] # Find index of required channel(s) for indx, c in enumerate(self.AvailableChannels): if c in self.RequiredChannels: self.Idx.append(indx) data_epoched = data_epoched[self.Idx, :] #####===================== Reading hypnogram data ========================##### hyp = loadtxt(hypno_files_list[idx]) ### Create sepereate data subfiles based on hypnogram (N1, N2, N3, NREM, REM) tic = time.time() #####================= Concatenation of selected channels ================##### # Calculate referenced channels: #data_epoched_selected = data_epoched[Idx] - data_epoched[Idx_Mastoids] #####================= Find order
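The epoching step in Read_Preproc_FeatExtract above trims the recording to a whole number of 30-second epochs and reshapes it to (n_channels, samples_per_epoch, n_epochs) with Fortran ordering, so that epoch e of channel c is the slice raw[c, e*len_epoch:(e+1)*len_epoch]. Here is a self-contained sketch of just that arithmetic on a synthetic array; the 200 Hz rate matches the comment in the code, while the channel count and duration are made up.

import numpy as np

def epoch_signal(raw, fs, epoch_len_s=30):
    """raw: (n_channels, n_samples) array -> (n_channels, fs*epoch_len_s, n_epochs)."""
    samples_per_epoch = fs * epoch_len_s
    usable = raw.shape[1] - raw.shape[1] % samples_per_epoch  # drop the partial tail
    trimmed = raw[:, :usable]
    n_epochs = usable // samples_per_epoch
    return np.reshape(trimmed, (raw.shape[0], samples_per_epoch, n_epochs), order='F')

fs = 200                             # Hz, as assumed in the comments above
raw = np.random.randn(2, fs * 95)    # 2 channels, 95 s of fake signal
epochs = epoch_signal(raw, fs)
print(epochs.shape)                  # (2, 6000, 3) -> three full 30-second epochs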
self._set_volume() self.reinit_object() elif 'mass' in interventions_dict: for i in range(0, len(self._pybullet_client_ids)): pybullet.changeDynamics( self._block_ids[i], -1, mass=self._mass, physicsClientId=self._pybullet_client_ids[i]) elif 'friction' in interventions_dict: self._set_lateral_friction(self._lateral_friction) if 'cartesian_position' in interventions_dict or 'orientation' in \ interventions_dict: for i in range(0, len(self._pybullet_client_ids)): position[-1] += WorldConstants.FLOOR_HEIGHT pybullet.resetBasePositionAndOrientation( self._block_ids[i], position, orientation, physicsClientId=self._pybullet_client_ids[i]) if 'color' in interventions_dict: self._color = interventions_dict['color'] self._set_color(self._color) if ('linear_velocity' in interventions_dict) ^ \ ('angular_velocity' in interventions_dict): for i in range(0, len(self._pybullet_client_ids)): linear_velocity, angular_velocity = \ pybullet.getBaseVelocity( self._block_ids[i], physicsClientId= self._pybullet_client_ids[i]) if 'linear_velocity' in interventions_dict: linear_velocity = interventions_dict['linear_velocity'] if 'angular_velocity' in interventions_dict: angular_velocity = interventions_dict['angular_velocity'] if 'angular_velocity' in interventions_dict or 'linear_velocity' in \ interventions_dict: for i in range(0, len(self._pybullet_client_ids)): pybullet.resetBaseVelocity( self._block_ids[i], linear_velocity, angular_velocity, physicsClientId=self._pybullet_client_ids[i]) return def get_state_variable_names(self): """ :return: (list) returns the state variable names. """ return self._state_variable_names def is_not_fixed(self): """ :return: (bool) true if its not fixed object. """ return self._not_fixed def get_bounds(self): """ :return: (tuple) first position of the tuple is the lower bound of the bounding box of the object and second position of the tuple is the upper bound of the bounding box. """ return self._lower_bounds, self._upper_bounds def get_state_size(self): """ :return: (int) specifies how large is the state of the object. """ return self._state_size def get_bounding_box(self): """ :return: (nd.array) first position of the array is the lower bound of the bounding box of the object and second position of the array is the upper bound of the bounding box. """ #should be the same in both bb = pybullet.getAABB(self._block_ids[0], physicsClientId=self._pybullet_client_ids[0]) bb = np.array(bb) bb[0][-1] -= WorldConstants.FLOOR_HEIGHT bb[1][-1] -= WorldConstants.FLOOR_HEIGHT return bb def get_vertices(self): """ :return: (nd.array) specifies the current vertices of the object. """ position, orientation = pybullet.\ getBasePositionAndOrientation( self._block_ids[0], physicsClientId= self._pybullet_client_ids[0] ) position = np.array(position) position[-1] -= WorldConstants.FLOOR_HEIGHT vertices = [[1, 1, -1, 1], [1, -1, -1, 1], [-1, 1, -1, 1], [-1, -1, -1, 1], [1, 1, 1, 1], [1, -1, 1, 1], [-1, 1, 1, 1], [-1, -1, 1, 1]] temp_size = np.array([self._size[0], self._size[1], self._size[2], 2]) vertices = [(point * temp_size / 2.0) for point in vertices] return rotate_points(np.array(vertices), orientation, position) def world_to_cube_r_matrix(self): """ :return: (nd.array) returns the transformation matrix of the object. 
""" position, orientation = pybullet.\ getBasePositionAndOrientation( self._block_ids[0], physicsClientId= self._pybullet_client_ids[0] ) position = np.array(position) position[-1] -= WorldConstants.FLOOR_HEIGHT #TODO: double check if its not the inverse return get_transformation_matrix(position, orientation) def get_rotation_matrix(self): """ :return: (nd.array) returns the rotation matrix of the object. """ position, orientation = pybullet.\ getBasePositionAndOrientation( self._block_ids[0], physicsClientId= self._pybullet_client_ids[0] ) position = np.array(position) position[-1] -= WorldConstants.FLOOR_HEIGHT #TODO: double check if its not the inverse return get_rotation_matrix(orientation) def get_size(self): """ :return: (nd.array) returns the size of the object. """ return self._size def get_volume(self): """ :return: (nd.array) returns the volume of the object. """ return self._volume def get_name(self): """ :return: (str) returns the name of the object. """ return self._name def get_block_ids(self): """ :return: (list) returns the block ids in the active pybullet clients. """ return self._block_ids class Cuboid(RigidObject): def __init__( self, pybullet_client_ids, name, size=np.array([0.065, 0.065, 0.065]), initial_position=np.array([0.0, 0.0, 0.0425]), initial_orientation=np.array([0, 0, 0, 1]), mass=0.08, color=np.array([1, 0, 0]), initial_linear_velocity=np.array([0, 0, 0]), initial_angular_velocity=np.array([0, 0, 0]), lateral_friction=1, ): """ This specifies the moving cuboid object in the arena. :param pybullet_client_ids: (list) specifies the pybullet client ids. :param name: (str) specifies the name of the object. :param size: (list float) specifies the size in the three directions. :param initial_position: (list float) specifies the position in x,y,z. :param initial_orientation: (list float) specifies the quaternion of the object. :param mass: (float) specifies the mass of the object. :param color: (list float) specifies the RGB values of the cuboid. :param initial_linear_velocity: (list float) specifies the initial linear velocity vx, vy, vz. :param initial_angular_velocity: (list float) specifies the initial angular velocities. :param lateral_friction: (float) specifies the lateral friction. """ #TODO: intervene on friction as well super(Cuboid, self).__init__(pybullet_client_ids=pybullet_client_ids, name=name, size=size, initial_position=initial_position, initial_orientation=initial_orientation, mass=mass, color=color, fixed_bool=False, lateral_friction=lateral_friction, spinning_friction=0.001, restitution=0, initial_linear_velocity= initial_linear_velocity, initial_angular_velocity= initial_angular_velocity) def _create_object(self, pybullet_client_id, **kwargs): """ :param pybullet_client_id: (int) corresponding pybullet client to create the object in. :param kwargs: (params) parameters for the object creation. :return: (tuple) the first position specifies the shape_id and the second specifies the block id for pybullet. """ shape_id = pybullet.createCollisionShape( shapeType=pybullet.GEOM_BOX, halfExtents=np.array(self._size) / 2, physicsClientId=pybullet_client_id) position = np.array(self._initial_position) position[-1] += WorldConstants.FLOOR_HEIGHT block_id = pybullet.createMultiBody( baseCollisionShapeIndex=shape_id, basePosition=position, baseOrientation=self._initial_orientation, baseMass=self._mass, physicsClientId=pybullet_client_id) return shape_id, block_id def _define_type_id(self): """ Sets the type id. 
:return: """ self._type_id = 1 return def get_recreation_params(self): """ :return: (dict) the creation parameters needed to recreate the object. """ recreation_params = dict() recreation_params['name'] = self._name recreation_params['size'] = self._size linear_velocity, angular_velocity = \ pybullet.getBaseVelocity( self._block_ids[0], physicsClientId= self._pybullet_client_ids[0]) position, orientation = pybullet. \ getBasePositionAndOrientation(self._block_ids[0], physicsClientId= self._pybullet_client_ids[0]) position = np.array(position) position[-1] -= WorldConstants.FLOOR_HEIGHT recreation_params['initial_position'] = position recreation_params['initial_orientation'] = orientation recreation_params['mass'] = self._mass recreation_params['color'] = self._color recreation_params['lateral_friction'] = self._lateral_friction recreation_params['initial_linear_velocity'] = \ linear_velocity recreation_params['initial_angular_velocity'] = \ angular_velocity return copy.deepcopy(recreation_params) class StaticCuboid(RigidObject): def __init__(self, pybullet_client_ids, name, size=np.array([0.065, 0.065, 0.065]), position=np.array([0.0, 0.0, 0.0425]), orientation=np.array([0, 0, 0, 1]), color=np.array([1, 0, 0]), lateral_friction=1): """ :param pybullet_client_ids: (list) specifies the pybullet clients. :param name: (str) specifies the name of the object. :param size: (list float) specifies the size in the three directions. :param position: (list float) specifies the position in x,y,z. :param orientation: (list float) specifies the quaternion of the object. :param color: (list float) specifies the RGB values of the cuboid. :param lateral_friction: (float) specifies the lateral friction. """ #TODO: intervene on friction as well super(StaticCuboid, self).__init__(pybullet_client_ids= pybullet_client_ids, name=name, size=size, initial_position=position, initial_orientation=orientation, mass=0, color=color, fixed_bool=True, lateral_friction=lateral_friction, spinning_friction=0.001, restitution=0, initial_linear_velocity= [0, 0, 0], initial_angular_velocity= [0, 0, 0]) def _create_object(self, pybullet_client_id, **kwargs): """ :param pybullet_client_id: (int) corresponding pybullet client to create the object in. :param kwargs: (params) parameters for the object creation. :return: (tuple) the first position specifies the shape_id and the second specifies the block id for pybullet. """ position = np.array(self._initial_position) position[-1] += WorldConstants.FLOOR_HEIGHT shape_id = pybullet.createCollisionShape( shapeType=pybullet.GEOM_BOX, halfExtents=np.array(self._size) / 2, physicsClientId=pybullet_client_id) block_id = pybullet.createMultiBody( baseCollisionShapeIndex=shape_id, basePosition=position, baseOrientation=self._initial_orientation, baseMass=self._mass, physicsClientId=pybullet_client_id) return shape_id, block_id def _define_type_id(self): """ Sets the type id of the object. :return: """ self._type_id = 10 return def get_recreation_params(self): """ :return: (dict) the creation parameters needed to recreate the object. """ recreation_params = dict() recreation_params['name'] = self._name recreation_params['size'] = self._size position, orientation = pybullet. 
\ getBasePositionAndOrientation(self._block_ids[0], physicsClientId= self._pybullet_client_ids[0]) position = np.array(position) position[-1] -= WorldConstants.FLOOR_HEIGHT recreation_params['position'] = position recreation_params['orientation'] = orientation recreation_params['color'] = self._color recreation_params['lateral_friction'] = self._lateral_friction return copy.deepcopy(recreation_params) class MeshObject(RigidObject): def __init__(self, pybullet_client_ids, name, filename, scale=np.array([0.01, 0.01, 0.01]), initial_position=np.array([0.0, 0.0, 0.0425]), initial_orientation=np.array([0, 0, 0, 1]), color=np.array([1, 0, 0]), mass=0.08, initial_linear_velocity=np.array([0, 0, 0]), initial_angular_velocity=np.array([0, 0, 0]), lateral_friction=1): """ :param pybullet_client_ids: (list) specifies the pybullet clients. :param name: (str) specifies the name of the object. :param filename: (str) specifies the name of the file itself. :param scale: (list float) specifies the scale of the mesh object. :param initial_position: (list float) specifies the positions in x,y,z :param initial_orientation: (list float) specifies the quaternion of the object. :param color: (list float) specifies the RGB values. :param mass: (float) specifies the object mass. :param initial_linear_velocity: (list float) specifies the velocity in vx, vy, vz. :param initial_angular_velocity: (list float) specifies the velocity in yaw, roll, pitch. :param lateral_friction: (float) specifies the lateral friction. """ #TODO: intervene on friction as well self._scale = scale self._filename = filename super(MeshObject, self).__init__(pybullet_client_ids=pybullet_client_ids, name=name, size=[0, 0, 0], initial_position=initial_position, initial_orientation=initial_orientation, mass=mass, color=color, fixed_bool=False, lateral_friction=lateral_friction, spinning_friction=0.001, restitution=0, initial_linear_velocity=initial_linear_velocity, initial_angular_velocity=initial_angular_velocity) bb = self.get_bounding_box() self._size = np.array([(bb[1][0] - bb[0][0]), (bb[1][1] - bb[0][1]), (bb[1][2] - bb[0][2])]) def _create_object(self, pybullet_client_id, **kwargs): """ :param pybullet_client_id: (int) corresponding pybullet client to create the object in. :param kwargs: (params) parameters for the object creation. :return: (tuple) the first position specifies the shape_id and the second specifies the block id for pybullet. """ position = np.array(self._initial_position) position[-1] += WorldConstants.FLOOR_HEIGHT shape_id = pybullet.createCollisionShape( shapeType=pybullet.GEOM_MESH, meshScale=self._scale, fileName=self._filename, physicsClientId=pybullet_client_id) block_id = pybullet.createMultiBody( baseCollisionShapeIndex=shape_id, basePosition=position, baseOrientation=self._initial_orientation, baseMass=self._mass, physicsClientId=pybullet_client_id) return shape_id, block_id def _define_type_id(self): """ Sets the type id of the object. :return: """ self._type_id = 2 return def get_recreation_params(self): """ :return: (dict) the creation parameters needed
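The rigid-object classes above all funnel through the same two pybullet calls when a block is spawned: ``createCollisionShape`` for the geometry and ``createMultiBody`` for the body, with the z-coordinate shifted by ``WorldConstants.FLOOR_HEIGHT``. A minimal stand-alone sketch of that pattern, using the ``Cuboid`` defaults shown above (the floor-height value is a placeholder, since the real constant is not part of this excerpt)::

    import numpy as np
    import pybullet

    FLOOR_HEIGHT = 0.01                             # placeholder for WorldConstants.FLOOR_HEIGHT

    client_id = pybullet.connect(pybullet.DIRECT)   # head-less client, like one _pybullet_client_ids entry

    size = np.array([0.065, 0.065, 0.065])          # Cuboid default size (full extents)
    position = np.array([0.0, 0.0, 0.0425])         # Cuboid default initial_position
    position[-1] += FLOOR_HEIGHT                    # same z-offset applied in _create_object

    shape_id = pybullet.createCollisionShape(
        shapeType=pybullet.GEOM_BOX,
        halfExtents=size / 2,                       # pybullet expects half extents
        physicsClientId=client_id)
    block_id = pybullet.createMultiBody(
        baseCollisionShapeIndex=shape_id,
        basePosition=position,
        baseOrientation=[0, 0, 0, 1],
        baseMass=0.08,                              # Cuboid default mass
        physicsClientId=client_id)

    # velocities are set afterwards, as in the 'linear_velocity'/'angular_velocity' interventions
    pybullet.resetBaseVelocity(block_id, [0, 0, 0], [0, 0, 0],
                               physicsClientId=client_id)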
node is for other pilot if item[3].pass_crossing_flag and item[0] >= max_lap_id - 1: # if called from 'check_race_time_expired()' then allow race tied after crossing if pass_node_index < 0: RACE.laps_winner_name = RACE.status_crossing else: # if called from 'pass_record_callback()' then no more ties RACE.laps_winner_name = RACE.status_tied_str logger.info('check_most_laps_win waiting for crossing, Node {0}'.format(item[3].index+1)) return else: pass_node_lap_id = item[0] # save 'lap_id' for node/pilot that caused current lap pass # if race currently tied and called from 'pass_record_callback()' and current-pass pilot # has not reached winning lap then bail out so pass will not stop a tied race in progress if RACE.laps_winner_name is RACE.status_tied_str and pass_node_index >= 0 and \ pass_node_lap_id < RACE.winning_lap_id: logger.debug('check_most_laps_win pilot not at winning lap, pass_node_index={0}, winning_lap_id={1}'.\ format(pass_node_index, RACE.winning_lap_id)) return # check for pilots with max laps; if more than one then select one with # earliest lap time (if called from 'pass_record_callback()' fn) or # indicate status tied (if called from 'check_race_time_expired()' fn) win_pilot_id = -1 win_lap_tstamp = 0 logger.debug('check_most_laps_win check max laps, pass_node_index={0}, max_lap_id={1}, laps_winner_name={2}'.\ format(pass_node_index, max_lap_id, RACE.laps_winner_name)) for item in pilots_list: if item[0] == max_lap_id: logger.debug('check_most_laps_win check max laps checking: pilot_id={0}, lap_tstamp={1}'.\ format(item[2], item[1])) if win_pilot_id < 0: # this is first one so far at max_lap win_pilot_id = item[2] win_lap_tstamp = item[1] else: # other pilots found at max_lap # if called from 'pass_record_callback()' and not waiting for crossing if pass_node_index >= 0 and RACE.laps_winner_name is not RACE.status_crossing: if item[1] < win_lap_tstamp: # this pilot has earlier lap time win_pilot_id = item[2] win_lap_tstamp = item[1] else: # called from 'check_race_time_expired()' or was waiting for crossing if RACE.laps_winner_name is not RACE.status_tied_str: logger.debug('check_most_laps_win check max laps, laps_winner_name was "{0}", setting to "{1}"'.\ format(RACE.laps_winner_name, RACE.status_tied_str)) RACE.laps_winner_name = RACE.status_tied_str # indicate status tied RACE.winning_lap_id = max_lap_id + 1 emit_team_racing_status(RACE.laps_winner_name) emit_phonetic_text('Race tied', 'race_winner') else: logger.debug('check_most_laps_win check max laps, laps_winner_name={0}'.\ format(RACE.laps_winner_name)) return # wait for next 'pass_record_callback()' event logger.debug('check_most_laps_win check max laps, win_pilot_id={0}, win_lap_tstamp={1}'.\ format(win_pilot_id, win_lap_tstamp)) if win_pilot_id >= 0: win_callsign = Database.Pilot.query.filter_by(id=win_pilot_id).one().callsign RACE.laps_winner_name = win_callsign # indicate a pilot has won emit_team_racing_status('Winner is ' + RACE.laps_winner_name) logger.info('check_most_laps_win result: Winner is {0}'.format(RACE.laps_winner_name)) win_phon_name = Database.Pilot.query.filter_by(id=win_pilot_id).one().phonetic if len(win_phon_name) <= 0: # if no phonetic then use callsign win_phon_name = win_callsign emit_phonetic_text('Race done, winner is ' + win_phon_name, 'race_winner') else: RACE.laps_winner_name = RACE.status_tied_str # indicate status tied def emit_phonetic_data(pilot_id, lap_id, lap_time, team_name, team_laps, **params): '''Emits phonetic data.''' raw_time = lap_time phonetic_time = 
RHUtils.phonetictime_format(lap_time) phonetic_name = Database.Pilot.query.get(pilot_id).phonetic callsign = Database.Pilot.query.get(pilot_id).callsign pilot_id = Database.Pilot.query.get(pilot_id).id emit_payload = { 'pilot': phonetic_name, 'callsign': callsign, 'pilot_id': pilot_id, 'lap': lap_id, 'raw_time': raw_time, 'phonetic': phonetic_time, 'team_name' : team_name, 'team_laps' : team_laps } if ('nobroadcast' in params): emit('phonetic_data', emit_payload) else: SOCKET_IO.emit('phonetic_data', emit_payload) def emit_first_pass_registered(node_idx, **params): '''Emits when first pass (lap 0) is registered during a race''' emit_payload = { 'node_index': node_idx, } Events.trigger(Evt.RACE_FIRST_PASS, { 'node_index': node_idx, }) if ('nobroadcast' in params): emit('first_pass_registered', emit_payload) else: SOCKET_IO.emit('first_pass_registered', emit_payload) def emit_phonetic_text(text_str, domain=False, **params): '''Emits given phonetic text.''' emit_payload = { 'text': text_str, 'domain': domain } if ('nobroadcast' in params): emit('phonetic_text', emit_payload) else: SOCKET_IO.emit('phonetic_text', emit_payload) def emit_enter_at_level(node, **params): '''Emits enter-at level for given node.''' emit_payload = { 'node_index': node.index, 'level': node.enter_at_level } if ('nobroadcast' in params): emit('node_enter_at_level', emit_payload) else: SOCKET_IO.emit('node_enter_at_level', emit_payload) def emit_exit_at_level(node, **params): '''Emits exit-at level for given node.''' emit_payload = { 'node_index': node.index, 'level': node.exit_at_level } if ('nobroadcast' in params): emit('node_exit_at_level', emit_payload) else: SOCKET_IO.emit('node_exit_at_level', emit_payload) def emit_node_crossing_change(node, **params): '''Emits crossing-flag change for given node.''' emit_payload = { 'node_index': node.index, 'crossing_flag': node.crossing_flag } if ('nobroadcast' in params): emit('node_crossing_change', emit_payload) else: SOCKET_IO.emit('node_crossing_change', emit_payload) def emit_callouts(): callouts = Options.get('voiceCallouts') if callouts: emit('callouts', json.loads(callouts)) def emit_imdtabler_page(**params): '''Emits IMDTabler page, using current profile frequencies.''' if Use_imdtabler_jar_flag: try: # get IMDTabler version string imdtabler_ver = subprocess.check_output( \ 'java -jar ' + IMDTABLER_JAR_NAME + ' -v', shell=True).rstrip() profile_freqs = json.loads(getCurrentProfile().frequencies) fi_list = list(OrderedDict.fromkeys(profile_freqs['f'][:RACE.num_nodes])) # remove duplicates fs_list = [] for val in fi_list: # convert list of integers to list of strings if val > 0: # drop any zero entries fs_list.append(str(val)) emit_imdtabler_data(fs_list, imdtabler_ver) except Exception: logger.exception('emit_imdtabler_page exception') def emit_imdtabler_data(fs_list, imdtabler_ver=None, **params): '''Emits IMDTabler data for given frequencies.''' try: imdtabler_data = None if len(fs_list) > 2: # if 3+ then invoke jar; get response imdtabler_data = subprocess.check_output( \ 'java -jar ' + IMDTABLER_JAR_NAME + ' -t ' + ' '.join(fs_list), shell=True) except Exception: imdtabler_data = None logger.exception('emit_imdtabler_data exception') emit_payload = { 'freq_list': ' '.join(fs_list), 'table_data': imdtabler_data, 'version_str': imdtabler_ver } if ('nobroadcast' in params): emit('imdtabler_data', emit_payload) else: SOCKET_IO.emit('imdtabler_data', emit_payload) def emit_imdtabler_rating(): '''Emits IMDTabler rating for current profile frequencies.''' try: 
profile_freqs = json.loads(getCurrentProfile().frequencies) imd_val = None fi_list = list(OrderedDict.fromkeys(profile_freqs['f'][:RACE.num_nodes])) # remove duplicates fs_list = [] for val in fi_list: # convert list of integers to list of strings if val > 0: # drop any zero entries fs_list.append(str(val)) if len(fs_list) > 2: imd_val = subprocess.check_output( # invoke jar; get response 'java -jar ' + IMDTABLER_JAR_NAME + ' -r ' + ' '.join(fs_list), shell=True).rstrip() except Exception: imd_val = None logger.exception('emit_imdtabler_rating exception') emit_payload = { 'imd_rating': imd_val } SOCKET_IO.emit('imdtabler_rating', emit_payload) def emit_vrx_list(*args, **params): ''' get list of connected VRx devices ''' if vrx_controller: # if vrx_controller.has_connection: vrx_list = {} for vrx in vrx_controller.rx_data: vrx_list[vrx] = vrx_controller.rx_data[vrx] emit_payload = { 'enabled': True, 'connection': True, 'vrx': vrx_list } # else: # emit_payload = { # 'enabled': True, # 'connection': False, # } else: emit_payload = { 'enabled': False, 'connection': False } if ('nobroadcast' in params): emit('vrx_list', emit_payload) else: SOCKET_IO.emit('vrx_list', emit_payload) @SOCKET_IO.on('set_vrx_node') def set_vrx_node(data): vrx_id = data['vrx_id'] node = data['node'] if vrx_controller: vrx_controller.set_node_number(serial_num=vrx_id, desired_node_num=node) logger.info("Set VRx {0} to node {1}".format(vrx_id, node)) else: logger.error("Can't set VRx {0} to node {1}: Controller unavailable".format(vrx_id, node)) # # Program Functions # def heartbeat_thread_function(): '''Allow time for connection handshake to terminate before emitting data''' gevent.sleep(0.010) '''Emits current rssi data.''' while True: try: global RACE node_data = INTERFACE.get_heartbeat_json() SOCKET_IO.emit('heartbeat', node_data) heartbeat_thread_function.iter_tracker += 1 # check if race timer is finished if RACE.timer_running: check_race_time_expired() # update displayed IMD rating after freqs changed: if heartbeat_thread_function.imdtabler_flag and \ (heartbeat_thread_function.iter_tracker % HEARTBEAT_DATA_RATE_FACTOR) == 0: heartbeat_thread_function.imdtabler_flag = False emit_imdtabler_rating() # emit rest of node data, but less often: if (heartbeat_thread_function.iter_tracker % (4*HEARTBEAT_DATA_RATE_FACTOR)) == 0: emit_node_data() # emit cluster status less often: if (heartbeat_thread_function.iter_tracker % (4*HEARTBEAT_DATA_RATE_FACTOR)) == (2*HEARTBEAT_DATA_RATE_FACTOR): CLUSTER.emitStatus() # collect vrx lock status if (heartbeat_thread_function.iter_tracker % (10*HEARTBEAT_DATA_RATE_FACTOR)) == 0: if vrx_controller: # if vrx_controller.has_connection vrx_controller.get_node_lock_status() vrx_controller.request_variable_status() if (heartbeat_thread_function.iter_tracker % (10*HEARTBEAT_DATA_RATE_FACTOR)) == 4: # emit display status with offset if vrx_controller: emit_vrx_list() # emit environment data less often: if (heartbeat_thread_function.iter_tracker % (20*HEARTBEAT_DATA_RATE_FACTOR)) == 0: INTERFACE.update_environmental_data() emit_environmental_data() time_now = monotonic() # check if race is to be started if RACE.scheduled: if time_now > RACE.scheduled_time: on_stage_race() RACE.scheduled = False # if any comm errors then log them (at defined intervals; faster if debug mode) if time_now > heartbeat_thread_function.last_error_rep_time + \ (ERROR_REPORT_INTERVAL_SECS if not Config.GENERAL['DEBUG'] \ else ERROR_REPORT_INTERVAL_SECS/10): heartbeat_thread_function.last_error_rep_time = time_now if 
INTERFACE.get_intf_total_error_count() > 0: logger.info(INTERFACE.get_intf_error_report_str()) gevent.sleep(0.500/HEARTBEAT_DATA_RATE_FACTOR) except KeyboardInterrupt: logger.info("Heartbeat thread terminated by keyboard interrupt") raise except SystemExit: raise except Exception: logger.exception('Exception in Heartbeat thread loop') gevent.sleep(0.500) # declare/initialize variables for heartbeat functions heartbeat_thread_function.iter_tracker = 0 heartbeat_thread_function.imdtabler_flag = False heartbeat_thread_function.last_error_rep_time = monotonic() def ms_from_race_start(): '''Return milliseconds since race start.''' delta_time = monotonic() - RACE.start_time_monotonic milli_sec = delta_time * 1000.0 return milli_sec def ms_to_race_start(): '''Return milliseconds since race start.''' if RACE.scheduled: delta_time = monotonic() - RACE.scheduled_time milli_sec = delta_time * 1000.0 return milli_sec else: return None def ms_from_program_start(): '''Returns the elapsed milliseconds since the start of the program.''' delta_time = monotonic() - PROGRAM_START milli_sec = delta_time * 1000.0 return milli_sec def check_race_time_expired(): race_format = getCurrentRaceFormat() if race_format and race_format.race_mode == 0: # count down if monotonic() >= RACE.start_time_monotonic + race_format.race_time_sec: RACE.timer_running = False # indicate race timer no longer running Events.trigger(Evt.RACE_FINISH) if race_format.win_condition == WinCondition.MOST_LAPS: # Most Laps Wins Enabled check_most_laps_win() # check if pilot or team has most laps for win def pass_record_callback(node, lap_timestamp_absolute, source): '''Handles pass records from the nodes.''' logger.debug('Raw pass record: Node: {0}, MS Since Lap: {1}'.format(node.index+1, lap_timestamp_absolute)) node.pass_crossing_flag = False # clear the "synchronized" version of the crossing flag node.debug_pass_count += 1 emit_node_data() # For updated triggers and peaks global RACE profile_freqs = json.loads(getCurrentProfile().frequencies) if profile_freqs["f"][node.index]
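``heartbeat_thread_function`` above drives everything from one fast loop and fans the slower emissions out by testing ``iter_tracker`` against multiples of ``HEARTBEAT_DATA_RATE_FACTOR``, with offsets so the heavier jobs never land on the same tick. A stripped-down sketch of that scheduling pattern (plain ``time.sleep`` instead of gevent, hypothetical stub jobs, and an assumed rate factor)::

    import time

    HEARTBEAT_DATA_RATE_FACTOR = 5          # value assumed for illustration

    def emit_heartbeat():
        print('heartbeat')                  # stands in for SOCKET_IO.emit('heartbeat', node_data)

    def emit_node_data():
        print('node data')                  # slower job, every 4*factor ticks

    def emit_cluster_status():
        print('cluster status')             # same period, offset by half

    iter_tracker = 0
    for _ in range(40):                     # the real loop runs until the process exits
        emit_heartbeat()                    # every iteration
        iter_tracker += 1
        if iter_tracker % (4 * HEARTBEAT_DATA_RATE_FACTOR) == 0:
            emit_node_data()
        if iter_tracker % (4 * HEARTBEAT_DATA_RATE_FACTOR) == 2 * HEARTBEAT_DATA_RATE_FACTOR:
            emit_cluster_status()           # offset so the two slower jobs never coincide
        time.sleep(0.5 / HEARTBEAT_DATA_RATE_FACTOR)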
image_stacks: results: """ #__table_args__ = (db.UniqueConstraint("study_id","position", name = "imgset_id"),) id = db.Column(db.Integer(), primary_key=True) study_id = db.Column(db.Integer(), db.ForeignKey("study.id"), nullable=False) position = db.Column(db.Integer(), nullable=False) image_stacks = db.relationship("Image_stack", lazy=False, cascade="all, delete-orphan") def get_stack_by_div_id(self,div_id): stack = [stack for stack in self.image_stacks if stack.div_id == div_id] if len(stack) == 1: stack = stack[0] else: stack = None return stack def to_dict(self): dict = {} dict["id"] = self.id dict["study_id"] = self.study_id dict["position"] = self.position dict["image_stacks"] = [] for stack in self.image_stacks: dict["image_stacks"].append(stack.to_dict()) return dict class Image(db.Model): """ Class to keep track of image files associated with a study Attributes: url: """ id = db.Column(db.Integer(), primary_key=True) base_url = db.Column(db.String(1000)) name = db.Column(db.String(120)) #table for m to n relationships (image_stacks - images) stack_images = db.Table("stack_images", db.Column("image_stack_id",db.Integer, db.ForeignKey("image_stack.id"), primary_key=True), db.Column("image_id",db.Integer, db.ForeignKey("image.id"), primary_key=True) ) class Image_stack(db.Model): """ Class to save image configurations used by HON linked to imgset Attributes: div_id: url: viewport: tool_state: """ __table_args__ = (db.UniqueConstraint("imgset_id","result_id","div_id"), db.CheckConstraint('(imgset_id IS NULL) <> (result_id IS NULL)'),) id = db.Column(db.Integer(), primary_key=True) # image either belongs to a result or an imgset imgset_id = db.Column(db.Integer(), db.ForeignKey("imgset.id")) result_id = db.Column(db.Integer(), db.ForeignKey("result.id")) div_id = db.Column(db.String(120)) name = db.Column(db.String(120)) images = db.relationship("Image",secondary=stack_images, backref=db.backref("image_stacks", lazy=True),lazy='subquery', order_by='Image.name') viewport = db.Column(db.String(1000)) tool_state = db.Column(db.Text(1000000)) seg_data = db.Column(db.Text(100000000)) def to_dict(self): image_stack_dict = {} image_stack_dict["div_id"] = self.div_id image_stack_dict["name"] = self.name imageIds = ['wadouri:' + image.base_url + image.name if ".dcm" in image.name else image.base_url + image.name for image in self.images] image_stack_dict["cs_stack"] = {"imageIds":imageIds, "currentImageIdIndex":0} if self.viewport: image_stack_dict["viewport"] = json.loads(self.viewport) if self.tool_state: image_stack_dict["tool_state"] = json.loads(self.tool_state) image_stack_dict["seg_data"] = self.seg_data return image_stack_dict def get_filenames(self): image_names = json.loads(self.image_names) return image_names def save_seg_data(self,file_path): data = json.loads(self.seg_data) arrays1d = data[0:-1] x_res = data[-1][0] y_res = data[-1][1] arrays2d = [] for array1d in arrays1d: if array1d: array1d_temp = np.array(array1d,dtype=np.int16) else: array1d_temp = np.zeros(x_res*y_res) array1d_temp = array1d_temp.reshape(x_res,y_res) array1d_temp = np.flip(array1d_temp,1) array1d_temp = np.rot90(array1d_temp) arrays2d.append(array1d_temp) array3d = np.stack(arrays2d,-1) new_image = nib.Nifti1Image(array3d, affine=np.eye(4)) new_image.header.get_xyzt_units() new_image.to_filename(file_path) #### none db model classes # might be possible to move all of these classes and functions to the database model # not done so far since the design and requirments # of scale input and tool data kept changing 
during development => compatibility to old dbs easier # both of these dont have their own table rigth now class Output: """ Class to handle the conversion of data from the db tables to the outputfile Args: study_id Attributes: table_header col_info table_data """ def __init__(self,study): self.study = study # meta data self.row_numb = 0 self.max_stack_size = 0 self.incl_expl = False # cols always present self.imgset = {"imgset_id":[]} self.user = {"username":[]} self.date = {"date":[]} self.stacks_disp = {} for i in range(self.study.design.numb_img): self.stacks_disp["stack-%s"%str(i+1)] = [] self.stacks_disp["stack-%s-files"%str(i+1)] = [] self.stack_user = {"stack-user":[], "stack-user-files":[]} # cols optional self.ref_stacks = {} for i in range(self.study.design.numb_refimg): self.ref_stacks["ref-stack-%s"%str(i+1)] = [] self.ref_stacks["ref-stack-%s-files"%str(i+1)] = [] self.scale_input = {} self.tool_gt = {} self.tool_input = {} self.overlap_data = {} def get_data(self,users): for result in self.study.results: # col always present self.get_imgset_data(result) self.get_user_data(result,users) self.get_img_disp_data(result) self.get_date_data(result) self.get_img_user_data(result) # optional columns self.get_ref_img_data(result) self.get_scale_data(result) self.get_tool_gt_data(result) self.get_tool_input_data(result) self.row_numb += 1 # get data for columns always present def get_imgset_data(self,result): self.imgset["imgset_id"].append(result.imgset.position) def get_user_data(self,result,users): username = [user.username for user in users if user.id == result.user_id][0] self.user["username"].append(username) def get_img_disp_data(self,result): for i in range(self.study.design.numb_img): div_id_ref = "dicom_img_" + str(i+2) stack = result.imgset.get_stack_by_div_id(div_id_ref) # stack can be none if left blank if stack: image_names = [image.name for image in stack.images] self.stacks_disp["stack-%s"%str(i+1)].append(stack.name) self.stacks_disp["stack-%s-files"%str(i+1)].append(image_names) self.max_stack_size = max(self.max_stack_size,len(image_names)) else: self.stacks_disp["stack-%s"%str(i+1)].append(None) self.stacks_disp["stack-%s-files"%str(i+1)].append(None) def get_img_user_data(self,result): stack_picked = result.stack_picked # stack can be none if left blank if stack_picked: image_names = [image.name for image in stack_picked.images] self.stack_user["stack-user"].append(stack_picked.name) self.stack_user["stack-user-files"].append(image_names) else: self.stack_user["stack-user"].append(None) self.stack_user["stack-user-files"].append(None) def get_date_data(self,result): self.date["date"].append(result.created) # get data for optional columns def get_ref_img_data(self,result): for i in range(self.study.design.numb_refimg): div_id_ref = "dicom_img_" + str(i) stack = result.imgset.get_stack_by_div_id(div_id_ref) # stack can be none if left blank if stack: image_names = [image.name for image in stack.images] self.ref_stacks["ref-stack-%s"%str(i+1)].append(stack.name) self.ref_stacks["ref-stack-%s-files"%str(i+1)].append(image_names) self.max_stack_size = max(self.max_stack_size,len(image_names)) else: self.ref_stacks["ref-stack-%s"%str(i+1)].append(None) self.ref_stacks["ref-stack-%s-files"%str(i+1)].append(None) def get_scale_data(self,result): if result.scale_input: scale_input = json.loads(result.scale_input) # scale input is a list of dictonaries with sub-dictonaries # dict keys are the scale text # sub-dict keys are values (scale input collected from user) and uuid 
(was used to link scale data to annotations i.e. rois) # values and uuids are lists as scales can be repeated (FROC studies) for scale_text in scale_input.keys(): values = scale_input[scale_text]["values"] scale_header = scale_text if scale_header in self.scale_input.keys(): self.scale_input[scale_header].append(values) else: self.scale_input[scale_header] = [None] * self.row_numb + [values] # ensure all cols have same length for k,v in self.scale_input.items(): if k not in scale_input.keys(): v.append(None) else: # ensure all cols have same length for k,v in self.scale_input.items(): v.append(None) def get_tool_gt_data(self,result): for i in range(self.study.design.numb_img): div_id_ref = "dicom_img_%s"%str(i+2) stack = result.imgset.get_stack_by_div_id(div_id_ref) # stack can be none if left blank if stack and stack.tool_state: stack_tool_states = json.loads(stack.tool_state) # stack tools state is list of cornerstone tool states # each entry corresponds to an image within the stack # each image tool state can consist of multiple tool input for img in range(len(stack.images)): tool_state = stack_tool_states[img] if tool_state: for tool in tool_state: col_name = "stack-%s-pos-%s-%s"%(i+1,img+1,tool) if col_name in self.tool_gt.keys(): self.tool_gt[col_name].append(tool_state[tool]["data"]) else: self.tool_gt[col_name] = [None] * self.row_numb + [tool_state[tool]["data"]] # ensure all cols have same length for k, v in self.tool_gt.items(): if len(v) < self.row_numb+1: v.append(None) def get_tool_input_data(self,result): if result.stack_picked.tool_state: stack_tool_states = json.loads(result.stack_picked.tool_state) # stack tools state is list of cornerstone tool states # each entry corresponds to an image within the stack # each image tool state can consist of multiple tool inputs for i in range(len(result.stack_picked.images)): tool_state = stack_tool_states[i] if tool_state: for tool in tool_state: col_name = "stack-user-pos-%s-%s"%(i+1,tool) # col already exists if col_name in self.tool_input.keys(): self.tool_input[col_name].append(tool_state[tool]["data"]) # col is new else: self.tool_input[col_name] = [None] * self.row_numb + [tool_state[tool]["data"]] # ensure all cols have same length for k,v in self.tool_input.items(): if len(v) < self.row_numb+1: v.append(None) def calc_overlap_data(self): rois_cols_input = {k: v for k, v in self.tool_input.items() if "Roi" in k} for row in range(self.row_numb): stack_name_user = self.stack_user["stack-user"][row] gt_ind= int([k.split("-")[1] for k,v in self.stacks_disp.items() if v[row] == stack_name_user][0]) for k_input, v_input in rois_cols_input.items(): roi_type = k_input.split("-")[4] stack_pos_input = k_input.split("-")[3] col_name_gt = "stack-%s-pos-%s-%s"%(str(gt_ind),stack_pos_input,roi_type) rois_input = v_input[row] rois_gt = None if col_name_gt in self.tool_gt: rois_gt = self.tool_gt[col_name_gt][row] if not rois_gt or not rois_input or len(rois_gt) == 0 or len(rois_input) == 0: continue # iterate over rois and calc metrics for metric in ["dice"]: ov_gt = np.zeros(shape=(len(rois_gt),len(rois_input))) ov_input = np.zeros(shape=(len(rois_input),len(rois_gt))) for i, roi_gt in enumerate(rois_gt): roi_gt = eval(roi_type + "(roi_gt)" ) for j, roi_input in enumerate(rois_input): roi_input = eval(roi_type + "(roi_input)" ) overlap = roi_gt.calc_seq_metric(roi_input, metric) ov_gt[i][j] = overlap ov_input[j][i] = overlap col_name_gt = col_name_gt + "-%s"%(metric) ov_gt = [max(ov) for ov in ov_gt] if col_name_gt in 
self.overlap_data.keys(): self.overlap_data[col_name_gt].append(ov_gt) else: self.overlap_data[col_name_gt] = [None] * row + [ov_gt] col_name_input = k_input + "-%s"%(metric) ov_input = [max(ov) for ov in ov_input] if col_name_input in self.overlap_data.keys(): self.overlap_data[col_name_input].append(ov_input) else: self.overlap_data[col_name_input] = [None] * row + [ov_input] # ensure all cols have same length for k, v in self.overlap_data.items(): if len(v) < row+1: v.append(None) def save_table(self,format = "excel", include_ov=True, include_raw_tool_data=False, include_expl=False): # combine data into pandas dataframe self.df = pd.DataFrame({**self.imgset,**self.user,**self.stacks_disp,**self.date,**self.scale_input,**self.stack_user}) for k,v in self.tool_gt.items(): self.df[k] = v for k,v in self.tool_input.items(): self.df[k] = v # add formatting e.g. col names, col splitting .... self.format_tool_data(include_raw_tool_data) if include_ov: # calc metrics (e.g. dice, iou) self.calc_overlap_data() for k,v in self.overlap_data.items(): self.df[k] = v # to do, add sepecial case for max stack size = 1 self.format_simplify() if format == "excel": filepath=os.path.join(current_app.config["IMAGE_PATH"],"results_study_%s.xlsx" % self.study.id) self.df.to_excel(filepath, index=False) else: filepath=os.path.join(current_app.config["IMAGE_PATH"],"results_study_%s.xlsx" % self.study.id) self.df.to_csv(filepath, index=False) def format_tool_data(self,include_raw_tool_data): self.format_roi_data(include_raw_tool_data) self.format_length_data(include_raw_tool_data) self.format_segmentation_data(include_raw_tool_data) def format_roi_data(self,include_raw_tool_data): rois_cols = [column for column in self.df if "Roi" in column and not "dice" in column] for roi_col in rois_cols: roi_type = roi_col.split("-")[4] area_col, mean_HU_col, sd_HU_col, start_col, end_col = [], [], [], [], [] for imgset in range(self.row_numb): rois = self.df[roi_col][imgset] area_img, mean_HU_img, sd_HU_img, start_img, end_img = [], [], [],[], [] if rois: for roi in rois: roi = eval(roi_type
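``calc_overlap_data`` above delegates the actual overlap computation to ``calc_seq_metric`` on the ROI objects, which is not part of this excerpt. For reference, the Dice coefficient it requests ("dice") is conventionally computed on two binary masks as follows (a generic sketch, not the project's own implementation)::

    import numpy as np

    def dice_coefficient(mask_a: np.ndarray, mask_b: np.ndarray) -> float:
        """Dice = 2*|A & B| / (|A| + |B|) for two boolean masks of equal shape."""
        a = mask_a.astype(bool)
        b = mask_b.astype(bool)
        total = a.sum() + b.sum()
        if total == 0:
            return 1.0                      # both masks empty: treat as perfect agreement
        return 2.0 * np.logical_and(a, b).sum() / total

    gt = np.zeros((64, 64), dtype=bool); gt[10:30, 10:30] = True
    pred = np.zeros((64, 64), dtype=bool); pred[15:35, 15:35] = True
    print(dice_coefficient(gt, pred))       # 0.5625 for this pair of overlapping squares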
<gh_stars>0 r""" Orthogonal Polynomials - The Chebyshev polynomial of the first kind arises as a solution to the differential equation .. MATH:: (1-x^2)\,y'' - x\,y' + n^2\,y = 0 and those of the second kind as a solution to .. MATH:: (1-x^2)\,y'' - 3x\,y' + n(n+2)\,y = 0. The Chebyshev polynomials of the first kind are defined by the recurrence relation .. MATH:: T_0(x) = 1 \, T_1(x) = x \, T_{n+1}(x) = 2xT_n(x) - T_{n-1}(x). \, The Chebyshev polynomials of the second kind are defined by the recurrence relation .. MATH:: U_0(x) = 1 \, U_1(x) = 2x \, U_{n+1}(x) = 2xU_n(x) - U_{n-1}(x). \, For integers `m,n`, they satisfy the orthogonality relations .. MATH:: \int_{-1}^1 T_n(x)T_m(x)\,\frac{dx}{\sqrt{1-x^2}} =\left\{ \begin{matrix} 0 &: n\ne m~~~~~\\ \pi &: n=m=0\\ \pi/2 &: n=m\ne 0 \end{matrix} \right. and .. MATH:: \int_{-1}^1 U_n(x)U_m(x)\sqrt{1-x^2}\,dx =\frac{\pi}{2}\delta_{m,n}. They are named after Pafnuty Chebyshev (alternative transliterations: Tchebyshef or Tschebyscheff). - The Hermite polynomials are defined either by .. MATH:: H_n(x)=(-1)^n e^{x^2/2}\frac{d^n}{dx^n}e^{-x^2/2} (the "probabilists' Hermite polynomials"), or by .. MATH:: H_n(x)=(-1)^n e^{x^2}\frac{d^n}{dx^n}e^{-x^2} (the "physicists' Hermite polynomials"). Sage (via Maxima) implements the latter flavor. These satisfy the orthogonality relation .. MATH:: \int_{-\infty}^\infty H_n(x)H_m(x)\,e^{-x^2}\,dx ={n!2^n}{\sqrt{\pi}}\delta_{nm} They are named in honor of <NAME>. - Each *Legendre polynomial* `P_n(x)` is an `n`-th degree polynomial. It may be expressed using Rodrigues' formula: .. MATH:: P_n(x) = (2^n n!)^{-1} {\frac{d^n}{dx^n} } \left[ (x^2 -1)^n \right]. These are solutions to Legendre's differential equation: .. MATH:: {\frac{d}{dx}} \left[ (1-x^2) {\frac{d}{dx}} P(x) \right] + n(n+1)P(x) = 0. and satisfy the orthogonality relation .. MATH:: \int_{-1}^{1} P_m(x) P_n(x)\,dx = {\frac{2}{2n + 1}} \delta_{mn} The *Legendre function of the second kind* `Q_n(x)` is another (linearly independent) solution to the Legendre differential equation. It is not an "orthogonal polynomial" however. The associated Legendre functions of the first kind `P_\ell^m(x)` can be given in terms of the "usual" Legendre polynomials by .. MATH:: \begin{array}{ll} P_\ell^m(x) &= (-1)^m(1-x^2)^{m/2}\frac{d^m}{dx^m}P_\ell(x) \\ &= \frac{(-1)^m}{2^\ell \ell!} (1-x^2)^{m/2}\frac{d^{\ell+m}}{dx^{\ell+m}}(x^2-1)^\ell. \end{array} Assuming `0 \le m \le \ell`, they satisfy the orthogonality relation: .. MATH:: \int_{-1}^{1} P_k ^{(m)} P_\ell ^{(m)} dx = \frac{2 (\ell+m)!}{(2\ell+1)(\ell-m)!}\ \delta _{k,\ell}, where `\delta _{k,\ell}` is the Kronecker delta. The associated Legendre functions of the second kind `Q_\ell^m(x)` can be given in terms of the "usual" Legendre polynomials by .. MATH:: Q_\ell^m(x) = (-1)^m(1-x^2)^{m/2}\frac{d^m}{dx^m}Q_\ell(x). They are named after Adrien-Marie Legendre. - Laguerre polynomials may be defined by the Rodrigues formula .. MATH:: L_n(x)=\frac{e^x}{n!}\frac{d^n}{dx^n}\left(e^{-x} x^n\right). They are solutions of Laguerre's equation: .. MATH:: x\,y'' + (1 - x)\,y' + n\,y = 0\, and satisfy the orthogonality relation .. MATH:: \int_0^\infty L_m(x) L_n(x) e^{-x}\,dx = \delta_{mn}. The generalized Laguerre polynomials may be defined by the Rodrigues formula: .. MATH:: L_n^{(\alpha)}(x) = {\frac{x^{-\alpha} e^x}{n!}}{\frac{d^n}{dx^n}} \left(e^{-x} x^{n+\alpha}\right) . (These are also sometimes called the associated Laguerre polynomials.) 
The simple Laguerre polynomials are recovered from the generalized polynomials by setting `\alpha =0`. They are named after <NAME>. - Jacobi polynomials are a class of orthogonal polynomials. They are obtained from hypergeometric series in cases where the series is in fact finite: .. MATH:: P_n^{(\alpha,\beta)}(z) =\frac{(\alpha+1)_n}{n!} \,_2F_1\left(-n,1+\alpha+\beta+n;\alpha+1;\frac{1-z}{2}\right) , where `()_n` is Pochhammer's symbol (for the rising factorial), (Abramowitz and Stegun p561.) and thus have the explicit expression .. MATH:: P_n^{(\alpha,\beta)} (z) = \frac{\Gamma (\alpha+n+1)}{n!\Gamma (\alpha+\beta+n+1)} \sum_{m=0}^n \binom{n}{m} \frac{\Gamma (\alpha + \beta + n + m + 1)}{\Gamma (\alpha + m + 1)} \left(\frac{z-1}{2}\right)^m . They are named after <NAME>. - Ultraspherical or Gegenbauer polynomials are given in terms of the Jacobi polynomials `P_n^{(\alpha,\beta)}(x)` with `\alpha=\beta=a-1/2` by .. MATH:: C_n^{(a)}(x)= \frac{\Gamma(a+1/2)}{\Gamma(2a)}\frac{\Gamma(n+2a)}{\Gamma(n+a+1/2)} P_n^{(a-1/2,a-1/2)}(x). They satisfy the orthogonality relation .. MATH:: \int_{-1}^1(1-x^2)^{a-1/2}C_m^{(a)}(x)C_n^{(a)}(x)\, dx =\delta_{mn}2^{1-2a}\pi \frac{\Gamma(n+2a)}{(n+a)\Gamma^2(a)\Gamma(n+1)} , for `a>-1/2`. They are obtained from hypergeometric series in cases where the series is in fact finite: .. MATH:: C_n^{(a)}(z) =\frac{(2a)^{\underline{n}}}{n!} \,_2F_1\left(-n,2a+n;a+\frac{1}{2};\frac{1-z}{2}\right) where `\underline{n}` is the falling factorial. (See Abramowitz and Stegun p561) They are named for Leopold Gegenbauer (1849-1903). For completeness, the Pochhammer symbol, introduced by Leo August Pochhammer, `(x)_n`, is used in the theory of special functions to represent the "rising factorial" or "upper factorial" .. MATH:: (x)_n=x(x+1)(x+2)\cdots(x+n-1)=\frac{(x+n-1)!}{(x-1)!}. On the other hand, the "falling factorial" or "lower factorial" is .. MATH:: x^{\underline{n}}=\frac{x!}{(x-n)!} , in the notation of <NAME>, <NAME> and <NAME> in their book Concrete Mathematics. .. TODO:: Implement Zernike polynomials. :wikipedia:`Zernike_polynomials` REFERENCES: - [AS1964]_ - :wikipedia:`Chebyshev_polynomials` - :wikipedia:`Legendre_polynomials` - :wikipedia:`Hermite_polynomials` - http://mathworld.wolfram.com/GegenbauerPolynomial.html - :wikipedia:`Jacobi_polynomials` - :wikipedia:`Laguerre_polynomia` - :wikipedia:`Associated_Legendre_polynomials` - [Koe1999]_ AUTHORS: - <NAME> (2006-06) - <NAME> (2010-) - <NAME> (2015-) The original module wrapped some of the orthogonal/special functions in the Maxima package "orthopoly" and was written by <NAME> of the University of Nebraska at Kearney. """ # **************************************************************************** # Copyright (C) 2006 <NAME> <<EMAIL>> # 2006 <NAME> <<EMAIL>> # 2010 <NAME> <<EMAIL>> # # Distributed under the terms of the GNU General Public License (GPL) # # This code is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. 
# # The full text of the GPL is available at: # # https://www.gnu.org/licenses/ # **************************************************************************** import warnings from sage.misc.latex import latex from sage.rings.all import ZZ, QQ, RR, CC from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing import sage.rings.abc from sage.symbolic.function import BuiltinFunction, GinacFunction from sage.symbolic.expression import Expression from sage.symbolic.ring import SR from sage.functions.other import factorial, binomial from sage.structure.all import parent class OrthogonalFunction(BuiltinFunction): """ Base class for orthogonal polynomials. This class is an abstract base class for all orthogonal polynomials since they share similar properties. The evaluation as a polynomial is either done via maxima, or with pynac. Convention: The first argument is always the order of the polynomial, the others are other values or parameters where the polynomial is evaluated. """ def __init__(self, name, nargs=2, latex_name=None, conversions={}): """ :class:`OrthogonalFunction` class needs the same input parameter as it's parent class. EXAMPLES:: sage: from sage.functions.orthogonal_polys import OrthogonalFunction sage: new = OrthogonalFunction('testo_P') sage: new testo_P """ try: self._maxima_name = conversions['maxima'] except KeyError: self._maxima_name = None super(OrthogonalFunction,self).__init__(name=name, nargs=nargs, latex_name=latex_name, conversions=conversions) def eval_formula(self, *args): """ Evaluate this polynomial using an explicit formula. EXAMPLES:: sage: from sage.functions.orthogonal_polys import OrthogonalFunction sage: P = OrthogonalFunction('testo_P') sage: P.eval_formula(1,2.0) Traceback (most recent call last): ... NotImplementedError: no explicit calculation of values implemented """ raise NotImplementedError("no explicit calculation of values implemented") def _eval_special_values_(self, *args): """ Evaluate the polynomial explicitly for special values. EXAMPLES:: sage: var('n') n sage: chebyshev_T(n,-1) (-1)^n """ raise ValueError("no special values known") def _eval_(self, n, *args): """ The :meth:`_eval_()` method decides which evaluation suits best for the given input, and returns a proper value. EXAMPLES:: sage: var('n,x') (n, x) sage: chebyshev_T(5,x) 16*x^5 - 20*x^3 + 5*x """ return None def __call__(self, *args, **kwds): """ This overides the call method from SageObject to avoid problems with coercions, since the _eval_ method is able to handle more data types than symbolic functions would normally allow. Thus we have the distinction between algebraic objects (if n is an integer), and else as symbolic function. EXAMPLES:: sage: chebyshev_T(5, x) 16*x^5 - 20*x^3 + 5*x sage: chebyshev_T(5, x, algorithm='pari') 16*x^5 - 20*x^3 + 5*x sage: chebyshev_T(5, x, algorithm='maxima') 16*x^5 - 20*x^3 + 5*x sage: chebyshev_T(5, x, algorithm='recursive') 16*x^5 - 20*x^3 + 5*x """ algorithm = kwds.get('algorithm', None) if algorithm == 'pari': return self.eval_pari(*args, **kwds) elif algorithm == 'recursive': return self.eval_recursive(*args, **kwds) elif algorithm == 'maxima': from sage.calculus.calculus import maxima kwds['hold'] = True return maxima(self._eval_(*args, **kwds))._sage_() return super(OrthogonalFunction,self).__call__(*args, **kwds) class ChebyshevFunction(OrthogonalFunction): """ Abstract base class for Chebyshev polynomials of the first and second kind. 
    EXAMPLES::

        sage: chebyshev_T(3,x)
        4*x^3 - 3*x
    """
    def __call__(self, n, *args, **kwds):
        """
        This overrides the call method from SageObject to avoid problems with
        coercions, since the _eval_ method is able to handle more data types
        than symbolic functions would normally allow. Thus we have the
        distinction between algebraic objects (if n is an integer), and else
        as symbolic function.

        EXAMPLES::

            sage: K.<a> = NumberField(x^3-x-1)
            sage: chebyshev_T(5, a)
            16*a^2 + a - 4
            sage: chebyshev_T(5,MatrixSpace(ZZ, 2)([1, 2, -4, 7]))
            [-40799 44162]
            [-88324 91687]
            sage: R.<x> = QQ[]
            sage: parent(chebyshev_T(5, x))
            Univariate Polynomial Ring in x over Rational Field
            sage: chebyshev_T(5, 2, hold=True)
            chebyshev_T(5, 2)
            sage: chebyshev_T(1,2,3)
            Traceback (most recent call last):
            ...
            TypeError:
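The module documentation above defines the Chebyshev polynomials of the first kind through the recurrence T_0 = 1, T_1 = x, T_{n+1}(x) = 2x T_n(x) - T_{n-1}(x). Independently of Sage's ``chebyshev_T``, that recurrence is easy to check numerically with a plain-Python sketch::

    def chebyshev_T_value(n: int, x: float) -> float:
        """Evaluate T_n(x) by the three-term recurrence quoted in the docstring."""
        if n == 0:
            return 1.0
        t_prev, t_curr = 1.0, x             # T_0, T_1
        for _ in range(n - 1):
            t_prev, t_curr = t_curr, 2.0 * x * t_curr - t_prev
        return t_curr

    # matches the closed forms quoted above: T_3(x) = 4x^3 - 3x, T_5(x) = 16x^5 - 20x^3 + 5x
    x = 0.7
    assert abs(chebyshev_T_value(3, x) - (4 * x**3 - 3 * x)) < 1e-12
    assert abs(chebyshev_T_value(5, x) - (16 * x**5 - 20 * x**3 + 5 * x)) < 1e-12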
504 , 486, 487, 522 , 522, 487, 523 , 507, 515, 485 , 485, 515, 514 , 508, 511, 509 , 509, 511, 510 , 511, 488, 510 , 510, 488, 489 , 512, 508, 513 , 513, 508, 509 , 515, 512, 514 , 514, 512, 513 , 516, 519, 517 , 517, 519, 518 , 519, 507, 518 , 518, 507, 485 , 520, 516, 521 , 521, 516, 517 , 523, 520, 522 , 522, 520, 521 , 488, 525, 506 , 506, 525, 524 , 490, 527, 498 , 498, 527, 526 , 491, 529, 494 , 494, 529, 528 , 494, 528, 487 , 487, 528, 530 , 495, 531, 491 , 491, 531, 529 , 498, 526, 495 , 495, 526, 531 , 499, 533, 502 , 502, 533, 532 , 502, 532, 490 , 490, 532, 527 , 503, 534, 499 , 499, 534, 533 , 506, 524, 503 , 503, 524, 534 , 487, 530, 523 , 523, 530, 535 , 507, 537, 515 , 515, 537, 536 , 508, 539, 511 , 511, 539, 538 , 511, 538, 488 , 488, 538, 525 , 512, 540, 508 , 508, 540, 539 , 515, 536, 512 , 512, 536, 540 , 516, 542, 519 , 519, 542, 541 , 519, 541, 507 , 507, 541, 537 , 520, 543, 516 , 516, 543, 542 , 523, 535, 520 , 520, 535, 543 , 544, 545, 564 , 564, 545, 565 , 545, 546, 565 , 565, 546, 566 , 547, 544, 567 , 567, 544, 564 , 548, 549, 568 , 568, 549, 569 , 549, 547, 569 , 569, 547, 567 , 550, 551, 570 , 570, 551, 571 , 551, 548, 571 , 571, 548, 568 , 552, 550, 572 , 572, 550, 570 , 553, 554, 573 , 573, 554, 574 , 554, 552, 574 , 574, 552, 572 , 555, 556, 575 , 575, 556, 576 , 556, 553, 576 , 576, 553, 573 , 557, 555, 577 , 577, 555, 575 , 558, 559, 578 , 578, 559, 579 , 559, 557, 579 , 579, 557, 577 , 560, 561, 580 , 580, 561, 581 , 561, 558, 581 , 581, 558, 578 , 562, 560, 582 , 582, 560, 580 , 546, 563, 566 , 566, 563, 583 , 563, 562, 583 , 583, 562, 582 , 564, 565, 584 , 584, 565, 585 , 565, 566, 585 , 585, 566, 586 , 567, 564, 587 , 587, 564, 584 , 568, 569, 588 , 588, 569, 589 , 569, 567, 589 , 589, 567, 587 , 570, 571, 590 , 590, 571, 591 , 571, 568, 591 , 591, 568, 588 , 572, 570, 592 , 592, 570, 590 , 573, 574, 593 , 593, 574, 594 , 574, 572, 594 , 594, 572, 592 , 575, 576, 595 , 595, 576, 596 , 576, 573, 596 , 596, 573, 593 , 577, 575, 597 , 597, 575, 595 , 578, 579, 598 , 598, 579, 599 , 579, 577, 599 , 599, 577, 597 , 580, 581, 600 , 600, 581, 601 , 581, 578, 601 , 601, 578, 598 , 582, 580, 602 , 602, 580, 600 , 566, 583, 586 , 586, 583, 603 , 583, 582, 603 , 603, 582, 602 , 585, 605, 584 , 584, 605, 604 , 586, 606, 585 , 585, 606, 605 , 584, 604, 587 , 587, 604, 607 , 589, 609, 588 , 588, 609, 608 , 587, 607, 589 , 589, 607, 609 , 591, 611, 590 , 590, 611, 610 , 588, 608, 591 , 591, 608, 611 , 590, 610, 592 , 592, 610, 612 , 594, 614, 593 , 593, 614, 613 , 592, 612, 594 , 594, 612, 614 , 596, 616, 595 , 595, 616, 615 , 593, 613, 596 , 596, 613, 616 , 595, 615, 597 , 597, 615, 617 , 599, 619, 598 , 598, 619, 618 , 597, 617, 599 , 599, 617, 619 , 601, 621, 600 , 600, 621, 620 , 598, 618, 601 , 601, 618, 621 , 600, 620, 602 , 602, 620, 622 , 603, 623, 586 , 586, 623, 606 , 602, 622, 603 , 603, 622, 623 , 625, 624, 605 , 605, 624, 604 , 626, 625, 606 , 606, 625, 605 , 604, 624, 607 , 607, 624, 627 , 629, 628, 609 , 609, 628, 608 , 627, 629, 607 , 607, 629, 609 , 631, 630, 611 , 611, 630, 610 , 628, 631, 608 , 608, 631, 611 , 630, 632, 610 , 610, 632, 612 , 634, 633, 614 , 614, 633, 613 , 632, 634, 612 , 612, 634, 614 , 636, 635, 616 , 616, 635, 615 , 633, 636, 613 , 613, 636, 616 , 635, 637, 615 , 615, 637, 617 , 639, 638, 619 , 619, 638, 618 , 637, 639, 617 , 617, 639, 619 , 641, 640, 621 , 621, 640, 620 , 638, 641, 618 , 618, 641, 621 , 640, 642, 620 , 620, 642, 622 , 643, 626, 623 , 623, 626, 606 , 642, 643, 622 , 622, 643, 623 , 624, 625, 644 , 644, 625, 
645 , 625, 626, 645 , 645, 626, 646 , 627, 624, 647 , 647, 624, 644 , 628, 629, 648 , 648, 629, 649 , 629, 627, 649 , 649, 627, 647 , 630, 631, 650 , 650, 631, 651 , 631, 628, 651 , 651, 628, 648 , 632, 630, 652 , 652, 630, 650 , 633, 634, 653 , 653, 634, 654 , 634, 632, 654 , 654, 632, 652 , 635, 636, 655 , 655, 636, 656 , 636, 633, 656 , 656, 633, 653 , 637, 635, 657 , 657, 635, 655 , 638, 639, 658 , 658, 639, 659 , 639, 637, 659 , 659, 637, 657 , 640, 641, 660 , 660, 641, 661 , 641, 638, 661 , 661, 638, 658 , 642, 640, 662 , 662, 640, 660 , 626, 643, 646 , 646, 643, 663 , 643, 642, 663 , 663, 642, 662 , 664, 665, 666 , 666, 665, 723 , 665, 664, 777 , 777, 664, 776 , 667, 668, 670 , 670, 668, 669 , 778, 779, 781 , 781, 779, 780 , 671, 666, 775 , 667, 670, 778 , 778, 670, 779 , 666, 671, 664 , 776, 664, 782 , 782, 664, 673 , 782, 673, 783 , 783, 673, 674 , 675, 676, 677 , 783, 674, 784 , 784, 674, 678 , 679, 668, 667 , 784, 678, 785 , 785, 678, 680 , 785, 680, 786 , 786, 680, 681 , 786, 681, 787 , 787, 681, 682 , 682, 683, 787 , 787, 683, 788 , 667, 684, 679 , 778, 789, 667 , 667, 789, 686 , 791, 685, 790 , 790, 685, 687 , 789, 792, 686 , 686, 792, 689 , 687, 688, 790 , 790, 688, 793 , 792, 794, 689 , 689, 794, 691 , 688, 690, 793 , 793, 690, 795 , 794, 795, 691 , 691, 795, 690 , 671, 672, 693 , 693, 672, 692 , 693, 692, 695 , 695, 692, 694 , 695, 694, 697 , 697, 694, 696 , 664, 671, 673 , 673, 671, 693 , 673, 693, 674 , 674, 693, 695 , 697, 696, 699 , 699, 696, 698 , 782, 796, 776 , 776, 796, 797 , 674, 695, 678 , 678, 695, 697 , 699, 698,
<filename>juju/client/_client3.py # DO NOT CHANGE THIS FILE! This file is auto-generated by facade.py. # Changes will be overwritten/lost when the file is regenerated. from juju.client.facade import Type, ReturnMapping from juju.client._definitions import * class ActionFacade(Type): name = 'Action' version = 3 schema = {'definitions': {'Action': {'additionalProperties': False, 'properties': {'name': {'type': 'string'}, 'parameters': {'patternProperties': {'.*': {'additionalProperties': True, 'type': 'object'}}, 'type': 'object'}, 'receiver': {'type': 'string'}, 'tag': {'type': 'string'}}, 'required': ['tag', 'receiver', 'name'], 'type': 'object'}, 'ActionResult': {'additionalProperties': False, 'properties': {'action': {'$ref': '#/definitions/Action'}, 'completed': {'format': 'date-time', 'type': 'string'}, 'enqueued': {'format': 'date-time', 'type': 'string'}, 'error': {'$ref': '#/definitions/Error'}, 'message': {'type': 'string'}, 'output': {'patternProperties': {'.*': {'additionalProperties': True, 'type': 'object'}}, 'type': 'object'}, 'started': {'format': 'date-time', 'type': 'string'}, 'status': {'type': 'string'}}, 'type': 'object'}, 'ActionResults': {'additionalProperties': False, 'properties': {'results': {'items': {'$ref': '#/definitions/ActionResult'}, 'type': 'array'}}, 'type': 'object'}, 'ActionSpec': {'additionalProperties': False, 'properties': {'description': {'type': 'string'}, 'params': {'patternProperties': {'.*': {'additionalProperties': True, 'type': 'object'}}, 'type': 'object'}}, 'required': ['description', 'params'], 'type': 'object'}, 'Actions': {'additionalProperties': False, 'properties': {'actions': {'items': {'$ref': '#/definitions/Action'}, 'type': 'array'}}, 'type': 'object'}, 'ActionsByName': {'additionalProperties': False, 'properties': {'actions': {'items': {'$ref': '#/definitions/ActionResult'}, 'type': 'array'}, 'error': {'$ref': '#/definitions/Error'}, 'name': {'type': 'string'}}, 'type': 'object'}, 'ActionsByNames': {'additionalProperties': False, 'properties': {'actions': {'items': {'$ref': '#/definitions/ActionsByName'}, 'type': 'array'}}, 'type': 'object'}, 'ActionsByReceiver': {'additionalProperties': False, 'properties': {'actions': {'items': {'$ref': '#/definitions/ActionResult'}, 'type': 'array'}, 'error': {'$ref': '#/definitions/Error'}, 'receiver': {'type': 'string'}}, 'type': 'object'}, 'ActionsByReceivers': {'additionalProperties': False, 'properties': {'actions': {'items': {'$ref': '#/definitions/ActionsByReceiver'}, 'type': 'array'}}, 'type': 'object'}, 'ApplicationCharmActionsResult': {'additionalProperties': False, 'properties': {'actions': {'patternProperties': {'.*': {'$ref': '#/definitions/ActionSpec'}}, 'type': 'object'}, 'application-tag': {'type': 'string'}, 'error': {'$ref': '#/definitions/Error'}}, 'type': 'object'}, 'ApplicationsCharmActionsResults': {'additionalProperties': False, 'properties': {'results': {'items': {'$ref': '#/definitions/ApplicationCharmActionsResult'}, 'type': 'array'}}, 'type': 'object'}, 'Entities': {'additionalProperties': False, 'properties': {'entities': {'items': {'$ref': '#/definitions/Entity'}, 'type': 'array'}}, 'required': ['entities'], 'type': 'object'}, 'Entity': {'additionalProperties': False, 'properties': {'tag': {'type': 'string'}}, 'required': ['tag'], 'type': 'object'}, 'Error': {'additionalProperties': False, 'properties': {'code': {'type': 'string'}, 'info': {'patternProperties': {'.*': {'additionalProperties': True, 'type': 'object'}}, 'type': 'object'}, 'message': {'type': 'string'}}, 
'required': ['message', 'code'], 'type': 'object'}, 'FindActionsByNames': {'additionalProperties': False, 'properties': {'names': {'items': {'type': 'string'}, 'type': 'array'}}, 'type': 'object'}, 'FindTags': {'additionalProperties': False, 'properties': {'prefixes': {'items': {'type': 'string'}, 'type': 'array'}}, 'required': ['prefixes'], 'type': 'object'}, 'FindTagsResults': {'additionalProperties': False, 'properties': {'matches': {'patternProperties': {'.*': {'items': {'$ref': '#/definitions/Entity'}, 'type': 'array'}}, 'type': 'object'}}, 'required': ['matches'], 'type': 'object'}, 'RunParams': {'additionalProperties': False, 'properties': {'applications': {'items': {'type': 'string'}, 'type': 'array'}, 'commands': {'type': 'string'}, 'machines': {'items': {'type': 'string'}, 'type': 'array'}, 'timeout': {'type': 'integer'}, 'units': {'items': {'type': 'string'}, 'type': 'array'}}, 'required': ['commands', 'timeout'], 'type': 'object'}}, 'properties': {'Actions': {'properties': {'Params': {'$ref': '#/definitions/Entities'}, 'Result': {'$ref': '#/definitions/ActionResults'}}, 'type': 'object'}, 'ApplicationsCharmsActions': {'properties': {'Params': {'$ref': '#/definitions/Entities'}, 'Result': {'$ref': '#/definitions/ApplicationsCharmActionsResults'}}, 'type': 'object'}, 'Cancel': {'properties': {'Params': {'$ref': '#/definitions/Entities'}, 'Result': {'$ref': '#/definitions/ActionResults'}}, 'type': 'object'}, 'Enqueue': {'properties': {'Params': {'$ref': '#/definitions/Actions'}, 'Result': {'$ref': '#/definitions/ActionResults'}}, 'type': 'object'}, 'FindActionTagsByPrefix': {'properties': {'Params': {'$ref': '#/definitions/FindTags'}, 'Result': {'$ref': '#/definitions/FindTagsResults'}}, 'type': 'object'}, 'FindActionsByNames': {'properties': {'Params': {'$ref': '#/definitions/FindActionsByNames'}, 'Result': {'$ref': '#/definitions/ActionsByNames'}}, 'type': 'object'}, 'ListAll': {'properties': {'Params': {'$ref': '#/definitions/Entities'}, 'Result': {'$ref': '#/definitions/ActionsByReceivers'}}, 'type': 'object'}, 'ListCompleted': {'properties': {'Params': {'$ref': '#/definitions/Entities'}, 'Result': {'$ref': '#/definitions/ActionsByReceivers'}}, 'type': 'object'}, 'ListPending': {'properties': {'Params': {'$ref': '#/definitions/Entities'}, 'Result': {'$ref': '#/definitions/ActionsByReceivers'}}, 'type': 'object'}, 'ListRunning': {'properties': {'Params': {'$ref': '#/definitions/Entities'}, 'Result': {'$ref': '#/definitions/ActionsByReceivers'}}, 'type': 'object'}, 'Run': {'properties': {'Params': {'$ref': '#/definitions/RunParams'}, 'Result': {'$ref': '#/definitions/ActionResults'}}, 'type': 'object'}, 'RunOnAllMachines': {'properties': {'Params': {'$ref': '#/definitions/RunParams'}, 'Result': {'$ref': '#/definitions/ActionResults'}}, 'type': 'object'}}, 'type': 'object'} @ReturnMapping(ActionResults) async def Actions(self, entities=None): ''' entities : typing.Sequence[~Entity] Returns -> ActionResults ''' if entities is not None and not isinstance(entities, (bytes, str, list)): raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities))) # map input types to rpc msg _params = dict() msg = dict(type='Action', request='Actions', version=3, params=_params) _params['entities'] = entities reply = await self.rpc(msg) return reply @ReturnMapping(ApplicationsCharmActionsResults) async def ApplicationsCharmsActions(self, entities=None): ''' entities : typing.Sequence[~Entity] Returns -> ApplicationsCharmActionsResults ''' if entities is not None and not 
isinstance(entities, (bytes, str, list)): raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities))) # map input types to rpc msg _params = dict() msg = dict(type='Action', request='ApplicationsCharmsActions', version=3, params=_params) _params['entities'] = entities reply = await self.rpc(msg) return reply @ReturnMapping(ActionResults) async def Cancel(self, entities=None): ''' entities : typing.Sequence[~Entity] Returns -> ActionResults ''' if entities is not None and not isinstance(entities, (bytes, str, list)): raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities))) # map input types to rpc msg _params = dict() msg = dict(type='Action', request='Cancel', version=3, params=_params) _params['entities'] = entities reply = await self.rpc(msg) return reply @ReturnMapping(ActionResults) async def Enqueue(self, actions=None): ''' actions : typing.Sequence[~Action] Returns -> ActionResults ''' if actions is not None and not isinstance(actions, (bytes, str, list)): raise Exception("Expected actions to be a Sequence, received: {}".format(type(actions))) # map input types to rpc msg _params = dict() msg = dict(type='Action', request='Enqueue', version=3, params=_params) _params['actions'] = actions reply = await self.rpc(msg) return reply @ReturnMapping(FindTagsResults) async def FindActionTagsByPrefix(self, prefixes=None): ''' prefixes : typing.Sequence[str] Returns -> FindTagsResults ''' if prefixes is not None and not isinstance(prefixes, (bytes, str, list)): raise Exception("Expected prefixes to be a Sequence, received: {}".format(type(prefixes))) # map input types to rpc msg _params = dict() msg = dict(type='Action', request='FindActionTagsByPrefix', version=3, params=_params) _params['prefixes'] = prefixes reply = await self.rpc(msg) return reply @ReturnMapping(ActionsByNames) async def FindActionsByNames(self, names=None): ''' names : typing.Sequence[str] Returns -> ActionsByNames ''' if names is not None and not isinstance(names, (bytes, str, list)): raise Exception("Expected names to be a Sequence, received: {}".format(type(names))) # map input types to rpc msg _params = dict() msg = dict(type='Action', request='FindActionsByNames', version=3, params=_params) _params['names'] = names reply = await self.rpc(msg) return reply @ReturnMapping(ActionsByReceivers) async def ListAll(self, entities=None): ''' entities : typing.Sequence[~Entity] Returns -> ActionsByReceivers ''' if entities is not None and not isinstance(entities, (bytes, str, list)): raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities))) # map input types to rpc msg _params = dict() msg = dict(type='Action', request='ListAll', version=3, params=_params) _params['entities'] = entities reply = await self.rpc(msg) return reply @ReturnMapping(ActionsByReceivers) async def ListCompleted(self, entities=None): ''' entities : typing.Sequence[~Entity] Returns -> ActionsByReceivers ''' if entities is not None and not isinstance(entities, (bytes, str, list)): raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities))) # map input types to rpc msg _params = dict() msg = dict(type='Action', request='ListCompleted', version=3, params=_params) _params['entities'] = entities reply = await self.rpc(msg) return reply @ReturnMapping(ActionsByReceivers) async def ListPending(self, entities=None): ''' entities : typing.Sequence[~Entity] Returns -> ActionsByReceivers ''' if entities is not None and not 
isinstance(entities, (bytes, str, list)): raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities))) # map input types to rpc msg _params = dict() msg = dict(type='Action', request='ListPending', version=3, params=_params) _params['entities'] = entities reply = await self.rpc(msg) return reply @ReturnMapping(ActionsByReceivers) async def ListRunning(self, entities=None): ''' entities : typing.Sequence[~Entity] Returns -> ActionsByReceivers ''' if entities is not None and not isinstance(entities, (bytes, str, list)): raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities))) # map input types to rpc msg _params = dict() msg = dict(type='Action', request='ListRunning', version=3, params=_params) _params['entities'] = entities reply = await self.rpc(msg) return reply @ReturnMapping(ActionResults) async def Run(self, applications=None, commands=None, machines=None, timeout=None, units=None): ''' applications : typing.Sequence[str] commands : str machines : typing.Sequence[str] timeout : int units : typing.Sequence[str] Returns -> ActionResults ''' if applications is not None and not isinstance(applications, (bytes, str, list)): raise Exception("Expected applications to be a Sequence, received: {}".format(type(applications))) if commands is not None and not isinstance(commands, (bytes, str)): raise Exception("Expected commands to be a str, received: {}".format(type(commands))) if machines is not None and not isinstance(machines, (bytes, str, list)): raise Exception("Expected machines to be a Sequence, received: {}".format(type(machines))) if timeout is not None and not isinstance(timeout, int): raise Exception("Expected timeout to be a int, received: {}".format(type(timeout))) if units is not None and not isinstance(units, (bytes, str, list)): raise Exception("Expected units to be a Sequence, received: {}".format(type(units))) # map input types to rpc msg _params = dict() msg = dict(type='Action', request='Run', version=3, params=_params) _params['applications'] = applications _params['commands'] = commands _params['machines'] = machines _params['timeout'] = timeout _params['units'] = units reply = await self.rpc(msg) return reply @ReturnMapping(ActionResults) async def RunOnAllMachines(self, applications=None, commands=None, machines=None, timeout=None, units=None): ''' applications : typing.Sequence[str] commands : str machines : typing.Sequence[str] timeout : int units : typing.Sequence[str] Returns -> ActionResults ''' if applications is not None and not isinstance(applications, (bytes, str, list)): raise Exception("Expected applications
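# Illustrative sketch (not part of the generated facade client above): every
# method in this facade follows the same pattern -- validate the keyword
# arguments, pack them into a ``params`` dict, wrap that in a message carrying
# the facade type, request name and version, and await ``self.rpc(msg)``. The
# ``EchoRPC`` stub below is a made-up stand-in for a real connection so the
# message shape can be inspected without a controller.
import asyncio

class EchoRPC:
    """Minimal stand-in that returns the message it would have sent."""

    async def rpc(self, msg):
        return msg

    async def Run(self, applications=None, commands=None, machines=None,
                  timeout=None, units=None):
        # same shape as the generated ``Run`` method above
        _params = dict(applications=applications, commands=commands,
                       machines=machines, timeout=timeout, units=units)
        msg = dict(type='Action', request='Run', version=3, params=_params)
        return await self.rpc(msg)

async def _demo():
    facade = EchoRPC()
    msg = await facade.Run(commands='uname -a', timeout=30, machines=['0'])
    print(msg['request'], msg['params']['commands'])  # Run uname -a

asyncio.run(_demo())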
to indicate whether 'angle' is in degrees or radians. If True, 'angle' is interpreted as radians. :return: CompoundGeometry object rotated by 'angle' about 'rot_point' :rtype: :class:`~sectionproperties.pre.geometry.CompoundGeometry` The following example rotates a 200UB25 section with a plate clockwise by 30 degrees:: import sectionproperties.pre.library.steel_sections as steel_sections import sectionproperties.pre.library.primitive_sections as primitive_sections geometry_1 = steel_sections.i_section(d=203, b=133, t_f=7.8, t_w=5.8, r=8.9, n_r=8) geometry_2 = primitive_sections.rectangular_section(d=20, b=133) compound = geometry_2.align_center(geometry_1).align_to(geometry_1, on="top") + geometry_1 new_compound = compound.rotate_section(angle=-30) """ geoms_acc = [] if rot_point == "center": rot_point = box(*MultiPolygon(self.geom).bounds).centroid for geom in self.geoms: geoms_acc.append(geom.rotate_section(angle, rot_point, use_radians)) new_geom = CompoundGeometry(geoms_acc) return new_geom def mirror_section( self, axis="x", mirror_point: Union[List[float], str] = "center" ): """Mirrors the geometry about a point on either the x or y-axis. :param string axis: Axis about which to mirror the geometry, *'x'* or *'y'* :param mirror_point: Point about which to mirror the geometry *(x, y)*. If no point is provided, mirrors the geometry about the centroid of the shape's bounding box. Default = 'center'. :type mirror_point: Union[list[float, float], str] :return: Geometry object mirrored on 'axis' about 'mirror_point' :rtype: :class:`~sectionproperties.pre.geometry.Geometry` The following example mirrors a 200PFC section with a plate about the y-axis and the point (0, 0):: import sectionproperties.pre.library.steel_sections as steel_sections import sectionproperties.pre.library.primitive_sections as primitive_sections geometry_1 = steel_sections.channel_section(d=200, b=75, t_f=12, t_w=6, r=12, n_r=8) geometry_2 = primitive_sections.rectangular_section(d=20, b=133) compound = geometry_2.align_center(geometry_1).align_to(geometry_1, on="top") + geometry_1 new_compound = compound.mirror_section(axis='y', mirror_point=[0,0]) """ geoms_acc = [] for geom in self.geoms: geoms_acc.append(geom.mirror_section(axis, mirror_point)) new_geom = CompoundGeometry(geoms_acc) return new_geom def align_center(self, align_to: Optional[Geometry] = None): """ Returns a new CompoundGeometry object, translated in both x and y, so that the center-point of the new object's material-weighted centroid will be aligned with centroid of the object in 'align_to'. If 'align_to' is None then the new object will be aligned with it's centroid at the origin. Note: The material-weighted centroid refers to when individual geometries within the CompoundGeometry object have been assigned differing materials. The centroid of the compound geometry is calculated by using the E modulus of each geometry's assigned material. 
:param align_to: Another Geometry to align to or None (default is None) :type align_to: Optional[:class:`~sectionproperties.pre.geometry.Geometry`] :return: Geometry object translated to new alignment :rtype: :class:`~sectionproperties.pre.geometry.Geometry` """ EA_sum = sum( [ geom.material.elastic_modulus * geom.calculate_area() for geom in self.geoms ] ) cx_EA_acc = 0 cy_EA_acc = 0 for geom in self.geoms: E = geom.material.elastic_modulus A = geom.calculate_area() EA = E * A cx, cy = list(geom.geom.centroid.coords[0]) cx_EA_acc += cx * EA cy_EA_acc += cy * EA weighted_cx = cx_EA_acc / (EA_sum) weighted_cy = cy_EA_acc / (EA_sum) if align_to is None: shift_x, shift_y = ( round(-weighted_cx, self.tol), round(-weighted_cy, self.tol), ) else: align_cx, align_cy = list(align_to.geom.centroid.coords)[0] shift_x = round(align_cx - weighted_cx, self.tol) shift_y = round(align_cy - weighted_cy, self.tol) new_geom = self.shift_section(x_offset=shift_x, y_offset=shift_y) return new_geom def split_section( self, point_i: Tuple[float, float], point_j: Optional[Tuple[float, float]] = None, vector: Union[Optional[Tuple[float, float]], np.ndarray] = None, ) -> Tuple[List[Geometry], List[Geometry]]: """Splits, or bisects, the geometry about an infinite line, as defined by two points on the line or by one point on the line and a vector. Either ``point_j`` or ``vector`` must be given. If ``point_j`` is given, ``vector`` is ignored. Returns a tuple of two lists each containing new Geometry instances representing the "top" and "bottom" portions, respectively, of the bisected geometry. If the line is a vertical line then the "right" and "left" portions, respectively, are returned. :param point_i: A tuple of *(x, y)* coordinates to define a first point on the line :type point_i: Tuple[float, float] :param point_j: Optional. A tuple of *(x, y)* coordinates to define a second point on the line :type point_j: Tuple[float, float] :param vector: Optional. A tuple or numpy ndarray of *(x, y)* components to define the line direction. :type vector: Union[Tuple[float, float], numpy.ndarray] :return: A tuple of lists containing Geometry objects that are bisected about the infinite line defined by the two given points. The first item in the tuple represents the geometries on the "top" of the line (or to the "right" of the line, if vertical) and the second item represents the geometries to the "bottom" of the line (or to the "left" of the line, if vertical). :rtype: Tuple[List[Geometry], List[Geometry]] The following example splits a 200PFC section about the y-axis:: import sectionproperties.pre.library.steel_sections as steel_sections from shapely.geometry import LineString geometry = steel_sections.channel_section(d=200, b=75, t_f=12, t_w=6, r=12, n_r=8) right_geom, left_geom = geometry.split_section((0, 0), (0, 1)) """ top_geoms_acc = [] bottom_geoms_acc = [] for geom in self.geoms: top_geoms, bottom_geoms = geom.split_section(point_i, point_j, vector) top_geoms_acc += top_geoms bottom_geoms_acc += bottom_geoms return (top_geoms_acc, bottom_geoms_acc) def offset_perimeter( self, amount: float = 0, where="exterior", resolution: float = 12 ): """Dilates or erodes the perimeter of a CompoundGeometry object by a discrete amount. :param amount: Distance to offset the section by. A -ve value "erodes" the section. A +ve value "dilates" the section. :type amount: float :param where: One of either "exterior", "interior", or "all" to specify which edges of the geometry to offset. 
If geometry has no interiors, then this parameter has no effect. Default is "exterior". :type where: str :param resolution: Number of segments used to approximate a quarter circle around a point :type resolution: float :return: Geometry object translated to new alignment :rtype: :class:`~sectionproperties.pre.geometry.Geometry` The following example erodes a 200UB25 with a 12 plate stiffener section by 2 mm:: import sectionproperties.pre.library.steel_sections as steel_sections import sectionproperties.pre.library.primitive_sections as primitive_sections geometry_1 = steel_sections.i_section(d=203, b=133, t_f=7.8, t_w=5.8, r=8.9, n_r=8) geometry_2 = primitive_sections.rectangular_section(d=12, b=133) compound = geometry_2.align_center(geometry_1).align_to(geometry_1, on="top") + geometry_1 new_geometry = compound.offset_perimeter(amount=-2) .. note:: If performing a positive offset on a CompoundGeometry with multiple materials, ensure that the materials propagate as desired by performing a .plot_mesh() prior to performing any analysis. """ if amount < 0: # Eroding condition unionized_poly = unary_union([geom.geom for geom in self.geoms]) offset_geom = Geometry(unionized_poly).offset_perimeter( amount, where, resolution ) # Using the offset_geom as a "mask" geoms_acc = [] for geom in self.geoms: # Use symmetric intersection to find the region of the original # that intersects with the eroded unionized shape intersection_geom = geom & offset_geom if not intersection_geom.geom.is_empty: geoms_acc.append(intersection_geom) new_geom = CompoundGeometry(geoms_acc) return new_geom elif amount > 0: # Ballooning condition # This produces predictable results up to a point. # That point is when the offset is so great it exceeds the thickness # of the material at an interface of two materials. # e.g. A 50 deep plate on top of the top flange of an I Section with a flange depth of 10 # When the offset exceeds 10 (the depth of the flange at the intersection), the meshed # material regions will become unpredictable. geoms_acc = [] for i_idx, geom in enumerate(self.geoms): # Offset each geom... offset_geom = geom.offset_perimeter(amount, where, resolution) for j_idx, orig_geom in enumerate(self.geoms): if i_idx != j_idx: # ... then remove the parts that intersect with the other # constituents of the compound geometry (because they are # occupying that space already) offset_geom = offset_geom - orig_geom if not offset_geom.geom.is_empty: geoms_acc.append(offset_geom) new_geom = CompoundGeometry(geoms_acc) return new_geom else: return self def compile_geometry(self): """ Converts the shapely.geometry.Polygon objects stored in self.geoms into lists of points, facets, control_points, and hole points. """ self.points = [] self.facets = [] self.control_points = [] self.holes = [] # loop through all sections for geom in self.geoms: if not all([geom.points, geom.facets, geom.control_points]): geom.create_facets_and_control_points() # If not previously done # add points and skip duplicate points for point in geom.points: if list(point) not in self.points: self.points.append(list(point)) # The facet numbering from the constituent Polygon is no longer valid # in the MultiPolygon. # We need to map the facets from the original Polygon points to the new # collected MultiPolygon points, which are in a different order. # Because points are not in their "original" order, we have to find the matching point # in the new self.points list and map the facet from the old
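# A minimal, self-contained sketch of the "eroding" branch of
# ``offset_perimeter`` above, using shapely only; the two rectangles and the
# 2.0 offset are made-up example values, not part of sectionproperties.
from shapely.geometry import box
from shapely.ops import unary_union

flange = box(0.0, 100.0, 100.0, 110.0)  # plate sitting on top
web = box(45.0, 0.0, 55.0, 100.0)       # plate underneath, touching it

# 1) union the constituent polygons, 2) erode the union with a negative
# buffer, 3) use the eroded union as a mask on each original constituent.
eroded_union = unary_union([flange, web]).buffer(-2.0, resolution=12)
masked = [poly.intersection(eroded_union) for poly in (flange, web)]
masked = [p for p in masked if not p.is_empty]

for p in masked:
    print(round(p.area, 1))  # areas of the eroded constituents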
""" ******************************************************************************** 2D Robot Localization - Benchmark ******************************************************************************** Goals of this script: - implement different UKFs on the 2D robot localization example. - design the Extended Kalman Filter (EKF) and the Invariant Extended Kalman Filter (IEKF) :cite:`barrauInvariant2017`. - compare the different algorithms with Monte-Carlo simulations. *We assume the reader is already familiar with the considered problem described in the tutorial.* We previously designed an UKF with a standard uncertainty representation. An advantage of the versatility of the UKF is to speed up implementation, tests, and comparision of algorithms with different uncertainty representations. Indeed, for the given problem, three different UKFs emerge, defined respectively as: 1) The state is embedded in :math:`SO(2) \\times \mathbb{R}^2`, where: * the retraction :math:`\\varphi(.,.)` is the :math:`SO(2)` exponential for orientation and the vector addition for position. * the inverse retraction :math:`\\varphi^{-1}(.,.)` is the :math:`SO(2)` logarithm for orientation and the vector subtraction for position. 2) The state is embedded in :math:`SE(2)` with left multiplication, i.e. - the retraction :math:`\\varphi(.,.)` is the :math:`SE(2)` exponential, where the state multiplies on the left the uncertainty :math:`\\boldsymbol{\\xi}`. - the inverse retraction :math:`\\varphi^{-1}(.,.)` is the :math:`SE(2)` logarithm. - this left UKF on :math:`SE(2)` corresponds to the Invariant Extended Kalman Filter (IEKF) recommended in :cite:`barrauInvariant2017`. 3) The state is embedded in :math:`SE(2)` with right multiplication, i.e. - the retraction :math:`\\varphi(.,.)` is the :math:`SE(2)` exponential, where the state multiplies on the right the uncertainty :math:`\\boldsymbol{\\xi}`. - the inverse retraction :math:`\\varphi^{-1}(.,.)` is the :math:`SE(2)` logarithm. We tests the filters on simulation with strong initial heading error. """ ################################################################################ # Import # ============================================================================== from ukfm import SO2, UKF, EKF from ukfm import LOCALIZATION as MODEL import ukfm import numpy as np import matplotlib ukfm.utils.set_matplotlib_config() ################################################################################ # We compare the filters on a large number of Monte-Carlo runs. # Monte-Carlo runs N_mc = 100 ################################################################################ # Simulation Setting # ============================================================================== # We set the simulation as in :cite:`barrauInvariant2017`, section IV. The robot # drives along a 10 m diameter circle for 40 seconds with high rate odometer # measurements (100 Hz) and low rate GPS measurements (1 Hz). The vehicle gets # moderate angular velocity uncertainty and highly precise linear velocity. The # initial values of the heading error is very strong, **45° standard # deviation**, while the initial position is known. 
# sequence time (s)
T = 40
# odometry frequency (Hz)
odo_freq = 100
# create the model
model = MODEL(T, odo_freq)
# odometry noise standard deviation
odo_std = np.array([0.01,             # speed (v/m)
                    0.01,             # speed (v/m)
                    1 / 180 * np.pi])  # angular speed (rad/s)
# GPS frequency (Hz)
gps_freq = 1
# GPS noise standard deviation (m)
gps_std = 1
# radius of the circle trajectory (m)
radius = 5
# initial heading error standard deviation
theta0_std = 45/180*np.pi

################################################################################
# Filter Design
# ==============================================================================
# The UKFs are compared to an Extended Kalman Filter (EKF) and an Invariant EKF
# (IEKF). The EKF has the same uncertainty representation as the UKF with the
# retraction on :math:`SO(2) \times \mathbb{R}^2`, whereas the IEKF has the same
# uncertainty representation as the UKF with the left retraction on
# :math:`SE(2)`.

# propagation noise covariance matrix
Q = np.diag(odo_std**2)
# measurement noise covariance matrix
R = gps_std**2*np.eye(2)
# initial covariance matrix
P0 = np.zeros((3, 3))
# we take into account initial heading error
P0[0, 0] = theta0_std ** 2
# sigma point parameter
alpha = np.array([1e-3, 1e-3, 1e-3])

################################################################################
# We set up the error variables before launching the Monte-Carlo simulations.
# As we have five similar methods, the code below is repetitive.
ukf_err = np.zeros((N_mc, model.N, 3))
left_ukf_err = np.zeros_like(ukf_err)
right_ukf_err = np.zeros_like(ukf_err)
iekf_err = np.zeros_like(ukf_err)
ekf_err = np.zeros_like(ukf_err)

################################################################################
# We record the Normalized Estimation Error Squared (NEES) for consistency
# evaluation (see Results).
ukf_nees = np.zeros((N_mc, model.N, 2))
left_ukf_nees = np.zeros_like(ukf_nees)
right_ukf_nees = np.zeros_like(ukf_nees)
iekf_nees = np.zeros_like(ukf_nees)
ekf_nees = np.zeros_like(ukf_nees)

################################################################################
# Monte-Carlo Runs
# ==============================================================================
# We run the Monte-Carlo simulations in a for loop.
#
# .. note::
#
#     For each Monte-Carlo run we sample an initial heading error from the true
#     distribution (:math:`\mathbf{P}_0`). This requires many Monte-Carlo
#     samples.
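################################################################################
# For illustration only, a minimal NumPy sketch of the
# :math:`SO(2) \times \mathbb{R}^2` retraction described in the header; the
# benchmark itself uses the ``phi``/``phi_inv`` functions provided by the
# LOCALIZATION model, whose conventions may differ in detail.

def so2_exp_sketch(theta):
    """2x2 rotation matrix for a scalar angle (the SO(2) exponential)."""
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, -s], [s, c]])

def phi_sketch(Rot, p, xi):
    """Retraction: rotate the orientation by xi[0], translate p by xi[1:3]."""
    return Rot.dot(so2_exp_sketch(xi[0])), p + xi[1:3]

def phi_inv_sketch(Rot, p, Rot_ref, p_ref):
    """Inverse retraction: SO(2) logarithm plus position difference."""
    dR = Rot_ref.T.dot(Rot)
    return np.hstack([np.arctan2(dR[1, 0], dR[0, 0]), p - p_ref])

# sanity check: the inverse retraction recovers the perturbation
_xi = np.array([0.3, 1.0, -2.0])
_Rot, _p = phi_sketch(np.eye(2), np.zeros(2), _xi)
print(phi_inv_sketch(_Rot, _p, np.eye(2), np.zeros(2)))  # ~ [ 0.3  1. -2.]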
for n_mc in range(N_mc):
    print("Monte-Carlo iteration(s): " + str(n_mc + 1) + "/" + str(N_mc))
    # simulation true trajectory
    states, omegas = model.simu_f(odo_std, radius)
    # simulate measurement
    ys, one_hot_ys = model.simu_h(states, gps_freq, gps_std)
    # initialize filter with inaccurate state
    state0 = model.STATE(
        Rot=states[0].Rot.dot(SO2.exp(theta0_std * np.random.randn(1))),
        p=states[0].p)
    # define the filters
    ukf = UKF(state0=state0, P0=P0, f=model.f, h=model.h, Q=Q, R=R,
              phi=model.phi, phi_inv=model.phi_inv, alpha=alpha)
    left_ukf = UKF(state0=state0, P0=P0, f=model.f, h=model.h, Q=Q, R=R,
                   phi=model.left_phi, phi_inv=model.left_phi_inv, alpha=alpha)
    right_ukf = UKF(state0=state0, P0=P0, f=model.f, h=model.h, Q=Q, R=R,
                    phi=model.right_phi, phi_inv=model.right_phi_inv,
                    alpha=alpha)
    iekf = EKF(model=model, state0=state0, P0=P0, Q=Q, R=R,
               FG_ana=model.iekf_FG_ana, H_ana=model.iekf_H_ana,
               phi=model.left_phi)
    ekf = EKF(model=model, state0=state0, P0=P0, Q=Q, R=R,
              FG_ana=model.ekf_FG_ana, H_ana=model.ekf_H_ana,
              phi=model.phi)
    # variables for recording estimates of the Monte-Carlo run
    ukf_states = [state0]
    left_states = [state0]
    right_states = [state0]
    iekf_states = [state0]
    ekf_states = [state0]
    ukf_Ps = np.zeros((model.N, 3, 3))
    left_ukf_Ps = np.zeros_like(ukf_Ps)
    right_ukf_Ps = np.zeros_like(ukf_Ps)
    ekf_Ps = np.zeros_like(ukf_Ps)
    iekf_Ps = np.zeros_like(ukf_Ps)
    ukf_Ps[0] = P0
    left_ukf_Ps[0] = P0
    right_ukf_Ps[0] = P0
    ekf_Ps[0] = P0
    iekf_Ps[0] = P0
    # measurement iteration number
    k = 1
    # filtering loop
    for n in range(1, model.N):
        ukf.propagation(omegas[n-1], model.dt)
        left_ukf.propagation(omegas[n-1], model.dt)
        right_ukf.propagation(omegas[n-1], model.dt)
        iekf.propagation(omegas[n-1], model.dt)
        ekf.propagation(omegas[n-1], model.dt)
        # update only if a measurement is received
        if one_hot_ys[n] == 1:
            ukf.update(ys[k])
            left_ukf.update(ys[k])
            right_ukf.update(ys[k])
            iekf.update(ys[k])
            ekf.update(ys[k])
            k = k + 1
        ukf_states.append(ukf.state)
        left_states.append(left_ukf.state)
        right_states.append(right_ukf.state)
        iekf_states.append(iekf.state)
        ekf_states.append(ekf.state)
        ukf_Ps[n] = ukf.P
        left_ukf_Ps[n] = left_ukf.P
        right_ukf_Ps[n] = right_ukf.P
        iekf_Ps[n] = iekf.P
        ekf_Ps[n] = ekf.P
    # get state trajectory
    Rots, ps = model.get_states(states, model.N)
    ukf_Rots, ukf_ps = model.get_states(ukf_states, model.N)
    left_ukf_Rots, left_ukf_ps = model.get_states(left_states, model.N)
    right_ukf_Rots, right_ukf_ps = model.get_states(right_states, model.N)
    iekf_Rots, iekf_ps = model.get_states(iekf_states, model.N)
    ekf_Rots, ekf_ps = model.get_states(ekf_states, model.N)
    # record errors
    ukf_err[n_mc] = model.errors(Rots, ukf_Rots, ps, ukf_ps)
    left_ukf_err[n_mc] = model.errors(Rots, left_ukf_Rots, ps, left_ukf_ps)
    right_ukf_err[n_mc] = model.errors(Rots, right_ukf_Rots, ps, right_ukf_ps)
    iekf_err[n_mc] = model.errors(Rots, iekf_Rots, ps, iekf_ps)
    ekf_err[n_mc] = model.errors(Rots, ekf_Rots, ps, ekf_ps)
    # record NEES
    ukf_nees[n_mc] = model.nees(ukf_err[n_mc], ukf_Ps, ukf_Rots, ukf_ps, 'STD')
    left_ukf_nees[n_mc] = model.nees(left_ukf_err[n_mc], left_ukf_Ps,
                                     left_ukf_Rots, left_ukf_ps, 'LEFT')
    right_ukf_nees[n_mc] = model.nees(right_ukf_err[n_mc], right_ukf_Ps,
                                      right_ukf_Rots, right_ukf_ps, 'RIGHT')
    iekf_nees[n_mc] = model.nees(iekf_err[n_mc], iekf_Ps, iekf_Rots, iekf_ps,
                                 'LEFT')
    ekf_nees[n_mc] = model.nees(ekf_err[n_mc], ekf_Ps, ekf_Rots, ekf_ps, 'STD')

################################################################################
# Results
# ==============================================================================
# We first visualize the robot trajectory (for the last run) and the errors
# w.r.t. orientation and position (averaged over the Monte-Carlo runs). As the
# simulations involve randomness, the trajectory plot only gives an indication
# of performance, not a proof.
ukf_e, left_ukf_e, right_ukf_e, iekf_e, ekf_e = model.benchmark_plot(
    ukf_err, left_ukf_err, right_ukf_err, iekf_err, ekf_err, ps, ukf_ps,
    left_ukf_ps, right_ukf_ps, ekf_ps, iekf_ps)

################################################################################
# Two groups of filters emerge: group 1) consists of the EKF and the
# :math:`SO(2) \times \mathbb{R}^2` UKF; group 2) consists of the IEKF, the
# left :math:`SE(2)` UKF and the right :math:`SE(2)` UKF (the curves of these
# filters are superposed). The second group is visibly better at position
# estimation.
#
# A more statistical view is to average the results over all the Monte-Carlo
# runs. Let us compute the Root Mean Squared Error (RMSE) for each method, both
# for the orientation and the position.
model.benchmark_print(ukf_e, left_ukf_e, right_ukf_e, iekf_e, ekf_e)

################################################################################
# The RMSE values confirm the results seen in the plot.
#
# A consistency metric is the Normalized Estimation Error Squared (NEES).
# Classical criteria used to evaluate the performance of an estimation method,
# like the RMSE, do not inform about consistency as they do not take into
# account the uncertainty returned by the filter. This point is addressed by the
# NEES, which computes the average squared value of the error, normalized by the
# covariance matrix of the filter. The case NEES > 1 reveals an inconsistency
# issue: the actual uncertainty is higher than the computed uncertainty.
# (A short self-contained sketch of this computation follows the conclusion.)
model.nees_print(ukf_nees, left_ukf_nees, right_ukf_nees, iekf_nees, ekf_nees)

################################################################################
# As the filters are initialized with perfect position and zero covariance
# w.r.t. position, we compute the NEES only after 20 s to avoid numerical issues
# (during the first seconds of the trajectory the covariance matrix
# :math:`\mathbf{P}_n` is very small, so inverting it leads to meaninglessly
# large numbers). The results are clear: the :math:`SE(2)` UKFs are the most
# consistent.

################################################################################
# **Which filter is the best?** In this setting, the **left UKF**, the
# **right UKF** and the IEKF obtain similarly accurate results that clearly
# outperform the :math:`SO(2) \times \mathbb{R}^2` UKF and the EKF, while the
# two :math:`SE(2)` UKFs are the most consistent.
#
# .. note::
#
#     We have set all the filters with the same "true" noise covariance
#     parameters. However, both EKF and UKF based algorithms may better deal
#     with non-linearity by e.g. inflating the propagation noise covariance.
#

################################################################################
# Conclusion
# ==============================================================================
# This script compares different algorithms for 2D robot localization. Two
# groups of
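################################################################################
# For reference, a hedged sketch of how a single NEES value can be computed
# from an error vector and its filter covariance; the exact normalization and
# the orientation/position split used by ``model.nees`` may differ.

def nees_sketch(e, P):
    """Normalized Estimation Error Squared of error e under covariance P."""
    return float(e.T.dot(np.linalg.solve(P, e))) / e.shape[0]

# example: a 0.5 m error per axis under a 1 m^2 covariance is consistent
print(nees_sketch(np.array([0.5, 0.5]), np.eye(2)))  # 0.25 < 1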
<reponame>elxavicio/QSTK ''' (c) 2011, 2012 Georgia Tech Research Corporation This source code is released under the New BSD license. Please see http://wiki.quantsoftware.org/index.php?title=QSTK_License for license details. Created on April, 20, 2012 @author: <NAME> @contact: <EMAIL> @summary: Visualizer Main Code ''' import sys import numpy as np import math import os import dircache import AccessData as AD import pickle from PyQt4 import QtGui, QtCore, Qt from matplotlib.figure import Figure import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas # SLIDER range variable for all the range slider. Increasing this will increase precision SLIDER_RANGE=100 LOOKBACK_DAYS=20 # Range Slider Class, to implement the custom range slider. class RangeSlider(QtGui.QSlider): """ A slider for ranges. This class provides a dual-slider for ranges, where there is a defined maximum and minimum, as is a normal slider, but instead of having a single slider value, there are 2 slider values. This class emits the same signals as the QSlider base class, with the exception of valueChanged """ def __init__(self, *args): super(RangeSlider, self).__init__(*args) self._low = self.minimum() self._high = self.maximum() self.pressed_control = QtGui.QStyle.SC_None self.hover_control = QtGui.QStyle.SC_None self.click_offset = 0 # 0 for the low, 1 for the high, -1 for both self.active_slider = 0 def low(self): return self._low def setLow(self, low): self._low = low self.update() def high(self): return self._high def setHigh(self, high): self._high = high self.update() def paintEvent(self, event): # based on http://qt.gitorious.org/qt/qt/blobs/master/src/gui/widgets/qslider.cpp painter = QtGui.QPainter(self) style = QtGui.QApplication.style() for i, value in enumerate([self._low, self._high]): opt = QtGui.QStyleOptionSlider() self.initStyleOption(opt) # Only draw the groove for the first slider so it doesn't get drawn # on top of the existing ones every time if i == 0: opt.subControls = QtGui.QStyle.SC_SliderHandle#QtGui.QStyle.SC_SliderGroove | QtGui.QStyle.SC_SliderHandle else: opt.subControls = QtGui.QStyle.SC_SliderHandle if self.tickPosition() != self.NoTicks: opt.subControls |= QtGui.QStyle.SC_SliderTickmarks if self.pressed_control: opt.activeSubControls = self.pressed_control opt.state |= QtGui.QStyle.State_Sunken else: opt.activeSubControls = self.hover_control opt.sliderPosition = value opt.sliderValue = value style.drawComplexControl(QtGui.QStyle.CC_Slider, opt, painter, self) def mousePressEvent(self, event): event.accept() style = QtGui.QApplication.style() button = event.button() # In a normal slider control, when the user clicks on a point in the # slider's total range, but not on the slider part of the control the # control would jump the slider value to where the user clicked. 
# For this control, clicks which are not direct hits will slide both # slider parts if button: opt = QtGui.QStyleOptionSlider() self.initStyleOption(opt) self.active_slider = -1 for i, value in enumerate([self._low, self._high]): opt.sliderPosition = value hit = style.hitTestComplexControl(style.CC_Slider, opt, event.pos(), self) if hit == style.SC_SliderHandle: self.active_slider = i self.pressed_control = hit self.triggerAction(self.SliderMove) self.setRepeatAction(self.SliderNoAction) self.setSliderDown(True) break if self.active_slider < 0: self.pressed_control = QtGui.QStyle.SC_SliderHandle self.click_offset = self.__pixelPosToRangeValue(self.__pick(event.pos())) self.triggerAction(self.SliderMove) self.setRepeatAction(self.SliderNoAction) else: event.ignore() def mouseMoveEvent(self, event): if self.pressed_control != QtGui.QStyle.SC_SliderHandle: event.ignore() return event.accept() new_pos = self.__pixelPosToRangeValue(self.__pick(event.pos())) opt = QtGui.QStyleOptionSlider() self.initStyleOption(opt) if self.active_slider < 0: offset = new_pos - self.click_offset self._high += offset self._low += offset if self._low < self.minimum(): diff = self.minimum() - self._low self._low += diff self._high += diff if self._high > self.maximum(): diff = self.maximum() - self._high self._low += diff self._high += diff elif self.active_slider == 0: if new_pos >= self._high: new_pos = self._high - 1 self._low = new_pos else: if new_pos <= self._low: new_pos = self._low + 1 self._high = new_pos self.click_offset = new_pos self.update() self.emit(QtCore.SIGNAL('sliderMoved'), self._low, self._high) def __pick(self, pt): if self.orientation() == QtCore.Qt.Horizontal: return pt.x() else: return pt.y() def __pixelPosToRangeValue(self, pos): opt = QtGui.QStyleOptionSlider() self.initStyleOption(opt) style = QtGui.QApplication.style() gr = style.subControlRect(style.CC_Slider, opt, style.SC_SliderGroove, self) sr = style.subControlRect(style.CC_Slider, opt, style.SC_SliderHandle, self) if self.orientation() == QtCore.Qt.Horizontal: slider_length = sr.width() slider_min = gr.x() slider_max = gr.right() - slider_length + 1 else: slider_length = sr.height() slider_min = gr.y() slider_max = gr.bottom() - slider_length + 1 return style.sliderValueFromPosition(self.minimum(), self.maximum(), pos-slider_min, slider_max-slider_min, opt.upsideDown) ########################### ## Visualizer Class ## ########################### # Main class that contains the Visualizer Qt and all functions class Visualizer(QtGui.QMainWindow): def __init__(self): super(Visualizer, self).__init__() # Initialization is a 3 phase process : Loading Data, Declaring Variables and Creating the GUI self.LoadData() self.Reset() self.create_main_frame() self.ResetFunc() def create_main_frame(self): # Setting Up the Main Frame of the GUI self.main_frame = QtGui.QWidget() self.statusBar().showMessage('Loading') # Declaring the matplotlib canvas for plotting graphs self.dpi=100 self.fig = Figure((6.0, 5.5), dpi=self.dpi) self.canvas = FigureCanvas(self.fig) self.canvas.setParent(self.main_frame) self.ax = self.fig.gca(projection='3d') self.fig2 = Figure((6.0, 6.0), dpi=self.dpi*2) self.canvas2 = FigureCanvas(self.fig2) self.ax2 = self.fig2.gca(projection='3d') self.datetext2 = self.ax2.text2D(0, 1, 'Date : ', transform=self.ax2.transAxes) self.datetext = self.ax.text2D(0, 1, 'Date : ', transform=self.ax.transAxes) # Declaring the Texts in the GUI, fonts and Spacers to control the size of sliders self.FactorLable=QtGui.QLabel('Factors', self) 
self.font = QtGui.QFont("Times", 16, QtGui.QFont.Bold, True) self.font1 = QtGui.QFont("Times", 12) self.font2 = QtGui.QFont("Times", 14, QtGui.QFont.Bold, True) self.font3 = QtGui.QFont("Times", 20, QtGui.QFont.Bold, True) self.VisLable = QtGui.QLabel('QuantViz', self) self.SpacerItem1 = Qt.QSpacerItem(450,0,Qt.QSizePolicy.Fixed,Qt.QSizePolicy.Expanding) self.SpacerItem2 = Qt.QSpacerItem(300,0,Qt.QSizePolicy.Fixed,Qt.QSizePolicy.Expanding) self.SpacerItem3 = Qt.QSpacerItem(300,0,Qt.QSizePolicy.Fixed,Qt.QSizePolicy.Expanding) self.SpacerItem4 = Qt.QSpacerItem(1,500,Qt.QSizePolicy.Fixed) self.VisLable.setFont(self.font3) self.FactorLable.setFont(self.font) ########### Region for declaring the varibles associated with X Axis ######################## self.XLable=QtGui.QLabel('X', self) self.XLable.setFont(self.font2) self.XCombo = QtGui.QComboBox(self) self.XCombo.activated[str].connect(self.XComboActivated) self.XMinTag=QtGui.QLabel('Min :', self) self.XMaxTag=QtGui.QLabel('Max :', self) self.XLimitTag=QtGui.QLabel('Scale:', self) self.XSliceTag=QtGui.QLabel('Slice :', self) self.XMinLable=QtGui.QLabel(str(self.XMin), self) self.XMinLable.setFont(self.font1) self.XRange=RangeSlider(Qt.Qt.Horizontal) self.XRangeSlice=RangeSlider(Qt.Qt.Horizontal) self.XMaxLable=QtGui.QLabel(str(self.XMax), self) self.XMaxLable.setFont(self.font1) self.XMin_Box= QtGui.QLineEdit() self.XMax_Box= QtGui.QLineEdit() self.XMinSlice_Box= QtGui.QLineEdit() self.XMaxSlice_Box= QtGui.QLineEdit() self.XMin_Box.setMaxLength(4) self.XMax_Box.setMaxLength(4) self.XMinSlice_Box.setMaxLength(4) self.XMaxSlice_Box.setMaxLength(4) self.XMin_Box.setFixedSize(50,27) self.XMax_Box.setFixedSize(50,27) self.XMinSlice_Box.setFixedSize(50,27) self.XMaxSlice_Box.setFixedSize(50,27) self.connect(self.XMin_Box, QtCore.SIGNAL('editingFinished()'), self.XMin_BoxInput) self.connect(self.XMax_Box, QtCore.SIGNAL('editingFinished()'), self.XMax_BoxInput) self.connect(self.XMinSlice_Box, QtCore.SIGNAL('editingFinished()'), self.XMinSlice_BoxInput) self.connect(self.XMaxSlice_Box, QtCore.SIGNAL('editingFinished()'), self.XMaxSlice_BoxInput) ############# GUI Box - Related to X ################### Xhbox1 = QtGui.QHBoxLayout() for w in [ self.XLable, self.XCombo]: Xhbox1.addWidget(w) Xhbox1.setAlignment(w, QtCore.Qt.AlignVCenter) Xhbox1.addStretch(1) Xhbox2 = QtGui.QHBoxLayout() for w in [self.XMinTag, self.XMinLable]: Xhbox2.addWidget(w) Xhbox2.setAlignment(w, QtCore.Qt.AlignVCenter) Xhbox2.addStretch(1) for w in [self.XMaxTag, self.XMaxLable]: Xhbox2.addWidget(w) Xhbox2.setAlignment(w, QtCore.Qt.AlignVCenter) Xhbox3 = QtGui.QHBoxLayout() for w in [ self.XLimitTag ,self.XMin_Box, self.XRange, self.XMax_Box]: Xhbox3.addWidget(w) Xhbox3.setAlignment(w, QtCore.Qt.AlignVCenter) Xhbox4 = QtGui.QHBoxLayout() for w in [ self.XSliceTag, self.XMinSlice_Box, self.XRangeSlice, self.XMaxSlice_Box]: Xhbox4.addWidget(w) Xhbox4.setAlignment(w, QtCore.Qt.AlignVCenter) Xvbox1 = QtGui.QVBoxLayout() Xvbox1.addLayout(Xhbox1) Xvbox1.addLayout(Xhbox2) Xvbox1.addLayout(Xhbox3) Xvbox1.addLayout(Xhbox4) ########### Region for declaring the varibles associated with Y Axis ######################## self.YLable=QtGui.QLabel('Y', self) self.YLable.setFont(self.font2) self.YCombo = QtGui.QComboBox(self) self.YCombo.activated[str].connect(self.YComboActivated) self.YMinTag=QtGui.QLabel('Min :', self) self.YMaxTag=QtGui.QLabel('Max :', self) self.YLimitTag=QtGui.QLabel('Scale:', self) self.YSliceTag=QtGui.QLabel('Slice :', self) self.YMinLable=QtGui.QLabel(str(self.YMin), self) 
self.YMinLable.setFont(self.font1) self.YRange=RangeSlider(Qt.Qt.Horizontal) self.YRangeSlice=RangeSlider(Qt.Qt.Horizontal) self.YMaxLable=QtGui.QLabel(str(self.YMax), self) self.YMaxLable.setFont(self.font1) self.YMin_Box= QtGui.QLineEdit() self.YMax_Box= QtGui.QLineEdit() self.YMinSlice_Box= QtGui.QLineEdit() self.YMaxSlice_Box= QtGui.QLineEdit() self.YMin_Box.setMaxLength(4) self.YMax_Box.setMaxLength(4) self.YMinSlice_Box.setMaxLength(4) self.YMaxSlice_Box.setMaxLength(4) self.YMin_Box.setFixedSize(50,27) self.YMax_Box.setFixedSize(50,27) self.YMinSlice_Box.setFixedSize(50,27) self.YMaxSlice_Box.setFixedSize(50,27) self.connect(self.YMin_Box, QtCore.SIGNAL('editingFinished()'), self.YMin_BoxInput) self.connect(self.YMax_Box, QtCore.SIGNAL('editingFinished()'), self.YMax_BoxInput) self.connect(self.YMinSlice_Box, QtCore.SIGNAL('editingFinished()'), self.YMinSlice_BoxInput) self.connect(self.YMaxSlice_Box, QtCore.SIGNAL('editingFinished()'), self.YMaxSlice_BoxInput) ############# GUI Box - Related to Y ################### Yhbox1 = QtGui.QHBoxLayout() for w in [ self.YLable, self.YCombo]: Yhbox1.addWidget(w) Yhbox1.setAlignment(w, QtCore.Qt.AlignVCenter) Yhbox1.addStretch(1) Yhbox2 = QtGui.QHBoxLayout() for w in [self.YMinTag, self.YMinLable]: Yhbox2.addWidget(w) Yhbox2.setAlignment(w, QtCore.Qt.AlignVCenter) Yhbox2.addStretch(1) for w in [self.YMaxTag, self.YMaxLable]: Yhbox2.addWidget(w) Yhbox2.setAlignment(w, QtCore.Qt.AlignVCenter) Yhbox3 = QtGui.QHBoxLayout() for w in [ self.YLimitTag, self.YMin_Box, self.YRange, self.YMax_Box]: Yhbox3.addWidget(w) Yhbox3.setAlignment(w, QtCore.Qt.AlignVCenter) Yhbox4 = QtGui.QHBoxLayout() for w in [ self.YSliceTag,self.YMinSlice_Box, self.YRangeSlice, self.YMaxSlice_Box]: Yhbox4.addWidget(w) Yhbox4.setAlignment(w, QtCore.Qt.AlignVCenter) Yvbox1 = QtGui.QVBoxLayout() Yvbox1.addLayout(Yhbox1) Yvbox1.addLayout(Yhbox2) Yvbox1.addLayout(Yhbox3) Yvbox1.addLayout(Yhbox4) ########### Region for declaring the varibles associated with Z Axis ######################## self.ZLable=QtGui.QLabel('Z', self) self.ZLable.setFont(self.font2) self.ZCombo = QtGui.QComboBox(self) self.ZCombo.activated[str].connect(self.ZComboActivated) self.ZMinTag=QtGui.QLabel('Min :', self) self.ZMaxTag=QtGui.QLabel('Max :', self) self.ZLimitTag=QtGui.QLabel('Scale:', self) self.ZSliceTag=QtGui.QLabel('Slice :', self) self.ZMinLable=QtGui.QLabel(str(self.ZMin), self) self.ZMinLable.setFont(self.font1) self.ZRange=RangeSlider(Qt.Qt.Horizontal) self.ZRangeSlice=RangeSlider(Qt.Qt.Horizontal) self.ZMaxLable=QtGui.QLabel(str(self.ZMax), self) self.ZMaxLable.setFont(self.font1) self.ZMin_Box= QtGui.QLineEdit() self.ZMax_Box= QtGui.QLineEdit() self.ZMinSlice_Box= QtGui.QLineEdit() self.ZMaxSlice_Box= QtGui.QLineEdit() self.ZMin_Box.setMaxLength(4) self.ZMax_Box.setMaxLength(4) self.ZMinSlice_Box.setMaxLength(4) self.ZMaxSlice_Box.setMaxLength(4) self.ZMin_Box.setFixedSize(50,27) self.ZMax_Box.setFixedSize(50,27) self.ZMinSlice_Box.setFixedSize(50,27) self.ZMaxSlice_Box.setFixedSize(50,27) self.connect(self.ZMin_Box, QtCore.SIGNAL('editingFinished()'), self.ZMin_BoxInput) self.connect(self.ZMax_Box, QtCore.SIGNAL('editingFinished()'), self.ZMax_BoxInput) self.connect(self.ZMinSlice_Box, QtCore.SIGNAL('editingFinished()'), self.ZMinSlice_BoxInput) self.connect(self.ZMaxSlice_Box, QtCore.SIGNAL('editingFinished()'), self.ZMaxSlice_BoxInput) ############# GUI Box - Related to Z ################### Zhbox1 = QtGui.QHBoxLayout() for w in [ self.ZLable, self.ZCombo]: Zhbox1.addWidget(w) 
Zhbox1.setAlignment(w, QtCore.Qt.AlignVCenter) Zhbox1.addStretch(1) Zhbox2 = QtGui.QHBoxLayout() for w in [self.ZMinTag, self.ZMinLable]: Zhbox2.addWidget(w) Zhbox2.setAlignment(w, QtCore.Qt.AlignVCenter) Zhbox2.addStretch(1) for w in [self.ZMaxTag, self.ZMaxLable]: Zhbox2.addWidget(w) Zhbox2.setAlignment(w, QtCore.Qt.AlignVCenter) Zhbox3 = QtGui.QHBoxLayout() for w in [ self.ZLimitTag ,self.ZMin_Box, self.ZRange, self.ZMax_Box]: Zhbox3.addWidget(w) Zhbox3.setAlignment(w, QtCore.Qt.AlignVCenter) Zhbox4 = QtGui.QHBoxLayout() for w in [ self.ZSliceTag,self.ZMinSlice_Box, self.ZRangeSlice, self.ZMaxSlice_Box]: Zhbox4.addWidget(w) Zhbox4.setAlignment(w, QtCore.Qt.AlignVCenter) Zvbox1 = QtGui.QVBoxLayout() Zvbox1.addLayout(Zhbox1) Zvbox1.addLayout(Zhbox2) Zvbox1.addLayout(Zhbox3) Zvbox1.addLayout(Zhbox4) ########### Region for declaring the varibles associated with Size ######################## self.SizeLable=QtGui.QLabel('Size ', self) self.SizeLable.setFont(self.font2) self.SizeCombo = QtGui.QComboBox(self) self.SizeCombo.activated[str].connect(self.SComboActivated) self.SMinTag=QtGui.QLabel('Min :', self) self.SMaxTag=QtGui.QLabel('Max :', self) self.SLimitTag=QtGui.QLabel('Scale:', self) self.SSliceTag=QtGui.QLabel('Slice :', self) self.SMinLable=QtGui.QLabel(str(self.SMin), self) self.SMinLable.setFont(self.font1) self.SRange=RangeSlider(Qt.Qt.Horizontal) self.SRangeSlice=RangeSlider(Qt.Qt.Horizontal) self.SMaxLable=QtGui.QLabel(str(self.SMax), self) self.SMaxLable.setFont(self.font1) self.SMin_Box= QtGui.QLineEdit() self.SMax_Box= QtGui.QLineEdit() self.SMin_Box.setMaxLength(4) self.SMax_Box.setMaxLength(4) self.SMin_Box.setFixedSize(50,27) self.SMax_Box.setFixedSize(50,27) self.connect(self.SMin_Box, QtCore.SIGNAL('editingFinished()'), self.SMin_BoxInput) self.connect(self.SMax_Box, QtCore.SIGNAL('editingFinished()'), self.SMax_BoxInput) self.SMinSlice_Box= QtGui.QLineEdit() self.SMaxSlice_Box= QtGui.QLineEdit() self.SMinSlice_Box.setMaxLength(4) self.SMaxSlice_Box.setMaxLength(4) self.SMinSlice_Box.setFixedSize(50,27) self.SMaxSlice_Box.setFixedSize(50,27) self.connect(self.SMinSlice_Box, QtCore.SIGNAL('editingFinished()'), self.SMinSlice_BoxInput) self.connect(self.SMaxSlice_Box, QtCore.SIGNAL('editingFinished()'), self.SMaxSlice_BoxInput) ############# GUI Box - Related to Size ################### Shbox1 = QtGui.QHBoxLayout() for w in [ self.SizeLable, self.SizeCombo]: Shbox1.addWidget(w) Shbox1.setAlignment(w, QtCore.Qt.AlignVCenter) Shbox1.addStretch(1) Shbox2 = QtGui.QHBoxLayout() for w in [self.SMinTag, self.SMinLable]: Shbox2.addWidget(w) Shbox2.setAlignment(w, QtCore.Qt.AlignVCenter) Shbox2.addStretch(1) for w in [self.SMaxTag, self.SMaxLable]: Shbox2.addWidget(w) Shbox2.setAlignment(w, QtCore.Qt.AlignVCenter) Shbox3 = QtGui.QHBoxLayout() for w in [ self.SLimitTag ,self.SMin_Box, self.SRange, self.SMax_Box]: Shbox3.addWidget(w) Shbox3.setAlignment(w, QtCore.Qt.AlignVCenter) Shbox4 = QtGui.QHBoxLayout() for w in [ self.SSliceTag, self.SMinSlice_Box, self.SRangeSlice, self.SMaxSlice_Box]: Shbox4.addWidget(w) Shbox4.setAlignment(w, QtCore.Qt.AlignVCenter) Svbox1 = QtGui.QVBoxLayout() Svbox1.addLayout(Shbox1) Svbox1.addLayout(Shbox2) Svbox1.addLayout(Shbox3) Svbox1.addLayout(Shbox4) ########### Region for declaring the varibles associated with Color ######################## self.ColorLable=QtGui.QLabel('Color', self) self.ColorLable.setFont(self.font2) self.ColorCombo = QtGui.QComboBox(self) self.ColorCombo.activated[str].connect(self.CComboActivated) self.CMinTag=QtGui.QLabel('Min 
:', self) self.CMaxTag=QtGui.QLabel('Max :', self) self.CLimitTag=QtGui.QLabel('Scale:', self) self.CSliceTag=QtGui.QLabel('Slice :', self) self.CMinLable=QtGui.QLabel(str(self.CMin), self) self.CMinLable.setFont(self.font1) self.CRange=RangeSlider(Qt.Qt.Horizontal) self.CRangeSlice=RangeSlider(Qt.Qt.Horizontal) self.CMaxLable=QtGui.QLabel(str(self.CMax), self) self.CMaxLable.setFont(self.font1) self.CMin_Box= QtGui.QLineEdit() self.CMax_Box= QtGui.QLineEdit() self.CMin_Box.setMaxLength(4) self.CMax_Box.setMaxLength(4) self.CMin_Box.setFixedSize(50,27) self.CMax_Box.setFixedSize(50,27) self.connect(self.CMin_Box, QtCore.SIGNAL('editingFinished()'), self.CMin_BoxInput) self.connect(self.CMax_Box, QtCore.SIGNAL('editingFinished()'), self.CMax_BoxInput) self.CMinSlice_Box= QtGui.QLineEdit() self.CMaxSlice_Box= QtGui.QLineEdit() self.CMinSlice_Box.setMaxLength(4) self.CMaxSlice_Box.setMaxLength(4) self.CMinSlice_Box.setFixedSize(50,27) self.CMaxSlice_Box.setFixedSize(50,27) self.connect(self.CMinSlice_Box, QtCore.SIGNAL('editingFinished()'), self.CMinSlice_BoxInput) self.connect(self.CMaxSlice_Box, QtCore.SIGNAL('editingFinished()'), self.CMaxSlice_BoxInput) ############# GUI Box - Related to Color ################### Chbox1 = QtGui.QHBoxLayout() for w in [self.ColorLable, self.ColorCombo]: Chbox1.addWidget(w) Chbox1.setAlignment(w, QtCore.Qt.AlignVCenter) Chbox1.addStretch(1) Chbox2 = QtGui.QHBoxLayout() for w in [self.CMinTag, self.CMinLable]: Chbox2.addWidget(w) Chbox2.setAlignment(w, QtCore.Qt.AlignVCenter) Chbox2.addStretch(2) for w in [self.CMaxTag, self.CMaxLable]: Chbox2.addWidget(w) Chbox2.setAlignment(w, QtCore.Qt.AlignVCenter) Chbox3 = QtGui.QHBoxLayout() for w in [ self.CLimitTag ,self.CMin_Box, self.CRange, self.CMax_Box]: Chbox3.addWidget(w) Chbox3.setAlignment(w, QtCore.Qt.AlignVCenter) Chbox4 = QtGui.QHBoxLayout() for w in [ self.CSliceTag, self.CMinSlice_Box, self.CRangeSlice, self.CMaxSlice_Box]: Chbox4.addWidget(w) Chbox4.setAlignment(w, QtCore.Qt.AlignVCenter) Cvbox1 = QtGui.QVBoxLayout() Cvbox1.addLayout(Chbox1) Cvbox1.addLayout(Chbox2) Cvbox1.addLayout(Chbox3) Cvbox1.addLayout(Chbox4) ############# GUI Lines - All horizontal and Vertical lines are declared here ################### self.Frame1= QtGui.QFrame() self.Frame1.setFrameShape(4) self.Frame2= QtGui.QFrame() self.Frame2.setFrameShape(4) self.Frame3= QtGui.QFrame() self.Frame3.setFrameShape(4) self.Frame4= QtGui.QFrame() self.Frame4.setFrameShape(4) self.Frame5= QtGui.QFrame() self.Frame5.setFrameShape(4) self.Frame6= QtGui.QFrame() self.Frame6.setFrameShape(4) self.Frame7= QtGui.QFrame() self.Frame7.setFrameShape(4) self.Frame8= QtGui.QFrame() self.Frame8.setFrameShape(4) self.VFrame= QtGui.QFrame() self.VFrame.setFrameShape(5) self.VFrame1= QtGui.QFrame() self.VFrame1.setFrameShape(5) ############# Region to declare variables related to Date ################### self.DLable=QtGui.QLabel('Time ', self) self.DLable.setFont(self.font2) self.DateLable=QtGui.QLabel(self.dayofplot.date().isoformat(), self) self.DateLable.setFont(self.font1) self.DateMinLable=QtGui.QLabel(self.startday.date().isoformat(), self) self.DateMinLable.setFont(self.font1) self.DateSlider= QtGui.QSlider(QtCore.Qt.Horizontal, self) self.DateSlider.setRange(0,len(self.timestamps)-1) self.DateSlider.valueChanged.connect(self.DateActivated) self.DateMaxLable=QtGui.QLabel(self.endday.date().isoformat(), self) self.DateMaxLable.setFont(self.font1) ############# GUI Box - Related to Date ################### Datehbox1 = QtGui.QHBoxLayout() 
Datehbox1.addWidget(self.DLable) Datehbox1.addWidget(self.DateLable) Datehbox1.addStretch(1) Datehbox2 = QtGui.QHBoxLayout() for w in [ self.DateMinLable, self.DateSlider, self.DateMaxLable]: Datehbox2.addWidget(w) Datehbox2.setAlignment(w, QtCore.Qt.AlignVCenter) Datevbox1 = QtGui.QVBoxLayout() Datevbox1.addLayout(Datehbox1) Datevbox1.addLayout(Datehbox2) Datehbox3 = QtGui.QHBoxLayout() Datehbox3.addLayout(Datevbox1) Datehbox3.addWidget(self.VFrame1) ############# Region for declaring all the Checkboxes for GUI ################### self.TextCheck = QtGui.QCheckBox('Show Label', self) self.Day5Check = QtGui.QCheckBox(str(LOOKBACK_DAYS)+' Days', self) self.SizeCheck = QtGui.QCheckBox('Fix Size', self) self.ColorCheck = QtGui.QCheckBox('Fix Color', self) self.MovieCheck = QtGui.QCheckBox('Smooth for Movie', self) Checkhbox1 = QtGui.QVBoxLayout() Checkhbox1.addWidget(self.TextCheck) Checkhbox1.addWidget(self.Day5Check) Checkhbox1.addWidget(self.SizeCheck) Checkhbox1.addWidget(self.ColorCheck) Checkhbox1.addWidget(self.MovieCheck) ############# Region for Declaring all the buttons in the GUI ################### self.UpdateButton =QtGui.QPushButton('Plot',self) self.UpdateButton.setToolTip('Update the plot') self.UpdateButton.resize(self.UpdateButton.sizeHint()) self.UpdateButton.clicked.connect(self.PlotCanvas) self.SaveButton =QtGui.QPushButton('Save Plot',self) self.SaveButton.setToolTip('Save the plot') self.SaveButton.resize(self.SaveButton.sizeHint()) self.SaveButton.clicked.connect(self.save_plot) self.MovieButton =QtGui.QPushButton('Movie',self) self.MovieButton.setToolTip('Make a movie over time') self.MovieButton.resize(self.MovieButton.sizeHint()) self.MovieButton.clicked.connect(self.make_movie) self.AboutButton =QtGui.QPushButton('About',self) self.AboutButton.setToolTip('About the Visualizer') self.AboutButton.resize(self.AboutButton.sizeHint()) self.AboutButton.clicked.connect(self.on_about) self.DataButton =QtGui.QPushButton('Change Data',self) self.DataButton.setToolTip('Load new Data') self.DataButton.resize(self.DataButton.sizeHint()) self.DataButton.clicked.connect(self.ChangeDataset) self.ResetButton =QtGui.QPushButton('Reset',self) self.ResetButton.setToolTip('Reset Settings') self.ResetButton.resize(self.ResetButton.sizeHint()) self.ResetButton.clicked.connect(self.ResetSettings) self.ExitButton =QtGui.QPushButton('Exit',self) self.ExitButton.setToolTip('Exit') self.ExitButton.resize(self.ExitButton.sizeHint()) self.ExitButton.clicked.connect(QtGui.qApp.quit) self.SaveSettingsButton =QtGui.QPushButton('Save
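# A possible standalone usage sketch for the RangeSlider widget defined above
# (illustrative only and not called by the Visualizer; the 0..SLIDER_RANGE
# bounds and the handler are made up). It relies on the short-circuit
# 'sliderMoved' signal the class emits with the low and high values.
def _rangeslider_demo():
    def on_range_moved(low, high):
        print('range:', low, high)

    app = QtGui.QApplication(sys.argv)
    slider = RangeSlider(QtCore.Qt.Horizontal)
    slider.setMinimum(0)
    slider.setMaximum(SLIDER_RANGE)
    slider.setLow(20)
    slider.setHigh(80)
    QtCore.QObject.connect(slider, QtCore.SIGNAL('sliderMoved'), on_range_moved)
    slider.show()
    return app.exec_()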
await runt.snap.getNodeByNdef(node.get('n2')) if pivo: yield pivo, path.fork(pivo) continue ######################################################################### # regular "-> form" pivot (ie inet:dns:a -> inet:fqdn) found = False # have we found a ref/pivot? refs = node.form.getRefsOut() for refsname, refsform in refs.get('prop'): if refsform != destform.name: continue found = True refsvalu = node.get(refsname) if refsvalu is not None: async for pivo in runt.snap.nodesByPropValu(refsform, '=', refsvalu): yield pivo, path.fork(pivo) for refsname, refsform in refs.get('array'): if refsform != destform.name: continue found = True refsvalu = node.get(refsname) if refsvalu is not None: for refselem in refsvalu: async for pivo in runt.snap.nodesByPropValu(destform.name, '=', refselem): yield pivo, path.fork(pivo) for refsname in refs.get('ndef'): found = True refsvalu = node.get(refsname) if refsvalu is not None and refsvalu[0] == destform.name: pivo = await runt.snap.getNodeByNdef(refsvalu) if pivo is not None: yield pivo, path.fork(pivo) ######################################################################### # reverse "-> form" pivots (ie inet:fqdn -> inet:dns:a) refs = destform.getRefsOut() # "reverse" property references... for refsname, refsform in refs.get('prop'): if refsform != node.form.name: continue found = True refsprop = destform.props.get(refsname) async for pivo in runt.snap.nodesByPropValu(refsprop.full, '=', node.ndef[1]): yield pivo, path.fork(pivo) # "reverse" array references... for refsname, refsform in refs.get('array'): if refsform != node.form.name: continue found = True destprop = destform.props.get(refsname) async for pivo in runt.snap.nodesByPropArray(destprop.full, '=', node.ndef[1]): yield pivo, path.fork(pivo) # "reverse" ndef references... for refsname in refs.get('ndef'): found = True refsprop = destform.props.get(refsname) async for pivo in runt.snap.nodesByPropValu(refsprop.full, '=', node.ndef): yield pivo, path.fork(pivo) if not found: mesg = f'No pivot found for {node.form.name} -> {destform.name}.' raise s_exc.NoSuchPivot(n1=node.form.name, n2=destform.name, mesg=mesg) class PropPivotOut(PivotOper): ''' :prop -> * ''' async def run(self, runt, genr): warned = False async for node, path in genr: name = await self.kids[0].compute(path) prop = node.form.props.get(name) if prop is None: # all filters must sleep await asyncio.sleep(0) continue valu = node.get(name) if valu is None: # all filters must sleep await asyncio.sleep(0) continue if prop.type.isarray: fname = prop.type.arraytype.name if runt.model.forms.get(fname) is None: if not warned: mesg = f'The source property "{name}" array type "{fname}" is not a form. Cannot pivot.' await runt.snap.warn(mesg) warned = True continue for item in valu: async for pivo in runt.snap.nodesByPropValu(fname, '=', item): yield pivo, path.fork(pivo) continue # ndef pivot out syntax... # :ndef -> * if isinstance(prop.type, s_types.Ndef): pivo = await runt.snap.getNodeByNdef(valu) if pivo is None: logger.warning(f'Missing node corresponding to ndef {valu}') continue yield pivo, path.fork(pivo) continue # :prop -> * fname = prop.type.name if prop.modl.form(fname) is None: if warned is False: await runt.snap.warn(f'The source property "{name}" type "{fname}" is not a form. Cannot pivot.') warned = True continue ndef = (fname, valu) pivo = await runt.snap.getNodeByNdef(ndef) # A node explicitly deleted in the graph or missing from a underlying layer # could cause this lift to return None. 
if pivo: yield pivo, path.fork(pivo) class PropPivot(PivotOper): ''' :foo -> bar:foo ''' async def run(self, runt, genr): warned = False name = self.kids[1].value() prop = runt.model.props.get(name) if prop is None: raise s_exc.NoSuchProp(name=name) # TODO if we are pivoting to a form, use ndef! async for node, path in genr: if self.isjoin: yield node, path srcprop, valu = await self.kids[0].getPropAndValu(path) if valu is None: # all filters must sleep await asyncio.sleep(0) continue # TODO cache/bypass normalization in loop! try: # pivoting from an array prop to a non-array prop needs an extra loop if srcprop.type.isarray and not prop.type.isarray: for arrayval in valu: async for pivo in runt.snap.nodesByPropValu(prop.full, '=', arrayval): yield pivo, path.fork(pivo) continue async for pivo in runt.snap.nodesByPropValu(prop.full, '=', valu): yield pivo, path.fork(pivo) except (s_exc.BadTypeValu, s_exc.BadLiftValu) as e: if not warned: logger.warning(f'Caught error during pivot: {e.items()}') warned = True items = e.items() mesg = items.pop('mesg', '') mesg = ': '.join((f'{e.__class__.__qualname__} [{repr(valu)}] during pivot', mesg)) await runt.snap.fire('warn', mesg=mesg, **items) class Cond(AstNode): def getLiftHints(self): return [] async def getCondEval(self, runt): # pragma: no cover raise s_exc.NoSuchImpl(name=f'{self.__class__.__name__}.getCondEval()') class SubqCond(Cond): def __init__(self, kids=()): Cond.__init__(self, kids=kids) self.funcs = { '=': self._subqCondEq, '>': self._subqCondGt, '<': self._subqCondLt, '>=': self._subqCondGe, '<=': self._subqCondLe, '!=': self._subqCondNe, } async def _runSubQuery(self, runt, node, path): size = 1 genr = s_common.agen((node, path)) async for item in self.kids[0].run(runt, genr): yield size, item size += 1 def _subqCondEq(self, runt): async def cond(node, path): size = 0 valu = s_stormtypes.intify(await self.kids[2].compute(path)) async for size, item in self._runSubQuery(runt, node, path): if size > valu: return False return size == valu return cond def _subqCondGt(self, runt): async def cond(node, path): valu = s_stormtypes.intify(await self.kids[2].compute(path)) async for size, item in self._runSubQuery(runt, node, path): if size > valu: return True return False return cond def _subqCondLt(self, runt): async def cond(node, path): valu = s_stormtypes.intify(await self.kids[2].compute(path)) async for size, item in self._runSubQuery(runt, node, path): if size >= valu: return False return True return cond def _subqCondGe(self, runt): async def cond(node, path): valu = s_stormtypes.intify(await self.kids[2].compute(path)) async for size, item in self._runSubQuery(runt, node, path): if size >= valu: return True return False return cond def _subqCondLe(self, runt): async def cond(node, path): valu = s_stormtypes.intify(await self.kids[2].compute(path)) async for size, item in self._runSubQuery(runt, node, path): if size > valu: return False return True return cond def _subqCondNe(self, runt): async def cond(node, path): size = 0 valu = s_stormtypes.intify(await self.kids[2].compute(path)) async for size, item in self._runSubQuery(runt, node, path): if size > valu: return True return size != valu return cond async def getCondEval(self, runt): if len(self.kids) == 3: cmpr = self.kids[1].value() ctor = self.funcs.get(cmpr) if ctor is None: raise s_exc.NoSuchCmpr(cmpr=cmpr, type='subquery') return ctor(runt) subq = self.kids[0] async def cond(node, path): genr = s_common.agen((node, path)) async for _ in subq.run(runt, genr): return True 
return False return cond class OrCond(Cond): ''' <cond> or <cond> ''' async def getCondEval(self, runt): cond0 = await self.kids[0].getCondEval(runt) cond1 = await self.kids[1].getCondEval(runt) async def cond(node, path): if await cond0(node, path): return True return await cond1(node, path) return cond class AndCond(Cond): ''' <cond> and <cond> ''' def getLiftHints(self): h0 = self.kids[0].getLiftHints() h1 = self.kids[1].getLiftHints() return h0 + h1 async def getCondEval(self, runt): cond0 = await self.kids[0].getCondEval(runt) cond1 = await self.kids[1].getCondEval(runt) async def cond(node, path): if not await cond0(node, path): return False return await cond1(node, path) return cond class NotCond(Cond): ''' not <cond> ''' async def getCondEval(self, runt): kidcond = await self.kids[0].getCondEval(runt) async def cond(node, path): return not await kidcond(node, path) return cond class TagCond(Cond): ''' #foo.bar ''' def getLiftHints(self): kid = self.kids[0] if not isinstance(kid, TagMatch): # TODO: we might hint based on variable value return [] if not kid.isconst or kid.hasglob(): return [] return ( ('tag', {'name': kid.value()}), ) async def getCondEval(self, runt): assert len(self.kids) == 1 kid = self.kids[0] if isinstance(kid, TagMatch) and kid.isconst: name = self.kids[0].value() else: name = None if name is not None: # Allow for a user to ask for #* to signify "any tags on this node" if name == '*': async def cond(node, path): # Check if the tags dictionary has any members return bool(node.tags) return cond # Allow a user to use tag globbing to do regex matching of a node. if '*' in name: reobj = s_cache.getTagGlobRegx(name) def getIsHit(tag): return reobj.fullmatch(tag) # This cache persists per-query cache = s_cache.FixedCache(getIsHit) async def cond(node, path): return any((cache.get(p) for p in node.tags)) return cond # Default exact match async def cond(node, path): return node.tags.get(name) is not None return cond # kid is a non-runtsafe VarValue: dynamically evaluate value of variable for each node async def cond(node, path): name = await kid.compute(path) if name == '*': return bool(node.tags) if '*' in name: reobj = s_cache.getTagGlobRegx(name) return any(reobj.fullmatch(p) for p in node.tags) return node.tags.get(name) is not None return cond class HasRelPropCond(Cond): async def getCondEval(self, runt): relprop = self.kids[0] assert isinstance(relprop, RelProp) if relprop.isconst: name = relprop.value() async def cond(node, path): return node.has(name) return cond # relprop name itself is variable, so dynamically compute async def cond(node, path): name = await relprop.compute(path) return node.has(name) return cond class HasTagPropCond(Cond): async def getCondEval(self, runt): async def cond(node, path): tag, name = await self.kids[0].compute(path) return node.hasTagProp(tag, name) return cond class HasAbsPropCond(Cond): async def getCondEval(self, runt): name = self.kids[0].value() prop = runt.model.props.get(name) if prop is None: raise s_exc.NoSuchProp(name=name)
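# A small standalone illustration (asyncio only, no synapse imports) of the
# counting pattern used by SubqCond above: consume the subquery results while
# tracking the count and stop as soon as the comparison is decided, rather
# than exhausting the generator.
import asyncio

async def _agen(n):
    for i in range(n):
        yield i

async def _subq_size_eq(gen, valu):
    """True if the generator yields exactly ``valu`` items (early exit on >)."""
    size = 0
    async for _ in gen:
        size += 1
        if size > valu:
            return False
    return size == valu

async def _demo():
    print(await _subq_size_eq(_agen(3), 3))   # True
    print(await _subq_size_eq(_agen(10), 3))  # False, stops after 4 items

asyncio.run(_demo())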
The support of the underlying event train (in seconds). """ return self._support @property def fs(self): """(float) Sampling rate / frequency (Hz).""" return self._fs @fs.setter def fs(self, val): """(float) Sampling rate / frequency (Hz).""" if self._fs == val: return try: if val <= 0: raise ValueError("sampling rate must be positive") except: raise TypeError("sampling rate must be a scalar") self._fs = val @property def label(self): """Label pertaining to the source of the event train.""" if self._label is None: warnings.warn("label has not yet been specified") return self._label @label.setter def label(self, val): if val is not None: try: # cast to str: label = str(val) except TypeError: raise TypeError("cannot convert label to string") else: label = val self._label = label def _source_subset(self, source_list): """Return an EventArray restricted to a subset of sources. Parameters ---------- source_list : array-like Array or list of source_ids. """ source_subset_ids = [] for source in source_list: try: id = self.source_ids.index(source) except ValueError: warnings.warn("source_id " + str(source) + " not found in EventArray; ignoring") pass else: source_subset_ids.append(id) new_source_ids = (np.asarray(self.source_ids)[source_subset_ids]).tolist() new_source_labels = (np.asarray(self.source_labels)[source_subset_ids]).tolist() if isinstance(self, EventArray): if len(source_subset_ids) == 0: warnings.warn("no sources remaining in requested source subset") return EventArray(empty=True) eventtrainarray = EventArray(empty=True) exclude = ["_time", "source_ids", "source_labels"] attrs = (x for x in self.__attributes__ if x not in exclude) with warnings.catch_warnings(): warnings.simplefilter("ignore") for attr in attrs: exec("eventtrainarray." + attr + " = self." + attr) eventtrainarray._time = self.time[source_subset_ids] eventtrainarray._source_ids = new_source_ids eventtrainarray._source_labels = new_source_labels eventtrainarray.loc = ItemGetter_loc(eventtrainarray) eventtrainarray.iloc = ItemGetter_iloc(eventtrainarray) return eventtrainarray elif isinstance(self, BinnedEventArray): if len(source_subset_ids) == 0: warnings.warn("no sources remaining in requested source subset") return BinnedEventArray(empty=True) binnedeventtrainarray = BinnedEventArray(empty=True) exclude = ["_data", "source_ids", "source_labels"] attrs = (x for x in self.__attributes__ if x not in exclude) with warnings.catch_warnings(): warnings.simplefilter("ignore") for attr in attrs: exec("binnedeventtrainarray." + attr + " = self." + attr) binnedeventtrainarray._data = self.data[source_subset_ids,:] binnedeventtrainarray._source_ids = new_source_ids binnedeventtrainarray._source_labels = new_source_labels binnedeventtrainarray.loc = ItemGetter_loc(binnedeventtrainarray) binnedeventtrainarray.iloc = ItemGetter_iloc(binnedeventtrainarray) return binnedeventtrainarray else: raise NotImplementedError( "EventArray._source_slice() not supported for this type yet!") ######################################################################## # class EventArray ######################################################################## class EventArray(EventBase): """A multisource event train array with shared support. Parameters ---------- timestamps : array of np.array(dtype=np.float64) event times in seconds. Array of length n_sources, each entry with shape (n_time,) fs : float, optional Sampling rate in Hz. Default is 30,000 support : EpochArray, optional EpochArray on which eventtrains are defined. 
Default is [0, last event] inclusive. label : str or None, optional Information pertaining to the source of the eventtrain array. source_ids : list (of length n_sources) of indices corresponding to curated data. If no source_ids are specified, then [0,...,n_sources-1] will be used. meta : dict Metadata associated with EventArray. Attributes ---------- time : array of np.array(dtype=np.float64) event times in seconds. Array of length n_sources, each entry with shape (n_time,) support : EpochArray on which EventArray is defined. n_events: np.array(dtype=np.int) of shape (n_sources,) Number of events in each source. fs: float Sampling frequency (Hz). label : str or None Information pertaining to the source of the eventtrain. meta : dict Metadata associated with eventtrain. """ __attributes__ = ["_time", "_support"] __attributes__.extend(EventBase.__attributes__) def __init__(self, timestamps=None, *, fs=None, support=None, source_ids=None, source_labels=None, source_tags=None, label=None, empty=False): # if an empty object is requested, return it: if empty: super().__init__(empty=True) for attr in self.__attributes__: exec("self." + attr + " = None") self._support = core.EpochArray(empty=True) return # set default sampling rate if fs is None: fs = 30000 warnings.warn("No sampling rate was specified! Assuming default of {} Hz.".format(fs)) def is_singletons(data): """Returns True if data is a list of singletons (more than one).""" data = np.array(data) try: if data.shape[-1] < 2 and np.max(data.shape) > 1: return True if max(np.array(data).shape[:-1]) > 1 and data.shape[-1] == 1: return True except (IndexError, TypeError, ValueError): return False return False def is_single_source(data): """Returns True if data represents event times from a single source. Examples ======== [1, 2, 3] : True [[1, 2, 3]] : True [[1, 2, 3], []] : False [[], [], []] : False [[[[1, 2, 3]]]] : True [[[[[1],[2],[3]]]]] : False """ try: if isinstance(data[0][0], list) or isinstance(data[0][0], np.ndarray): warnings.warn("event times input has too many layers!") if max(np.array(data).shape[:-1]) > 1: # singletons = True return False data = np.squeeze(data) except (IndexError, TypeError): pass try: if isinstance(data[1], list) or isinstance(data[1], np.ndarray): return False except (IndexError, TypeError): pass return True def standardize_to_2d(data): if is_single_source(data): return np.array(np.squeeze(data), ndmin=2) if is_singletons(data): data = np.squeeze(data) n = np.max(data.shape) if len(data.shape) == 1: m = 1 else: m = np.min(data.shape) data = np.reshape(data, (n,m)) else: data = np.squeeze(data) if data.dtype == np.dtype('O'): jagged = True else: jagged = False if jagged: # jagged array # standardize input so that a list of lists is converted # to an array of arrays: data = np.array( [np.array(st, ndmin=1, copy=False) for st in data]) else: data = np.array(data, ndmin=2) return data time = standardize_to_2d(timestamps) #sort event trains, but only if necessary: for ii, train in enumerate(time): if not utils.is_sorted(train): time[ii] = np.sort(train) kwargs = {"fs": fs, "source_ids": source_ids, "source_labels": source_labels, "source_tags": source_tags, "label": label} self._time = time # this is necessary so that # super() can determine self.n_sources when initializing. 
# initialize super so that self.fs is set: super().__init__(**kwargs) # if only empty time were received AND no support, attach an # empty support: if np.sum([st.size for st in time]) == 0 and support is None: warnings.warn("no events; cannot automatically determine support") support = core.EpochArray(empty=True) # determine eventtrain array support: if support is None: first_event = np.nanmin(np.array([source[0] for source in time if len(source) !=0])) # BUG: if eventtrain is empty np.array([]) then source[-1] # raises an error in the following: # FIX: list[-1] raises an IndexError for an empty list, # whereas list[-1:] returns an empty list. last_event = np.nanmax(np.array([source[-1:] for source in time if len(source) !=0])) self._support = core.EpochArray(np.array([first_event, last_event + 1/fs])) # in the above, there's no reason to restrict to support else: # restrict events to only those within the eventtrain # array's support: self._support = support # TODO: if sorted, we may as well use the fast restrict here as well? time = self._restrict_to_epoch_array( epocharray=self._support, time=time) self._time = time def partition(self, ds=None, n_epochs=None): """Returns an EventArray whose support has been partitioned. # Irrespective of whether 'ds' or 'n_epochs' are used, the exact # underlying support is propagated, and the first and last points # of the supports are always included, even if this would cause # n_points or ds to be violated. Parameters ---------- ds : float, optional Maximum duration (in seconds), for each epoch. n_points : int, optional Number of epochs. If ds is None and n_epochs is None, then default is to use n_epochs = 100 Returns ------- out : EventArray EventArray that has been partitioned. """ out = copy.copy(self) out._support = out.support.partition(ds=ds, n_epochs=n_epochs) out.loc = ItemGetter_loc(out) out.iloc = ItemGetter_iloc(out) #TODO: renew epoch slicers ! return out def copy(self): """Returns a copy of the EventArray.""" newcopy = copy.deepcopy(self) newcopy.loc = ItemGetter_loc(newcopy) newcopy.iloc = ItemGetter_iloc(newcopy) #TODO: renew epoch slicers ! return newcopy def __add__(self, other): """Overloaded + operator""" #TODO: additional checks need to be done, e.g., same source ids... #TODO: it's better to copy into self, so that metadata are preserved assert self.n_sources == other.n_sources support = self.support + other.support newdata = [] for source in range(self.n_sources): newdata.append(np.append(self.time[source], other.time[source])) fs = self.fs if self.fs != other.fs: fs = None return EventArray(newdata, support=support, fs=fs) def __iter__(self): """EventArray iterator initialization.""" # initialize the internal index to zero when used as iterator self._index = 0 return self def __next__(self): """EventArray iterator advancer.""" index = self._index if index > self.support.n_epochs - 1: raise StopIteration with warnings.catch_warnings(): warnings.simplefilter("ignore") support = self.support[index] time = self._restrict_to_epoch_array_fast( epocharray=support, time=self.time, copyover=True ) eventtrain = EventArray(empty=True) exclude = ["_time", "_support"] attrs = (x for x in self.__attributes__ if x not in exclude) for attr in attrs: exec("eventtrain." + attr + " = self." + attr) eventtrain._time = time eventtrain._support = support eventtrain.loc =
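For reference, the support determination in EventArray.__init__ above reduces to taking the earliest first event and the latest last event across the non-empty sources and padding by one sample period. A standalone numpy rendering of that arithmetic, with made-up values purely for illustration:

import numpy as np

fs = 30000.0
time = [np.array([0.2, 0.9, 1.4]), np.array([]), np.array([0.05, 2.1])]

# the [-1:] slice in the original avoids an IndexError on empty trains;
# here the empty source is simply filtered out before indexing
nonempty = [src for src in time if len(src) != 0]
first_event = np.nanmin([src[0] for src in nonempty])
last_event = np.nanmax([src[-1] for src in nonempty])

support = (first_event, last_event + 1.0 / fs)
print(support)   # (0.05, 2.1000333...)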
configuration parameters""" config = self.config autogain = AutoGain() autogain.soft_tx_gain_0dBFS = config.soft_tx_gain if config.auto_soft_tx_gain is not None: autogain.recalc0dBFSEstimate(config.auto_soft_tx_gain) autogain.auto_soft_tx_gain_clip_frac = config.auto_soft_tx_gain_clip_frac return autogain def configureDefaultChannels(self): """Configure default channels""" config = self.config bandwidth = self.bandwidth if config.channel_bandwidth is None: cbw = bandwidth else: cbw = config.channel_bandwidth channels = dragonradio.channels.defaultChannelPlan(bandwidth, cbw) logger.debug(("Channels: %s " "(bandwidth=%g; " "rx_oversample=%d; " "tx_oversample=%d; " "channel bandwidth=%g)"), list(channels), bandwidth, config.rx_oversample_factor, config.tx_oversample_factor, cbw) self.setChannels(channels) def setChannels(self, channels): """Set current channels. This function will configure the necessary RX and TX rates and initialize the synthesizer and channelizer. """ self.channels = channels[:self.config.max_channels] # Initialize RX chain self.setRXChannels(channels) # Initialize TX chain self.setTXChannels(channels) # Reconfigure the MAC if self.mac is not None: self.mac.reconfigure() def setRXChannels(self, channels): """Configure RX chain for channels""" # Initialize channelizer self.setRXRate(self.bandwidth) # We need to do this *after* setting the RX rate because it is used to # determine filter parameters self.setChannelizerChannels(channels) def setTXChannels(self, channels): """Configure TX chain for channels""" if self.config.tx_upsample: self.setTXRate(self.bandwidth) self.setSynthesizerChannels(channels) else: self.setTXChannel(self.tx_channel_idx) def setChannelizerChannels(self, channels): """Set channelizer's channels.""" self.channelizer.channels = \ Channels([(chan, self.genChannelizerTaps(chan)) for chan in channels]) def setSynthesizerChannels(self, channels): """Set synthesizer's channels.""" self.synthesizer.channels = \ Channels([(chan, self.genSynthesizerTaps(chan)) for chan in channels]) # # Tell the MAC the minimum number of samples in a slot # min_channel_bandwidth = min([chan.bw for (chan, _taps) in self.synthesizer.channels]) if self.mac is not None: self.mac.min_channel_bandwidth = min_channel_bandwidth self.controller.min_channel_bandwidth = min_channel_bandwidth def configureValidDecimationRates(self): """Determine valid decimation and interpolation rates""" # See: # https://files.ettus.com/manual/page_general.html#general_sampleratenotes # Start out with only even rates. We sort this list in reverse so we can # easily find the first rate that is less than or equal to the requested # decimation rate. rates = sorted([2**i * 5**j for i in range(1,5) for j in range(0,4)], reverse=True) # If the rate exceeds 128, then rate must be evenly divisible by 2 rates = [r for r in rates if r <= 128 or r % 2 == 0] # If the rate exceeds 256, the rate must be evenly divisible by 4. rates = [r for r in rates if r <= 256 or r % 4 == 0] self.valid_rates = rates def validRate(self, min_rate, clock_rate): """Find a valid rate no less than min_rate given the clock rate clock_rate. 
Arguments: min_rate: The minimum desired rate clock_rate: The radio clock rate Returns: A rate no less than rate min_rate that is supported by the hardware""" # Compute decimation rate dec_rate = math.floor(clock_rate/min_rate) logger.debug('Desired decimation rate: %g', dec_rate) # Otherwise, make sure we use a safe decimation rate if dec_rate != 1: for rate in self.valid_rates: if dec_rate >= rate: dec_rate = rate break logger.debug('Actual decimation rate: %g', dec_rate) return clock_rate/dec_rate def setRXRate(self, rate): """Set RX rate""" config = self.config if config.rx_bandwidth: want_rx_rate = config.rx_bandwidth else: rx_rate_oversample = config.rx_oversample_factor*self.phy.min_rx_rate_oversample want_rx_rate = rate*rx_rate_oversample # We max out at about 50Mhz with UHD 3.9 want_rx_rate = min(want_rx_rate, 50e6) want_rx_rate = self.validRate(want_rx_rate, self.usrp.clock_rate) if self.rx_rate != want_rx_rate: self.usrp.rx_rate = want_rx_rate self.rx_rate = self.usrp.rx_rate if self.rx_rate != want_rx_rate: raise ValueError('Wanted RX rate %g, but got %g' % (want_rx_rate, self.rx_rate)) self.channelizer.rx_rate = self.rx_rate def setTXRate(self, rate): """Set TX rate""" config = self.config if config.tx_bandwidth and config.tx_upsample: logger.warning("TX bandwidth set, but TX upsampling requested.") if config.tx_bandwidth and not config.tx_upsample: want_tx_rate = config.tx_bandwidth else: tx_rate_oversample = config.tx_oversample_factor*self.phy.min_tx_rate_oversample want_tx_rate = rate*tx_rate_oversample want_tx_rate = self.validRate(want_tx_rate, self.usrp.clock_rate) if self.tx_rate != want_tx_rate: self.usrp.tx_rate = want_tx_rate self.tx_rate = self.usrp.tx_rate if self.tx_rate != want_tx_rate: raise ValueError('Wanted TX rate %g, but got %g' % (want_tx_rate, self.tx_rate)) self.synthesizer.tx_rate = self.tx_rate def setTXChannel(self, channel_idx): """Set the transmission channel. If we are upsampling on TX, this is a no-op. Otherwise we configure the radio's frequency and bandwidth and synthesizer for the new, single channel. """ config = self.config if config.tx_upsample: logger.warning('Attempt to set TX channel when upsampling') else: # Determine TX channel from index self.tx_channel_idx = min(channel_idx, len(self.channels) - 1) channel = self.channels[self.tx_channel_idx] # Set TX rate self.setTXRate(channel.bw) # Set TX frequency logger.info("Setting TX frequency offset to %g", channel.fc) self.usrp.tx_frequency = self.frequency + channel.fc # Set synthesizer channel self.setSynthesizerChannels([Channel(0, channel.bw)]) # Allow the MAC to figure out the TX offset so snapshot self # tranmissions are correctly logged if self.mac is not None: self.mac.reconfigure() def reconfigureBandwidthAndFrequency(self, bandwidth, frequency): """Reconfigure the radio for the given bandwidth and frequency""" config = self.config if bandwidth == config.bandwidth and frequency == config.frequency: return logger.info("Reconfiguring radio: bandwidth=%f, frequency=%f", bandwidth, frequency) # Set current frequency config.frequency = frequency self.usrp.rx_frequency = self.frequency # If we are upsampling on TX, set TX frequency. Otherwise the call to # setTXChannel below will set the appropriate TX frequency. if config.tx_upsample: self.usrp.tx_frequency = self.frequency # If the bandwidth has changed, re-configure channels. 
Otherwise just # set the current channel---we need to re-set the channel after a # frequency change because although the channel number may be the same, # the corresponding frequency will be different. if config.bandwidth != bandwidth: config.bandwidth = bandwidth self.configureDefaultChannels() else: self.setTXChannel(self.tx_channel_idx) def environmentDiscontinuity(self): # When the environment changes, we need to inform the controller so that # it can reset MCS transition probabilities and adjust its MCS strategy # appropriately. if isinstance(self.controller, SmartController): self.controller.environmentDiscontinuity() def genChannelizerTaps(self, channel): """Generate channelizer filter taps for given channel""" config = self.config # Calculate channelizer taps if channel.bw == self.usrp.rx_rate: return [1] if config.channelizer == 'freqdomain': wp = 0.95*channel.bw ws = channel.bw fs = self.usrp.rx_rate h = dragonradio.signal.lowpass(wp, ws, fs, ftype='firpm1f2', Nmax=FDChannelizer.P) else: wp = 0.9*channel.bw ws = 1.1*channel.bw fs = self.usrp.rx_rate h = dragonradio.signal.lowpass(wp, ws, fs) logger.debug('Created prototype lowpass filter for channelizer: N=%d; wp=%g; ws=%g; fs=%g', len(h), wp, ws, fs) return h def genSynthesizerTaps(self, channel): """Generate synthesizer filter taps for given channel""" config = self.config if channel.bw == self.usrp.tx_rate: return [1] if config.synthesizer == 'freqdomain' or config.synthesizer == 'multichannel': # Frequency-space synthesizers don't apply a filter return [1] wp = 0.9*channel.bw ws = 1.1*channel.bw fs = self.usrp.tx_rate h = dragonradio.signal.lowpass(wp, ws, fs) logger.debug('Created prototype lowpass filter for synthesizer: N=%d; wp=%g; ws=%g; fs=%g', len(h), wp, ws, fs) return h def configureMAC(self, mac): """Configure MAC""" if mac == 'aloha': self.configureALOHA() elif mac == 'tdma': self.configureSimpleMACSchedule() elif mac == 'tdma-fdma': self.configureSimpleMACSchedule() elif mac == 'fdma': self.configureSimpleMACSchedule(fdma_mac=True) else: raise ValueError("Unknown MAC: {}".format(mac)) def deleteMAC(self): """Delete the current MAC""" if self.mac is not None: self.mac.stop() self.mac = None def configureALOHA(self): """Configure ALOHA MAC""" config = self.config self.mac = SlottedALOHA(self.usrp, self.phy, self.controller, self.snapshot_collector, self.channelizer, self.synthesizer, config.slot_size, config.guard_size, config.slot_send_lead_time, config.aloha_prob) # Install slot-per-channel schedule for ALOHA MAC self.installALOHASchedule() # We may not use superslots with the ALOHA MAC self.synthesizer.superslots = False # Set up overlap channelizer if isinstance(self.channelizer, OverlapTDChannelizer): # We need to demodulate half the previous slot because a sender # could start transmitting a packet halfway into a slot + epsilon. self.channelizer.prev_demod = 0.5*config.slot_size self.channelizer.cur_demod = config.slot_size self.finishConfiguringMAC() def configureTDMA(self, nslots): """Configures a TDMA MAC with 'nslots' slots. This function sets up a TDMA MAC for a schedule with `nslots` slots, but it does not claim any of the slots. After calling this function, the node *will not transmit* until it is given a slot. 
Args: nslots: The number of slots in the schedule """ config = self.config if isinstance(self.mac, TDMA) and self.mac.nslots == nslots: return # Replace the synthesizer if it is not a SlotSynthesizer if not isinstance(self.synthesizer, SlotSynthesizer): self.replaceSynthesizer(False) # Replace the MAC self.deleteMAC() self.mac = TDMA(self.usrp, self.phy, self.controller, self.snapshot_collector, self.channelizer, self.synthesizer, config.slot_size, config.guard_size, config.slot_send_lead_time, nslots) # We may use superslots with the TDMA MAC self.synthesizer.superslots = config.superslots # Set up overlap channelizer if isinstance(self.channelizer, OverlapTDChannelizer): # When using superslots, we need to demodulate half the previous # slot because a sender could start transmitting a packet halfway # into a slot + epsilon. if self.config.superslots: self.channelizer.prev_demod = 0.5*config.slot_size self.channelizer.cur_demod = config.slot_size else: self.channelizer.prev_demod = config.demod_overlap_size self.channelizer.cur_demod = \ config.slot_size - config.guard_size + config.demod_overlap_size self.finishConfiguringMAC() def configureFDMA(self): """Configures a FDMA MAC.""" config = self.config if isinstance(self.mac, FDMA): return #
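To make the rate logic in this chunk concrete: configureValidDecimationRates builds a descending table of 2^i * 5^j rates with extra divisibility constraints, and validRate then picks the largest tabulated decimation that does not exceed clock_rate/min_rate. A self-contained sketch with one worked example, assuming a 200 MHz clock and not tied to the actual USRP driver:

import math

# candidate decimation rates, mirroring configureValidDecimationRates
rates = sorted([2**i * 5**j for i in range(1, 5) for j in range(0, 4)], reverse=True)
rates = [r for r in rates if r <= 128 or r % 2 == 0]   # above 128: must be even
rates = [r for r in rates if r <= 256 or r % 4 == 0]   # above 256: divisible by 4

def valid_rate(min_rate, clock_rate):
    """Return a rate >= min_rate reachable with an allowed integer decimation."""
    dec_rate = math.floor(clock_rate / min_rate)
    if dec_rate != 1:
        for rate in rates:
            if dec_rate >= rate:
                dec_rate = rate
                break
    return clock_rate / dec_rate

print(rates[:5])                  # [2000, 1000, 500, 400, 250]
print(valid_rate(1.5e6, 200e6))   # desired decimation 133 snaps to 100 -> 2.0 MHz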
--* **[REQUIRED]** The Amazon Managed Blockchain instance type for the node. - **AvailabilityZone** *(string) --* **[REQUIRED]** The Availability Zone in which the node exists. :rtype: dict :returns: """ pass def create_proposal(self, ClientRequestToken: str, NetworkId: str, MemberId: str, Actions: Dict, Description: str = None) -> Dict: """ Creates a proposal for a change to the network that other members of the network can vote on, for example, a proposal to add a new member to the network. Any member can create a proposal. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/managedblockchain-2018-09-24/CreateProposal>`_ **Request Syntax** :: response = client.create_proposal( ClientRequestToken='string', NetworkId='string', MemberId='string', Actions={ 'Invitations': [ { 'Principal': 'string' }, ], 'Removals': [ { 'MemberId': 'string' }, ] }, Description='string' ) **Response Syntax** :: { 'ProposalId': 'string' } **Response Structure** - *(dict) --* - **ProposalId** *(string) --* The unique identifier of the proposal. :type ClientRequestToken: string :param ClientRequestToken: **[REQUIRED]** A unique, case-sensitive identifier that you provide to ensure the idempotency of the operation. An idempotent operation completes no more than one time. This identifier is required only if you make a service request directly using an HTTP client. It is generated automatically if you use an AWS SDK or the AWS CLI. This field is autopopulated if not provided. :type NetworkId: string :param NetworkId: **[REQUIRED]** The unique identifier of the network for which the proposal is made. :type MemberId: string :param MemberId: **[REQUIRED]** The unique identifier of the member that is creating the proposal. This identifier is especially useful for identifying the member making the proposal when multiple members exist in a single AWS account. :type Actions: dict :param Actions: **[REQUIRED]** The type of actions proposed, such as inviting a member or removing a member. The types of ``Actions`` in a proposal are mutually exclusive. For example, a proposal with ``Invitations`` actions cannot also contain ``Removals`` actions. - **Invitations** *(list) --* The actions to perform for an ``APPROVED`` proposal to invite an AWS account to create a member and join the network. - *(dict) --* An action to invite a specific AWS account to create a member and join the network. The ``InviteAction`` is carried out when a ``Proposal`` is ``APPROVED`` . - **Principal** *(string) --* **[REQUIRED]** The AWS account ID to invite. - **Removals** *(list) --* The actions to perform for an ``APPROVED`` proposal to remove a member from the network, which deletes the member and all associated member resources from the network. - *(dict) --* An action to remove a member from a Managed Blockchain network as the result of a removal proposal that is ``APPROVED`` . The member and all associated resources are deleted from the network. - **MemberId** *(string) --* **[REQUIRED]** The unique identifier of the member to remove. :type Description: string :param Description: A description for the proposal that is visible to voting members, for example, \"Proposal to add Example Corp. as member.\" :rtype: dict :returns: """ pass def delete_member(self, NetworkId: str, MemberId: str) -> Dict: """ Deletes a member. Deleting a member removes the member and all associated resources from the network. 
``DeleteMember`` can only be called for a specified ``MemberId`` if the principal performing the action is associated with the AWS account that owns the member. In all other cases, the ``DeleteMember`` action is carried out as the result of an approved proposal to remove a member. If ``MemberId`` is the last member in a network specified by the last AWS account, the network is deleted also. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/managedblockchain-2018-09-24/DeleteMember>`_ **Request Syntax** :: response = client.delete_member( NetworkId='string', MemberId='string' ) **Response Syntax** :: {} **Response Structure** - *(dict) --* :type NetworkId: string :param NetworkId: **[REQUIRED]** The unique identifier of the network from which the member is removed. :type MemberId: string :param MemberId: **[REQUIRED]** The unique identifier of the member to remove. :rtype: dict :returns: """ pass def delete_node(self, NetworkId: str, MemberId: str, NodeId: str) -> Dict: """ Deletes a peer node from a member that your AWS account owns. All data on the node is lost and cannot be recovered. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/managedblockchain-2018-09-24/DeleteNode>`_ **Request Syntax** :: response = client.delete_node( NetworkId='string', MemberId='string', NodeId='string' ) **Response Syntax** :: {} **Response Structure** - *(dict) --* :type NetworkId: string :param NetworkId: **[REQUIRED]** The unique identifier of the network that the node belongs to. :type MemberId: string :param MemberId: **[REQUIRED]** The unique identifier of the member that owns this node. :type NodeId: string :param NodeId: **[REQUIRED]** The unique identifier of the node. :rtype: dict :returns: """ pass def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None): """ Generate a presigned url given a client, its method, and arguments :type ClientMethod: string :param ClientMethod: The client method to presign for :type Params: dict :param Params: The parameters normally passed to ``ClientMethod``. :type ExpiresIn: int :param ExpiresIn: The number of seconds the presigned url is valid for. By default it expires in an hour (3600 seconds) :type HttpMethod: string :param HttpMethod: The http method to use on the generated url. By default, the http method is whatever is used in the method\'s model. :returns: The presigned url """ pass def get_member(self, NetworkId: str, MemberId: str) -> Dict: """ Returns detailed information about a member. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/managedblockchain-2018-09-24/GetMember>`_ **Request Syntax** :: response = client.get_member( NetworkId='string', MemberId='string' ) **Response Syntax** :: { 'Member': { 'NetworkId': 'string', 'Id': 'string', 'Name': 'string', 'Description': 'string', 'FrameworkAttributes': { 'Fabric': { 'AdminUsername': 'string', 'CaEndpoint': 'string' } }, 'Status': 'CREATING'|'AVAILABLE'|'CREATE_FAILED'|'DELETING'|'DELETED', 'CreationDate': datetime(2015, 1, 1) } } **Response Structure** - *(dict) --* - **Member** *(dict) --* The properties of a member. - **NetworkId** *(string) --* The unique identifier of the network to which the member belongs. - **Id** *(string) --* The unique identifier of the member. - **Name** *(string) --* The name of the member. - **Description** *(string) --* An optional description for the member. 
- **FrameworkAttributes** *(dict) --* Attributes relevant to a member for the blockchain framework that the Managed Blockchain network uses. - **Fabric** *(dict) --* Attributes of Hyperledger Fabric relevant to a member on a Managed Blockchain network that uses Hyperledger Fabric. - **AdminUsername** *(string) --* The user name for the initial administrator user for the member. - **CaEndpoint** *(string) --* The endpoint used to access the member's certificate authority. - **Status** *(string) --* The status of a member. * ``CREATING`` - The AWS account is in the process of creating a member. * ``AVAILABLE`` - The member has been created and can participate in the network. * ``CREATE_FAILED`` - The AWS account attempted to create a member and creation failed. * ``DELETING`` - The member and all associated resources are in the process of being deleted. Either the AWS account that owns the member deleted it, or the member is being deleted as the result of an ``APPROVED`` ``PROPOSAL`` to remove the member. * ``DELETED`` - The member can no longer participate on the network and all associated resources are deleted. Either the AWS account that owns the member deleted it, or the member is being deleted as the result of an ``APPROVED`` ``PROPOSAL`` to remove the member. - **CreationDate** *(datetime) --* The date and time that the member was created. :type NetworkId: string :param NetworkId: **[REQUIRED]** The unique identifier of the network to which the member belongs. :type MemberId: string :param MemberId: **[REQUIRED]** The unique identifier of the member. :rtype: dict :returns: """ pass def get_network(self, NetworkId: str) -> Dict: """ Returns detailed information about a network. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/managedblockchain-2018-09-24/GetNetwork>`_ **Request Syntax** :: response = client.get_network(
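As a usage sketch of the client methods documented above: the identifiers below are placeholders and the call shapes simply follow the Request Syntax blocks, so treat this as illustrative rather than a tested integration.

import boto3

client = boto3.client('managedblockchain')

# propose inviting another AWS account to the network (per create_proposal)
proposal = client.create_proposal(
    ClientRequestToken='example-token-123',   # placeholder
    NetworkId='n-EXAMPLE',                    # placeholder
    MemberId='m-EXAMPLE',                     # placeholder
    Actions={'Invitations': [{'Principal': '111122223333'}]},
    Description='Proposal to add Example Corp. as member.',
)
print(proposal['ProposalId'])

# inspect a member (per get_member); the response carries Status,
# FrameworkAttributes, CreationDate, and so on
member = client.get_member(NetworkId='n-EXAMPLE', MemberId='m-EXAMPLE')
print(member['Member']['Status'])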
<reponame>vishvananda/dagster '''Scaffolding machinery for dagster-airflow. Entrypoint is scaffold_airflow_dag, which consumes a pipeline definition and a config, and generates an Airflow DAG definition each of whose nodes corresponds to one step of the execution plan. ''' import os from datetime import datetime, timedelta from six import string_types from yaml import dump from dagster import check, PipelineDefinition from dagster.core.execution import create_execution_plan from .utils import IndentingBlockPrinter DEFAULT_ARGS = { 'owner': 'airflow', 'depends_on_past': False, 'email': ['<EMAIL>'], 'email_on_failure': False, 'email_on_retry': False, 'retries': 1, 'retry_delay': timedelta(0, 300), } def _normalize_key(key): '''We need to make sure task ids play nicely with Airflow.''' return key.replace('_', '__').replace('.', '_') def _is_py(path): '''We need to make sure we are writing out Python files.''' return path.split('.')[-1] == 'py' def _bad_import(path): '''We need to make sure our relative import will work.''' return '.' in os.path.basename(path)[:-3] def _split_lines(lines): '''Fancy utility adds a trailing comma to the last line of output.''' return (lines.strip('\n') + ',').split('\n') def _key_for_marshalled_result(step_key, result_name, prepend_run_id=True): '''Standardizes keys for marshalled inputs and outputs.''' return ( '/tmp/results/' + ('{run_id_prefix}' if prepend_run_id else '') + _normalize_key(step_key) + '___' + _normalize_key(result_name) + '.pickle' ) def _step_executions_key(step): return 'STEP_EXECUTIONS_' + _normalize_key(step.key).upper() def _format_config(config): '''This recursive descent thing formats a config dict for GraphQL.''' def _format_config_subdict(config, current_indent=0): check.dict_param(config, 'config', key_type=str) printer = IndentingBlockPrinter(indent_level=2, current_indent=current_indent) printer.line('{') n_elements = len(config) for i, key in enumerate(sorted(config, key=lambda x: x[0])): value = config[key] with printer.with_indent(): formatted_value = ( _format_config_item(value, current_indent=printer.current_indent) .lstrip(' ') .rstrip('\n') ) printer.line( '{key}: {formatted_value}{comma}'.format( key=key, formatted_value=formatted_value, comma=',' if i != n_elements - 1 else '', ) ) printer.line('}') return printer.read() def _format_config_sublist(config, current_indent=0): printer = IndentingBlockPrinter(indent_level=2, current_indent=current_indent) printer.line('[') n_elements = len(config) for i, value in enumerate(config): with printer.with_indent(): formatted_value = ( _format_config_item(value, current_indent=printer.current_indent) .lstrip(' ') .rstrip('\n') ) printer.line( '{formatted_value}{comma}'.format( formatted_value=formatted_value, comma=',' if i != n_elements - 1 else '' ) ) printer.line(']') return printer.read() def _format_config_item(config, current_indent=0): printer = IndentingBlockPrinter(indent_level=2, current_indent=current_indent) if isinstance(config, dict): return _format_config_subdict(config, printer.current_indent) elif isinstance(config, list): return _format_config_sublist(config, printer.current_indent) else: return repr(config).replace('\'', '"') check.dict_param(config, 'config', key_type=str) if not isinstance(config, dict): check.failed('Expected a dict to format as config, got: {item}'.format(item=repr(config))) return _format_config_subdict(config) def _make_editable_scaffold( pipeline_name, pipeline_description, env_config, static_scaffold, default_args ): pipeline_description = 
'***Autogenerated by dagster-airflow***' + ( ''' {pipeline_description} '''.format( pipeline_description=pipeline_description ) if pipeline_description else '' ) with IndentingBlockPrinter() as printer: printer.block( '\'\'\'Editable scaffolding autogenerated by dagster-airflow from pipeline ' '{pipeline_name} with config:'.format(pipeline_name=pipeline_name) ) printer.blank_line() with printer.with_indent(): for line in dump(env_config).split('\n'): printer.line(line) printer.blank_line() printer.block( 'By convention, users should attempt to isolate post-codegen changes and ' 'customizations to this "editable" file, rather than changing the definitions in the ' '"static" {static_scaffold}.py file. Please let us know if you are encountering use ' 'cases where it is necessary to make changes to the static file.'.format( static_scaffold=static_scaffold ) ) printer.line('\'\'\'') printer.blank_line() printer.line('import datetime') printer.blank_line() printer.line( 'from {static_scaffold} import make_dag'.format(static_scaffold=static_scaffold) ) printer.blank_line() printer.comment( 'Arguments to be passed to the ``default_args`` parameter of the ``airflow.DAG`` ' 'constructor.You can override these with values of your choice.' ) printer.line('DEFAULT_ARGS = {') with printer.with_indent(): for key, value in sorted(default_args.items(), key=lambda x: x[0]): printer.line('\'{key}\': {value_repr},'.format(key=key, value_repr=repr(value))) printer.line('}') printer.blank_line() printer.comment( 'Any additional keyword arguments to be passed to the ``airflow.DAG`` constructor. ' 'You can override these with values of your choice.' ) printer.line('DAG_KWARGS = {') with printer.with_indent(): printer.line('\'schedule_interval\': \'0 0 * * *\',') printer.line('}') printer.blank_line() printer.comment( 'The name of the autogenerated DAG. By default, this is just the name of the Dagster ' 'pipeline from which the Airflow DAG was generated ({pipeline_name}). You may want to ' 'override this if, for instance, you want to schedule multiple DAGs corresponding to ' 'different configurations of the same Dagster pipeline.'.format( pipeline_name=pipeline_name ) ) printer.line('DAG_ID = \'{pipeline_name}\''.format(pipeline_name=pipeline_name)) printer.blank_line() printer.comment( 'The description of the autogenerated DAG. By default, this is the description of the ' 'Dagster pipeline from which the Airflow DAG was generated. You may want to override ' 'this, as with the DAG_ID parameter.' ) printer.line( 'DAG_DESCRIPTION = \'\'\'{pipeline_description}\'\'\''.format( pipeline_description=pipeline_description ) ) printer.blank_line() printer.comment( 'Additional arguments, if any, to pass to the underlying ' '``dagster_airflow.dagster_plugin.ModifiedDockerOperator`` constructor. Set these if, ' 'for instance, you need to set special TLS parameters.' ) printer.line('MODIFIED_DOCKER_OPERATOR_KWARGS = {}') printer.blank_line() printer.comment( 'Set your S3 connection id here, if you do not want to use the default ``aws_default`` ' 'connection.' ) printer.line('S3_CONN_ID = \'aws_default\'') printer.blank_line() printer.comment('Set the host directory to mount into /tmp/results on the containers.') printer.line('HOST_TMP_DIR = \'/tmp/results\'') printer.blank_line() # This is the canonical way to hide globals from import, but not from Airflow's DagBag printer.line( '# The \'unusual_prefix\' ensures that the following code will be executed only when' ) printer.line( '# Airflow imports this file. 
See: https://bcb.github.io/airflow/hide-globals-in-dag-definition-file' ) printer.line('if __name__.startswith(\'unusual_prefix\'):') with printer.with_indent(): printer.line('dag, tasks = make_dag(') with printer.with_indent(): printer.line('dag_id=DAG_ID,') printer.line('dag_description=DAG_DESCRIPTION,') printer.line('dag_kwargs=dict(default_args=DEFAULT_ARGS, **DAG_KWARGS),') printer.line('s3_conn_id=S3_CONN_ID,') printer.line('modified_docker_operator_kwargs=MODIFIED_DOCKER_OPERATOR_KWARGS,') printer.line('host_tmp_dir=HOST_TMP_DIR,') printer.line(')') return printer.read() # pylint: disable=too-many-statements def _make_static_scaffold(pipeline_name, env_config, execution_plan, image, editable_scaffold): with IndentingBlockPrinter() as printer: printer.block( '\'\'\'Static scaffolding autogenerated by dagster-airflow from pipeline ' '{pipeline_name} with config:'.format(pipeline_name=pipeline_name) ) printer.blank_line() with printer.with_indent(): for line in dump(env_config).split('\n'): printer.line(line) printer.blank_line() printer.block( 'By convention, users should attempt to isolate post-codegen changes and ' 'customizations to the "editable" {editable_scaffold}.py file, rather than changing ' 'the definitions in this "static" file. Please let us know if you are encountering ' 'use cases where it is necessary to make changes to the static file.'.format( editable_scaffold=editable_scaffold ) ) printer.line('\'\'\'') printer.blank_line() printer.line('from airflow import DAG') printer.line('from airflow.operators.dagster_plugin import DagsterOperator') printer.blank_line() printer.blank_line() printer.line('CONFIG = \'\'\'') with printer.with_indent(): for line in _format_config(env_config).strip('\n').split('\n'): printer.line(line) printer.line('\'\'\'.strip(\'\\n\').strip(\' \')') printer.blank_line() printer.line('PIPELINE_NAME = \'{pipeline_name}\''.format(pipeline_name=pipeline_name)) printer.blank_line() for step in execution_plan.topological_steps(): step_executions_key = _step_executions_key(step) printer.line( '{step_executions_key} = {{'.format(step_executions_key=step_executions_key) ) with printer.with_indent(): printer.line('\'step_key\': \'{step_key}\','.format(step_key=step.key)) printer.line('\'inputs\': [') for step_input in step.step_inputs: with printer.with_indent(): printer.line('{') with printer.with_indent(): printer.line( '\'input_name\': \'{input_name}\','.format( input_name=step_input.name ) ) printer.line( '\'key\': \'{key}\''.format( key=_key_for_marshalled_result( step_input.prev_output_handle.step.key, step_input.prev_output_handle.output_name, ) ) ) printer.line('},') printer.line('],') printer.line('\'outputs\': [') for step_output in step.step_outputs: with printer.with_indent(): printer.line('{') with printer.with_indent(): printer.line( '\'output_name\': \'{output_name}\','.format( output_name=step_output.name ) ) printer.line( '\'key\': \'{key}\''.format( key=_key_for_marshalled_result(step.key, step_output.name) ) ) printer.line('},') printer.line(']') printer.line('}') printer.blank_line() printer.blank_line() printer.line('def make_dag(') with printer.with_indent(): printer.line('dag_id,') printer.line('dag_description,') printer.line('dag_kwargs,') printer.line('s3_conn_id,') printer.line('modified_docker_operator_kwargs,') printer.line('host_tmp_dir') printer.line('):') with printer.with_indent(): printer.line('dag = DAG(') with printer.with_indent(): printer.line('dag_id=dag_id,') printer.line('description=dag_description,') 
printer.line('**dag_kwargs') printer.line(')') printer.blank_line() printer.line('tasks = []') printer.blank_line() for step in execution_plan.topological_steps(): step_key = step.key airflow_step_key = _normalize_key(step_key) step_executions_key = _step_executions_key(step) printer.line( '{airflow_step_key}_task = DagsterOperator('.format( airflow_step_key=airflow_step_key ) ) with printer.with_indent(): printer.line('step=\'{step_key}\','.format(step_key=step_key)) printer.line('config=CONFIG,') printer.line('dag=dag,') printer.line('tmp_dir=\'/tmp/results\',') printer.line('host_tmp_dir=host_tmp_dir,') printer.line('image=\'{image}\','.format(image=image)) printer.line( 'task_id=\'{airflow_step_key}\','.format(airflow_step_key=airflow_step_key) ) printer.line('s3_conn_id=s3_conn_id,') printer.line('pipeline_name=PIPELINE_NAME,') printer.line( 'step_executions={step_executions_key},'.format( step_executions_key=step_executions_key ) ) printer.line(')') printer.line( 'tasks.append({airflow_step_key}_task)'.format( airflow_step_key=airflow_step_key ) ) printer.blank_line() for step in execution_plan.topological_steps(): for step_input in step.step_inputs: prev_airflow_step_key = _normalize_key(step_input.prev_output_handle.step.key) airflow_step_key = _normalize_key(step.key) printer.line( '{prev_airflow_step_key}_task.set_downstream(' '{airflow_step_key}_task)'.format( prev_airflow_step_key=prev_airflow_step_key, airflow_step_key=airflow_step_key, ) ) printer.blank_line() # We return both the DAG and the tasks to make testing, etc. easier printer.line('return (dag, tasks)') return printer.read() # pylint: disable=too-many-locals def scaffold_airflow_dag(pipeline, env_config, image, output_path=None, dag_kwargs=None): '''Scaffold a new Airflow DAG based on a PipelineDefinition and config. Creates an "editable" scaffold (intended for end user modification) and a "static" scaffold. The editable scaffold imports the static scaffold and defines the Airflow DAG. As a rule, both scaffold files need to be present in your Airflow DAG directory (by default, this is $AIRFLOW_HOME/dags)in order to be correctly parsed by Airflow. Note that an Airflow DAG corresponds to a Dagster execution plan, since many different execution plans may be created when a PipelineDefinition is parametrized by various config values. You may want to create multiple Airflow DAGs corresponding to, e.g., test and production configs of your Dagster pipelines. Parameters: pipeline (dagster.PipelineDefinition): Pipeline to use to construct the Airflow DAG. env_config (dict): The config to use to construct the Airflow DAG. image (str): The name of the Docker image in which your pipeline has been containerized. output_path (Union[Tuple[str, str], str, None]): Optionally specify the path at which to write the scaffolded files. If this parameter is a tuple of absolute paths, the static scaffold will be written to the first member of the tuple and the editable scaffold will be written to the second member of the tuple. If this parameter is a path to a directory, the scaffold files will be written to that directory as '{pipeline_name}_static__scaffold.py' and '{pipeline_name}_editable__scaffold.py' respectively. If this parameter is None, the scaffolds
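A quick illustration of the key helpers the static scaffold above leans on. The two functions are copied from the top of this module so the snippet runs standalone; the step key itself is made up.

def _normalize_key(key):
    return key.replace('_', '__').replace('.', '_')

def _key_for_marshalled_result(step_key, result_name, prepend_run_id=True):
    return (
        '/tmp/results/'
        + ('{run_id_prefix}' if prepend_run_id else '')
        + _normalize_key(step_key)
        + '___'
        + _normalize_key(result_name)
        + '.pickle'
    )

print(_normalize_key('my_solid.transform'))
# my__solid_transform
print(_key_for_marshalled_result('my_solid.transform', 'result'))
# /tmp/results/{run_id_prefix}my__solid_transform___result.pickle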
<filename>houdini/handlers/play/pet.py<gh_stars>1-10 import asyncio import operator import random import time from datetime import datetime, timedelta from houdini import handlers from houdini.constants import ClientType, StatusField from houdini.data.mail import PenguinPostcard from houdini.data.pet import PenguinPuffle, PenguinPuffleCollection, PenguinPuffleItemCollection, PuffleCollection, \ PuffleItemCollection, PuffleTreasureFurniture, PuffleTreasureItem, PuffleTreasurePuffleItem from houdini.data.room import PenguinBackyardRoom, PenguinIglooRoom from houdini.handlers import Priority, XMLPacket, XTPacket PuffleKillerInterval = 1800 LegacyPuffleIds = [0, 1, 2, 3, 4, 5, 6, 7, 8] BrushCareItemId = 1 BathCareItemId = 8 SleepCareItemId = 37 BasicCareInventory = [BrushCareItemId, BathCareItemId, SleepCareItemId] async def decrease_stats(server): while True: await asyncio.sleep(PuffleKillerInterval) for penguin in server.penguins_by_id.values(): if type(penguin.room) != PenguinIglooRoom or penguin.room.penguin_id != penguin.id: for puffle_id in list(penguin.puffles.keys()): puffle = penguin.puffles[puffle_id] puffle_crumbs = server.puffles[puffle.puffle_id] is_legacy_puffle = penguin.is_legacy_client and puffle.puffle_id in LegacyPuffleIds is_vanilla_puffle = penguin.is_vanilla_client and not puffle.backyard if is_vanilla_puffle or is_legacy_puffle: if puffle.id == penguin.walking: await puffle.update( food=max(10, puffle.food - 8), rest=max(10, puffle.rest - 8), clean=max(10, puffle.clean - 8) ).apply() else: await puffle.update( food=max(0, puffle.food - 4), play=max(0, puffle.play - 4), rest=max(0, puffle.rest - 4), clean=max(0, puffle.clean - 4) ).apply() if is_legacy_puffle and puffle.food == puffle.rest == puffle.clean == 0: await penguin.add_inbox(server.postcards[puffle_crumbs.runaway_postcard], details=puffle.name) await penguin.puffles.delete(puffle.id) elif is_legacy_puffle and puffle.food < 10: notification_aware = await PenguinPostcard.query.where( (PenguinPostcard.penguin_id == penguin.id) & (PenguinPostcard.postcard_id == 110) & (PenguinPostcard.details == puffle.name)).gino.scalar() if not notification_aware: await penguin.add_inbox(server.postcards[110], details=puffle.name) async def dig(p, on_command=False): if p.walking is not None: treasure_types = {0: 'coins', 1: 'food', 2: 'furniture', 3: 'clothing', None: None} walking_puffle = p.puffles[p.walking] treasure_quantity, item_id = 1, 0 if p.can_dig_gold: treasure_types = {0: 'coins', 4: 'golden', None: None} puffle_age = (datetime.now() - walking_puffle.adoption_date).days puffle_health = walking_puffle.food + walking_puffle.play + walking_puffle.rest + walking_puffle.clean age_percent = puffle_age / 365 health_percent = puffle_health / 400 overall_percent = (age_percent + health_percent * 2) / 3 if overall_percent > random.random() and p.is_member: treasure_type_id = random.choice(list(treasure_types)) treasure_type = treasure_types[treasure_type_id] else: treasure_type_id = random.choice([0, None]) treasure_type = treasure_types[treasure_type_id] if not on_command and treasure_type is None: return await p.room.send_xt('nodig', p.id, 1) elif treasure_type == 'food': diggable_food_ids = [t.puffle_item_id for t in p.server.puffle_food_treasure if t.puffle_id == walking_puffle.puffle_id and t.puffle_item_id not in p.puffle_items] if diggable_food_ids: item_id = random.choice(diggable_food_ids) await p.add_puffle_item(p.server.puffle_items[item_id], notify=False, cost=0) if item_id == 
p.server.puffles[walking_puffle.puffle_id].favourite_food: await p.add_stamp(p.server.stamps[495]) elif treasure_type == 'furniture': diggable_furniture_ids = [t.furniture_id for t in p.server.puffle_furniture_treasure if t.puffle_id == walking_puffle.puffle_id and t.furniture_id not in p.furniture] if diggable_furniture_ids: item_id = random.choice(diggable_furniture_ids) await p.add_furniture(p.server.furniture[item_id], notify=False, cost=0) await p.add_stamp(p.server.stamps[494]) elif treasure_type == 'clothing': diggable_clothing_ids = [t.item_id for t in p.server.puffle_clothing_treasure if t.puffle_id == walking_puffle.puffle_id and t.item_id not in p.inventory] if diggable_clothing_ids: item_id = random.choice(diggable_clothing_ids) await p.add_inventory(p.server.items[item_id], notify=False, cost=0) await p.add_stamp(p.server.stamps[494]) elif treasure_type == 'golden': item_id = 1 treasure_quantity = random.randrange(1, 4) await p.update(nuggets=p.nuggets + treasure_quantity).apply() await p.send_xt('currencies', f'1|{p.nuggets}') if not item_id: treasure_type_id, treasure_type = 0, 'coins' if (on_command and treasure_type is None) or treasure_type == 'coins': treasure_quantity = random.randrange(10, 250) await p.update(coins=p.coins + treasure_quantity).apply() if treasure_quantity >= 50: await p.add_stamp(p.server.stamps[493]) if not p.has_dug: await p.add_stamp(p.server.stamps[489]) for player in p.room.penguins_by_id.values(): if player.id != p.id: await player.add_stamp(p.server.stamps[490]) await p.room.send_xt('puffledig', p.id, p.walking, treasure_type_id, item_id, treasure_quantity, int(not p.has_dug)) await p.update(has_dug=True).apply() await walking_puffle.update(has_dug=True).apply() color_dig_count = len({puffle.puffle_id for puffle in p.puffles.values() if puffle.has_dug}) if color_dig_count >= 11: await p.add_stamp(p.server.stamps[491]) await p.server.redis.setex(f'houdini.last_dig.{p.id}', 120, int(time.time())) dig_count = await p.server.redis.incr(f'houdini.dig_count.{p.id}') if dig_count == 1: await p.server.redis.expireat(f'houdini.dig_count.{p.id}', (datetime.now() + timedelta(days=1)).timestamp()) if dig_count == 5: await p.add_stamp(p.server.stamps[492]) await p.status_field_set(StatusField.PuffleTreasureInfographic) async def deliver(p, care_item, puffle): if care_item.cost != 0 and care_item.id not in p.puffle_items: await p.add_puffle_item(care_item) if care_item.cost == 0 or care_item.id in p.puffle_items: if care_item.type == 'food': quantity_owned = p.puffle_items[care_item.id].quantity if quantity_owned > 1: await p.puffle_items[care_item.id].update(quantity=quantity_owned - 1).apply() elif quantity_owned == 1: await p.puffle_items.delete(care_item.id) if care_item.id == p.server.puffles[puffle.puffle_id].favourite_food: await puffle.update(food=100, play=100, rest=100, clean=100).apply() else: await puffle.update( food=max(0, min(puffle.food + care_item.food_effect, 100)), play=max(0, min(puffle.play + care_item.play_effect, 100)), rest=max(0, min(puffle.rest + care_item.rest_effect, 100)), clean=max(0, min(puffle.clean + care_item.clean_effect, 100)), ).apply() celebration = puffle.food == puffle.play == puffle.rest == puffle.clean == 100 care_item_delivery = f'{puffle.id}|{puffle.food}|{puffle.play}|{puffle.rest}|{puffle.clean}|{int(celebration)}' await p.room.send_xt('pcid', p.id, care_item_delivery) if care_item.id == 126: p.can_dig_gold = True await p.room.send_xt('oberry', p.id, p.walking) await p.send_xt('currencies', f'1|{p.nuggets}') def 
get_client_puffle_id(p, puffle_id): parent_id = p.server.puffles[puffle_id].parent_id return (parent_id, puffle_id) if parent_id is not None else (puffle_id, '') def get_client_puffle_id_string(p, puffle_id): parent_id, puffle_id = get_client_puffle_id(p, puffle_id) return f'{parent_id}|{puffle_id}' def get_my_player_puffles(p): if p.is_vanilla_client: return [f'{puffle.id}|{get_client_puffle_id_string(p, puffle.puffle_id)}|' f'{puffle.name}|{int(time.mktime(puffle.adoption_date.timetuple()))}|{puffle.food}|{puffle.play}|' f'{puffle.rest}|{puffle.clean}|{puffle.hat or 0}|0' for puffle in p.puffles.values()] else: return [f'{puffle.id}|{puffle.name}|{puffle.puffle_id}|{puffle.clean}|' f'{puffle.food}|{puffle.rest}|100|100|100' for puffle in p.puffles.values() if puffle.puffle_id in LegacyPuffleIds] def get_my_player_walking_puffle(p): if p.walking is not None and p.is_vanilla_client: puffle = p.puffles[p.walking] parent_id, puffle_id = get_client_puffle_id(p, puffle.puffle_id) return f'{puffle.id}|{parent_id}|{puffle_id}|{puffle.hat or 0}|0' return '||||' def check_name(p, puffle_name): tokens = puffle_name.lower().split() clean = not any(word in tokens for word in p.server.chat_filter_words.keys()) length_ok = 1 <= len(puffle_name) <= 12 characters_ok = puffle_name.isalpha() return characters_ok and length_ok and clean @handlers.boot async def puffles_load(server): server.puffles = await PuffleCollection.get_collection() server.puffle_items = await PuffleItemCollection.get_collection() server.logger.info(f'Loaded {len(server.puffle_items)} puffle care items') server.logger.info(f'Loaded {len(server.puffles)} puffles') server.puffle_food_treasure = await PuffleTreasurePuffleItem.query.gino.all() server.puffle_furniture_treasure = await PuffleTreasureFurniture.query.gino.all() server.puffle_clothing_treasure = await PuffleTreasureItem.query.gino.all() server.puffle_killer = asyncio.create_task(decrease_stats(server)) @handlers.handler(XMLPacket('login'), priority=Priority.Low) @handlers.allow_once async def load_pet_inventory(p): p.puffles = await PenguinPuffleCollection.get_collection(p.id) p.puffle_items = await PenguinPuffleItemCollection.get_collection(p.id) await p.send_xt('pgu', *get_my_player_puffles(p)) @handlers.handler(XTPacket('p', 'getdigcooldown'), pre_login=True) async def handle_get_dig_cooldown(p): last_dig = await p.server.redis.get(f'houdini.last_dig.{p.id}') if last_dig is not None: cooldown_remaining = max(0, 120 - (int(time.time()) - int(last_dig))) return await p.send_xt('getdigcooldown', cooldown_remaining) await p.send_xt('getdigcooldown', 0) @handlers.handler(XTPacket('p', 'checkpufflename')) async def handle_check_puffle_name_with_response(p, puffle_name): name_ok = check_name(p, puffle_name) await p.send_xt('checkpufflename', puffle_name, int(name_ok)) @handlers.handler(XTPacket('p', 'pcn')) async def handle_check_puffle_name(p, puffle_name): name_ok = check_name(p, puffle_name) await p.send_xt('pcn', puffle_name, int(name_ok)) @handlers.handler(XTPacket('p', 'pg'), client=ClientType.Vanilla) async def handle_get_player_puffles_vanilla(p, penguin_id: int, room_type: str): is_backyard = room_type == 'backyard' owned_puffles = await PenguinPuffle.query.where((PenguinPuffle.penguin_id == penguin_id) & (PenguinPuffle.backyard == is_backyard)).gino.all() walking = p.server.penguins_by_id[penguin_id].walking if penguin_id in p.server.penguins_by_id else None player_puffles = [f'{puffle.id}|{get_client_puffle_id_string(p, puffle.puffle_id)}|' 
f'{puffle.name}||{puffle.food}|{puffle.play}|{puffle.rest}|{puffle.clean}|' f'{puffle.hat or 0}|0|0|{int(puffle.id == walking)}' for puffle in owned_puffles] await p.send_xt('pg', len(owned_puffles), *player_puffles) if len(owned_puffles) >= 10: await p.status_field_set(StatusField.MoreThanTenPufflesBackyardMessage) @handlers.handler(XTPacket('p', 'pg'), client=ClientType.Legacy) async def handle_get_player_puffles_legacy(p, penguin_id: int): owned_puffles = await PenguinPuffle.query.where((PenguinPuffle.penguin_id == penguin_id)).gino.all() walking = p.server.penguins_by_id[penguin_id].walking if penguin_id in p.server.penguins_by_id else None player_puffles = [f'{puffle.id}|{puffle.name}|{puffle.puffle_id}|' f'{puffle.clean}|{puffle.food}|{puffle.rest}|100|100|100|0|0|0|{int(puffle.id == walking)}' for puffle in owned_puffles if puffle.puffle_id in LegacyPuffleIds] await p.send_xt('pg', *player_puffles) @handlers.handler(XTPacket('p', 'pgu')) async def handle_get_my_player_puffles(p): await p.send_xt('pgu', *get_my_player_puffles(p)) @handlers.handler(XTPacket('p', 'pn'), client=ClientType.Vanilla) async def handle_adopt_puffle_vanilla(p, type_id: int, name: str, subtype_id: int): if type_id not in p.server.puffles or not check_name(p, name): return await p.send_error(441) name = name.title() cost = p.server.puffles[type_id].cost if p.coins < cost: return await p.send_error(401) if len(p.puffles) >= 75: return await p.send_error(440) puffle_id = subtype_id if bool(subtype_id) else type_id if type_id == 10: if not p.rainbow_adoptability: return await p.send_error(441) await p.update(rainbow_adoptability=False).apply() elif type_id == 11: await p.update(nuggets=p.nuggets - 15).apply() p.can_dig_gold = False elif subtype_id == 0: await p.add_puffle_item(p.server.puffle_items[3], quantity=5, cost=0) await p.add_puffle_item(p.server.puffle_items[79], cost=0) await p.add_puffle_item(p.server.puffle_items[p.server.puffles[puffle_id].favourite_toy]) await p.update(coins=p.coins - cost).apply() puffle = await p.puffles.insert(puffle_id=puffle_id, name=name) parent_id, puffle_id = get_client_puffle_id(p, puffle.puffle_id) puffle_string = f'{puffle.id}|{parent_id}|{puffle_id}|{puffle.name}|{int(time.time())}' \ f'|100|100|100|100|0|0' await p.send_xt('pn', p.coins, puffle_string) await p.add_inbox(p.server.postcards[111], details=puffle.name) igloo_puffle_count = sum(not puff.backyard for puff in p.puffles.values()) if igloo_puffle_count > 10: puffle_to_relocate = next(puff for puff in p.puffles.values() if not puff.backyard) await puffle_to_relocate.update(backyard=True).apply() @handlers.handler(XTPacket('p', 'pn'), client=ClientType.Legacy) async def handle_adopt_puffle_legacy(p, type_id: int, name: str): if type_id not in LegacyPuffleIds or not check_name(p, name): return await p.send_error(441) name = name.title() cost = 800 if p.coins < cost: return await p.send_error(401) if len(p.puffles) >= 18: return await p.send_error(440) await p.update(coins=p.coins - cost).apply() puffle = await p.puffles.insert(puffle_id=type_id, name=name) puffle_string = f'{puffle.id}|{puffle.name}|{puffle.puffle_id}|100|100|100|100|100|100' await p.send_xt('pn', p.coins, puffle_string) await p.add_inbox(p.server.postcards[111], details=puffle.name) await p.add_puffle_item(p.server.puffle_items[p.server.puffles[type_id].favourite_toy], notify=False) await p.send_xt('pgu', *get_my_player_puffles(p)) @handlers.handler(XTPacket('p', 'pgpi'), client=ClientType.Vanilla) async def handle_get_care_inventory(p): await 
p.send_xt('pgpi', *(f'{item_id}|1' for item_id in BasicCareInventory), *(f'{care_item.item_id}|{care_item.quantity}' for care_item in p.puffle_items.values())) @handlers.handler(XTPacket('p', 'pm')) async def handle_puffle_move(p, puffle: PenguinPuffle, x: int, y: int): await p.room.send_xt('pm', f'{puffle.id}|{x}|{y}', f=operator.attrgetter('is_vanilla_client')) await p.room.send_xt('pm', puffle.id, x, y, f=operator.attrgetter('is_legacy_client')) @handlers.handler(XTPacket('p', 'ps')) async def handle_puffle_frame(p, puffle_id: int, frame_id: int): if puffle_id in p.puffles: await p.room.send_xt('ps', puffle_id, frame_id) @handlers.handler(XTPacket('p', 'pw'), client=ClientType.Vanilla) async def handle_puffle_walk_vanilla(p, puffle: PenguinPuffle, walking: int): if not p.walking and walking: await p.update(walking=puffle.id).apply() parent_id, puffle_id = get_client_puffle_id(p, puffle.puffle_id) await p.room.send_xt('pw', p.id, puffle.id, parent_id, puffle_id, 1, puffle.hat or 0) elif not walking and puffle.id == p.walking: igloo_puffle_count = sum(not puff.backyard and puff.id != puffle.id for puff in p.puffles.values()) in_backyard = type(p.room) == PenguinBackyardRoom return_to_backyard = in_backyard or type(p.room) != PenguinIglooRoom and puffle.backyard if igloo_puffle_count >= 10 and not return_to_backyard: return await p.send_error(443) await puffle.update(backyard=return_to_backyard).apply() await p.update(walking=None).apply() await p.room.send_xt('pw', p.id, puffle.id, 0, 0, 0, 0) puffle_string = f'{puffle.id}||||||||||||{walking}' await p.room.send_xt('pw', p.id, puffle_string, f=operator.attrgetter('is_legacy_client')) p.can_dig_gold = False if not p.status_field_get(StatusField.HasWalkedPuffleFirstTime): await p.status_field_set(StatusField.HasWalkedPuffleFirstTime) else: await p.status_field_set(StatusField.HasWalkedPuffleSecondTime) @handlers.handler(XTPacket('p', 'pw'), client=ClientType.Legacy) async def handle_puffle_walk_legacy(p, puffle: PenguinPuffle, walking: int): if
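# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the server module above): how the
# vanilla-client puffle id pair and the pipe-delimited id string are built by
# get_client_puffle_id() / get_client_puffle_id_string(). The puffle entries
# below are hypothetical stand-ins for the server.puffles collection.
from types import SimpleNamespace

puffles = {
    0: SimpleNamespace(parent_id=None),   # a base type with no parent
    1001: SimpleNamespace(parent_id=0),   # a sub-breed whose parent is id 0
}

def client_puffle_id(puffle_id):
    parent_id = puffles[puffle_id].parent_id
    return (parent_id, puffle_id) if parent_id is not None else (puffle_id, '')

def client_puffle_id_string(puffle_id):
    parent_id, sub_id = client_puffle_id(puffle_id)
    return f'{parent_id}|{sub_id}'

print(client_puffle_id_string(0))      # '0|'      -> base type only
print(client_puffle_id_string(1001))   # '0|1001'  -> parent type | sub type
# ---------------------------------------------------------------------------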
#!/usr/bin/env py50n """ $lic$ Copyright (C) 2019-2020 by The Board of Trustees of Stanford University This program is free software: you can redistribute it and/or modify it under the terms of the Modified BSD-3 License as published by the Open Source Initiative. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the BSD-3 License for more details. You should have received a copy of the Modified BSD-3 License along with this program. If not, see <https://opensource.org/licenses/BSD-3-Clause>. """ import time import sys, os from template import * from parallel import SSHPool from collections import OrderedDict from util import * # add the modules dir, which contains nn_dataflow, to PYTHONPATH for subprocs # that we spawn modulesDir = os.getcwd()+'/modules' prevPYTHONPATH = os.environ.get('PYTHONPATH', '') os.environ['PYTHONPATH'] = prevPYTHONPATH+':'+modulesDir #sys.path += [cwd+'/modules'] PWD = os.getcwd() # TODO put in defines.py CNN_PARTS = [''] LSTM_PARTS = [''] GEN_STRING_BATCH_SIZE = 1 # NOTE: really handle "unroll size" in the multiplier # TODO: to coarse-parallelize entire process, transpose `for c in configs` loops # across generate fns; then, handle one config on one worker sshPool = SSHPool(8) # TODO: parameterize multipliers # returns a dict-of-dicts mapping a configuration's name to its associated params def parseConfigsTXT(txt): with open(txt, 'r') as txtFile: lines = txtFile.read().splitlines() # gets rid of \n fieldNames = lines[0].split() entries = lines[1:] # use an OrderedDict to maintain the order of entries in configs file configs = OrderedDict() for eStr in entries: eArr = eStr.split() eTyped = [] # if commented out, skip. 
if eArr[0][0] == '#': continue # convert parameters to int types as necessary for i, e in enumerate(eArr): try: te = float(e) # assume fractional if te % 1 == 0: te = int(te) # was an int eTyped.append(te) except ValueError: if ',' in e: l = e.split(',') # was a list if l[-1] == '': # chop off any trailing '' l = l[:-1] eTyped.append(l) else: eTyped.append(e) # was a str params = {fieldNames[i]: eTyped[i] for i in range(len(fieldNames))} configs[params['name']] = params return configs def buildScheduleGenString(batchSize, wordSize, nodeXY, arrayXY, localBufferSize, globalBufferSize, networkName): s = "/usr/bin/python2 modules/nn_dataflow/eyeriss_search.py --batch %d"\ " --word %d --nodes %d %d --array %d %d --regf %d --gbuf %d --op-cost 1"\ " --hier-cost 200 6 2 1 --hop-cost 0 --unit-static-cost 1 %s"\ " --disable-bypass 'i' 'o' 'f'" % (batchSize, wordSize, nodeXY, nodeXY, arrayXY, arrayXY, localBufferSize, globalBufferSize, networkName) return s #python $ZSIMPATH/misc/opTrace/test/gen_trace.py $1 $2 def buildTraceGenString(scheduleFile, tracesDir): s = "/usr/bin/python2 deps/ORNL-zsim-logic/misc/opTrace/test/gen_trace.py"\ " %s %s" % (scheduleFile, tracesDir) return s # TODO redo _temp to include batch size dependency def generateSchedules(configs, workloads): for c in configs: p = configs[c] # params for w in workloads: W = workloads[w] schedulesDir = './schedules/%s/%s' % (p['name'], W['alias']) touchDir(schedulesDir) for part in W['parts']: ## Before generating schedule, if need be: Generate the template ## and fill in batch size dependencies for final lstm layer if part == 'lstm_lm1b_2' or part == 'lstm_langmod_2': templateFile = './templates/%s_TEMPLATE.py' % part filledFile = './modules/examples/%s.py' % part # TODO non-parallelizable subsMap = {'BATCHSIZEX4': str(GEN_STRING_BATCH_SIZE * 4), 'BATCHSIZEX32': str(GEN_STRING_BATCH_SIZE * 32)} fillTemplate(templateFile, filledFile, subsMap, None) ## ## scheduleFile = schedulesDir + '/' + part if not os.path.exists(scheduleFile): cmd = buildScheduleGenString(GEN_STRING_BATCH_SIZE, p['wordSize'], p['nodeXY'], p['arrayXY'], p['localBufferSize'], p['globalBufferSize'], part) fullCmd = cmd + ' > ' + scheduleFile t = time.time() os.system(fullCmd) t = time.time() - t print(p['name'] + "::" + part + " took %.2f s." % t) def generateOps(configs, workloads): for c in configs: p = configs[c] for w in workloads: W = workloads[w] scheduleFile = './schedules/%s/%s/%s' % (p['name'], W['alias'], W['name']) print(scheduleFile) opsFile = './ops/%s_%s_ops.pl' % (p['name'], W['name']) numNetworkParts = len(W['parts']) # for historical reasons os.system('./Extract_operations.pl %s %s %s' % (scheduleFile, opsFile, numNetworkParts)) def generateTraces(configs, workloads): for c in configs: p = configs[c] # params for w in workloads: W = workloads[w] tracesDir = './traces/%s/%s' % (p['name'], W['alias']) if not os.path.isdir(tracesDir): touchDir(tracesDir) for part in W['parts']: scheduleFile = './schedules/%s/%s/%s' % (p['name'], W['alias'], part) cmd = buildTraceGenString(scheduleFile, tracesDir) t = time.time() os.system(cmd) t = time.time() - t print(p['name'] + "::" + part + " took %.2f s." 
% t) # template-fill the zsim and tech config files # NOTE: must make the distinction between baseline and n3xt, as their templates # are different (for both zsim .cfg and tech config) def generateSimConfigs(configs): for c in configs: p = configs[c] name = p['name'] if 'baseline' in name: zsimTemplateFile = './templates/zsim-baseline.cfg' techTemplateFile = './templates/tech-baseline.pl' else: zsimTemplateFile = './templates/zsim-non-baseline.cfg' techTemplateFile = './templates/tech-non-baseline.pl' cfgFilesDir = './cfg/' + name if True:#not os.path.isdir(cfgFilesDir): touchDir(cfgFilesDir) zsimFile = cfgFilesDir + '/zsim.cfg' #techFile = cfgFilesDir + '/tech.pl' # NOTE must follow Perl script convention for now... techFile = PWD + '/config/tech/Config_%s_28nm_1_16.pl' % name # Compute some parameters needed to fill the templates. nNodes = p['nodeXY']**2 nCompute = p['arrayXY']**2 patchRoot = PWD + '/patchRoot/patchRoot_bc%d_bn%d' % (nNodes, nNodes) assert(p['globalBufferSize'] % nNodes == 0) gbufPerUnitSize = p['globalBufferSize'] / nNodes if 'baseline' in name: memBWPerUnit = None # won't be filled in else: memBWPerUnit = p['memBWPerChannel'] * p['memNChannels'] // nNodes compLkgPerNode = p['compLkgPerCU'] * nCompute # not necessarily an int here! # Fill templates for zsim... # NOTE: for baseline templates which don't have latency, etc. # macros (since they use DDR), the template-fill will just ignore # unneded subs in subsMap. zsimSubsMap = {'nNodes': str(nNodes), 'frequency': str(p['frequency']), 'gbufPerUnitSize': str(int(gbufPerUnitSize)), 'globalBufferLat': str(p['globalBufferLat']), # non-baseline-only subs. below 'memRDLat': str(p['memRDLat']), 'memWRLat': str(p['memWRLat']), 'memBWPerUnit': str(memBWPerUnit), 'patchRoot': patchRoot} # field(s) specific to non-baseline configs if 'baseline' not in name: zsimSubsMap['memNChannels'] = str(p['memNChannels']) fillTemplate(zsimTemplateFile, zsimFile, zsimSubsMap, None, '!') # ...and tech. techSubsMap = {'nNodes': str(nNodes), 'frequency': str(float(p['frequency'])/1000), 'nCompute': str(nCompute), 'compLkgPerNode': str(compLkgPerNode), # TODO beware scientific notation for too-small inputs! 'dynEPerOp': str(float(p['dynEPerOp'])/1000), 'regEPerBit': str(p['regEPerBit']), 'localBufferEPerAccess': str(p['localBufferEPerBit']*256/1000), 'globalBufferEPerAccess': str(p['globalBufferEPerBit']*256/1000), 'memRDEPerBit': str(p['memRDEPerBit']), 'memWREPerBit': str(p['memWREPerBit'])} # field(s) specific to non-baseline configs if 'baseline' not in name: techSubsMap['memLkg'] = str(int(p['memLkg']*1000)) fillTemplate(techTemplateFile, techFile, techSubsMap, None, '!') os.system('chmod a+x %s' % techFile) # make the .pl file executable def createWorkingDirs(configs, workloads): for c in configs: p = configs[c] # params for w in workloads: W = workloads[w] traceSrcSubDir = './traces/%s/%s/*' % (p['name'], W['alias']) workingSubDir = './working/%s/%s' % (p['name'], W['alias']) os.system('mkdir -p %s' % workingSubDir) # create the subdir linkclone_dir(traceSrcSubDir, workingSubDir) # clone the trace directory def generatePatchRoots(configs): for c in configs: p = configs[c] nNodes = p['nodeXY']**2 patchRootDir = './patchRoot/patchRoot_bc%d_bn%d' % (nNodes, nNodes) if not os.path.exists(patchRootDir): os.system('./patchRoot/genPatchRoot.py -n %d --nodes %d' ' -d patchRoot/patchRoot_bc%d_bn%d' % (nNodes, nNodes, nNodes, nNodes)) os.system('chmod -R 755 ./patchRoot/patchRoot_bc%d_bn%d' % (nNodes, nNodes)) # TODO prevent re-run if zsim.out exists? 
def runZsimSimulations(configs, workloads, usePool=False): for c in configs: p = configs[c] # params print('****Zsim for %s' % p['name']) for w in workloads: W = workloads[w] workingSubDir = PWD + '/working/%s/%s' % (p['name'], W['alias']) zsimCfgFullPath = PWD + '/cfg/%s/zsim.cfg' % p['name'] zsimCmd = 'cd %s && %s/simulate_zsim_3.sh %s' %\ (workingSubDir, PWD, zsimCfgFullPath) if usePool: sshPool.submit(zsimCmd) else: os.system(zsimCmd) if usePool: sshPool.join() def createPostprocStructure(configs, workloads): for i, c in enumerate(configs): p = configs[c] for w in workloads: W = workloads[w] workingSubDir = PWD + '/working/%s/%s' % (p['name'], W['alias']) postprocSubDir = PWD + '/%s/16/1/%s' % (W['name'], p['name']) touchDir(postprocSubDir) os.system('ln -sf %s/* %s' % (workingSubDir, postprocSubDir)) # call Parse_all_results.pl on all the simulated files def runPostproc(configs, workloads): os.system('mkdir -p ./results') # done anyway for c in configs: p = configs[c] for w in workloads: W = workloads[w] cmdStr = './Parse_all_results.pl %s %s %s %s' % (W['name'], str(1), p['baseline'], p['name']) print(cmdStr) os.system(cmdStr) def spreadsheetResults(configs, workloads): for w in workloads: W = workloads[w] print('-'*10 + ' ' + W['alias'] + ' ' + '-'*10) for c in configs: resultsFileName = './results/%s_16_1_%s.csv' % (W['name'], c) with open(resultsFileName, 'r') as f: resLine = f.readlines()[-1] toks = resLine.split(',') # align and print the names and columns print(c + ',' + ' '*(30-len(c)) + ','.join(toks[1:7+1])) print('') # just a separating line if __name__ == '__main__': # parse the configs and workloads configs = parseConfigsTXT('./configs.conf') workloads = parseConfigsTXT('./workloads.conf') SKIP_SCHED_TRACE = True if not SKIP_SCHED_TRACE: print('-'*30 + 'schedules' + '-'*30) generateSchedules(configs, workloads) print('-'*30 + 'ops' + '-'*30) generateOps(configs, workloads) if not SKIP_SCHED_TRACE: print('-'*30 + 'traces' + '-'*30) generateTraces(configs, workloads) print('-'*30 + 'patchroots'
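# ---------------------------------------------------------------------------
# Illustrative sketch, separate from the pipeline above: the per-token typing
# rule that parseConfigsTXT() applies to each whitespace-separated field of a
# configs/workloads line -- whole-number floats become ints, comma-separated
# tokens become lists (with a trailing empty element dropped), everything else
# stays a string; '#'-prefixed entries are skipped by the caller.
def type_token(tok):
    try:
        val = float(tok)
        return int(val) if val % 1 == 0 else val
    except ValueError:
        if ',' in tok:
            parts = tok.split(',')
            if parts[-1] == '':
                parts = parts[:-1]
            return parts
        return tok

print(type_token('16'))        # 16 (int)
print(type_token('0.5'))       # 0.5 (float)
print(type_token('fc1,fc2,'))  # ['fc1', 'fc2']
print(type_token('baseline'))  # 'baseline'
# ---------------------------------------------------------------------------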
<gh_stars>1-10 import tensorflow as tf class _ResidualInnerBlock(tf.keras.Model): def __init__(self, num_filters_resnet_conv1, num_filters_resnet_conv2, kernel_size, *args, **kwargs): super().__init__(*args, **kwargs) self.conv1 = tf.keras.layers.Conv2D( filters=num_filters_resnet_conv1, kernel_size=[kernel_size, kernel_size], strides=[1, 1], name="res3x3", padding="SAME", activation=None) self.conv2 = tf.keras.layers.Conv2D( filters=num_filters_resnet_conv2, kernel_size=[1, 1], strides=[1, 1], name="res1x1", padding="SAME", activation=None) def call(self, inputs, *args, **kwargs): h_i = tf.nn.relu(inputs) h_i = self.conv1(h_i) h_i = tf.nn.relu(h_i) h_i = self.conv2(h_i) return h_i class ResidualBlock(tf.keras.Model): def __init__(self, num_residual_layers, num_filters_resnet_conv1, num_filters_resnet_conv2, kernel_size, *args, **kwargs): super().__init__(*args, **kwargs) self.blocks = [] for _ in range(0, num_residual_layers): self.blocks.append(_ResidualInnerBlock( num_filters_resnet_conv1=num_filters_resnet_conv1, num_filters_resnet_conv2=num_filters_resnet_conv2, kernel_size=kernel_size)) def call(self, inputs, *args, **kwargs): h = inputs for block in self.blocks: h = tf.keras.layers.add([h, block(h)]) return h class ConvEncoderLayer(tf.keras.Model): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.config = config self.residual_block = ResidualBlock( num_residual_layers=config.num_residual_layers, num_filters_resnet_conv1=config.num_filters_resnet_conv1, num_filters_resnet_conv2=config.num_filters_resnet_conv2, kernel_size=self.config.kernel_size) if 'batch_norm' in self.config._fields: if self.config.batch_norm: self.bn_resnet_block = tf.keras.layers.BatchNormalization( momentum=config.bn_momentum, renorm=config.bn_renorm) self.blocks = [] self.bn_blocks = [] for i in range(0, config.num_conv_layers): self.blocks.append(tf.keras.layers.Conv2D( filters=config.filter_sizes[i], kernel_size=[self.config.kernel_size, self.config.kernel_size], strides=[2, 2], padding="SAME", activation=tf.nn.relu if self.config.activation == "relu" else tf.nn.leaky_relu)) if 'batch_norm' in self.config._fields: if self.config.batch_norm: self.bn_blocks.append(tf.keras.layers.BatchNormalization( momentum=config.bn_momentum, renorm=config.bn_renorm)) def call(self, inputs, *args, **kwargs): x = inputs if 'batch_norm' in self.config._fields: if self.config.batch_norm: for block, bn_block in zip(self.blocks, self.bn_blocks): x = block(x) x = bn_block(x, *args, **kwargs) x = self.residual_block(x) x = self.bn_resnet_block(x, *args, **kwargs) else: for block in self.blocks: x = block(x) x = self.residual_block(x) else: for block in self.blocks: x = block(x) x = self.residual_block(x) return x class FcEncoderLayer(tf.keras.Model): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.config = config self.blocks = tf.keras.Sequential() if self.config.use_dropout: self.do_blocks = tf.keras.Sequential() for i in range(0, config.num_fc_layers): self.blocks.add(tf.keras.layers.Dense( units=config.num_hiddens[i], activation=None if self.config.architecture == "linear" else (tf.nn.relu if self.config.activation=="relu" else tf.nn.leaky_relu))) if self.config.use_dropout: self.do_blocks.add( tf.keras.layers.Dropout(self.config.dropout_rate)) def call(self, inputs, *args, **kwargs): x = inputs if self.config.use_dropout: raise NotImplementedError else: x = self.blocks(x) return x class ConvDecoderLayer(tf.keras.Model): def __init__(self, config, **kwargs): super().__init__(**kwargs) 
self.config = config self.residual_block = ResidualBlock( num_residual_layers=config.num_residual_layers, num_filters_resnet_conv1=config.num_filters_resnet_conv1, num_filters_resnet_conv2=config.num_filters_resnet_conv2, kernel_size=self.config.kernel_size) if 'batch_norm' in self.config._fields: if self.config.batch_norm: self.bn_resnet_block = tf.keras.layers.BatchNormalization( momentum=config.bn_momentum, renorm=config.bn_renorm) self.blocks = [] self.bn_blocks = [] for i in range(0, config.num_conv_layers-1): self.blocks.append( tf.keras.layers.Convolution2DTranspose( filters=config.filter_sizes[config.num_conv_layers-2-i], kernel_size=[self.config.kernel_size, self.config.kernel_size], strides=[2, 2], padding="SAME", activation=tf.nn.relu if self.config.activation == "relu" else tf.nn.leaky_relu)) if 'batch_norm' in self.config._fields: if self.config.batch_norm: self.bn_blocks.append(tf.keras.layers.BatchNormalization( momentum=config.bn_momentum, renorm=config.bn_renorm)) self.output_layer = \ tf.keras.layers.Convolution2DTranspose( filters=config.num_filters_out, kernel_size=[self.config.kernel_size, self.config.kernel_size], strides=[2, 2], padding="SAME", activation=tf.nn.sigmoid if config.sigmoid_activation else None) def call(self, inputs, *args, **kwargs): x = inputs x = self.residual_block(x) if 'batch_norm' in self.config._fields: if self.config.batch_norm: x = self.bn_resnet_block(x, *args, **kwargs) for block, bn_block in zip(self.blocks, self.bn_blocks): x = block(x) x = bn_block(x, *args, **kwargs) else: for block in self.blocks: x = block(x) else: for block in self.blocks: x = block(x) x = self.output_layer(x) return x class FcDecoderLayer(tf.keras.Model): def __init__(self, config, units_out, **kwargs): super().__init__(**kwargs) self.config = config self.units_out = units_out def build(self, input_shape): self.blocks = tf.keras.Sequential() if self.config.use_dropout: self.do_blocks = tf.keras.Sequential() for i in range(0, self.config.num_fc_layers-1): self.blocks.add( tf.keras.layers.Dense( units=self.config.num_hiddens[self.config.num_fc_layers-2-i], activation=None if self.config.architecture == "linear" else (tf.nn.relu if self.config.activation=="relu" else tf.nn.leaky_relu))) if self.config.use_dropout: self.do_blocks.add( tf.keras.layers.Dropout(self.config.dropout_rate)) self.last_block = tf.keras.layers.Dense(units=self.units_out, activation=tf.nn.sigmoid if self.config.sigmoid_activation else None) self.built = True def call(self, inputs, *args, **kwargs): x = inputs if self.config.use_dropout: raise NotImplementedError else: x = self.blocks(x) x = self.last_block(x) return x class StochasticLayer(tf.keras.Model): def __init__(self, config, dim_latent, **kwargs): super().__init__(**kwargs) self.config = config self.flatten = tf.keras.layers.Flatten() self.dim_latent = dim_latent self._input_shape = None def build(self, input_shape): units = 1 for i in range(1, len(input_shape)): units *= input_shape[i] self._input_shape = input_shape self.dense_in = tf.keras.layers.Dense(units=self.dim_latent*2, activation=None) if 'batch_norm' in self.config._fields: if self.config.batch_norm: self.bn1 = tf.keras.layers.BatchNormalization( momentum=self.config.bn_momentum, renorm=self.config.bn_renorm) if self.config.use_dropout: self.drop1 = tf.keras.layers.Dropout(self.config.dropout_rate) self.dense_out = tf.keras.layers.Dense(units=units, activation=None) if self.config.use_dropout: self.drop2 = tf.keras.layers.Dropout(self.config.dropout_rate) if 'batch_norm' in 
self.config._fields: if self.config.batch_norm: self.bn2 = tf.keras.layers.BatchNormalization( momentum=self.config.bn_momentum, renorm=self.config.bn_renorm) self.built = True def call(self, inputs, *args, **kwargs): x_shape = tf.shape(inputs) x = self.flatten(inputs) mu_log_sigma2 = self.dense_in(x) if self.config.use_dropout: mu_log_sigma2 = self.drop1(mu_log_sigma2, *args, **kwargs) if 'batch_norm' in self.config._fields: if self.config.batch_norm: mu_log_sigma2 = self.bn1(mu_log_sigma2, *args, **kwargs) mu, log_sigma2 = tf.split(mu_log_sigma2, num_or_size_splits=2, axis=-1) eps = tf.random.normal(tf.shape(mu), 0, 1) z_std = tf.exp(log_sigma2 / 2) z = eps*z_std + mu inter_z = self.dense_out(z) if self.config.use_dropout: inter_z = self.drop2(inter_z, *args, **kwargs) if 'batch_norm' in self.config._fields: if self.config.batch_norm: inter_z = self.bn2(inter_z, *args, **kwargs) output = tf.reshape(inter_z, x_shape) kld_z = 0.5 * tf.reduce_sum( tf.square(mu) + tf.exp(log_sigma2) - log_sigma2 - 1, axis=1) out_dict = {"z": z, "log_sigma_2": log_sigma2, "mu": mu, "kld_z": kld_z} return output, out_dict def set_encodings(self, z, *args, **kwargs): inter_z = self.dense_out(z) if self.config.use_dropout: inter_z = self.drop2(inter_z, *args, **kwargs) if 'batch_norm' in self.config._fields: if self.config.batch_norm: inter_z = self.bn2(inter_z, *args, **kwargs) output_shape = [-1] for i in range(1, len(self._input_shape)): output_shape.append(self._input_shape[i]) output = tf.reshape(inter_z, output_shape) return output def generate(self, num_samples, *args, **kwargs): z = tf.random_normal([num_samples, self.dim_latent], 0, 1) inter_z = self.dense_out(z) if self.config.use_dropout: inter_z = self.drop2(inter_z, *args, **kwargs) if 'batch_norm' in self.config._fields: if self.config.batch_norm: inter_z = self.bn2(inter_z, *args, **kwargs) output_shape = [-1] for i in range(1, len(self._input_shape)): output_shape.append(self._input_shape[i]) output = tf.reshape(inter_z, output_shape) return output, {"z": z} def mean_encode(self, inputs, *args, **kwargs): x = self.flatten(inputs) mu_log_sigma2 = self.dense_in(x) if self.config.use_dropout: mu_log_sigma2 = self.drop1(mu_log_sigma2, *args, **kwargs) if 'batch_norm' in self.config._fields: if self.config.batch_norm: mu_log_sigma2 = self.bn1(mu_log_sigma2, *args, **kwargs) mu, _ = tf.split(mu_log_sigma2, num_or_size_splits=2, axis=-1) return {"z": mu} class DeterministicLayer(tf.keras.Model): def __init__(self, config, dim_latent, **kwargs): super().__init__(**kwargs) self.flatten = tf.keras.layers.Flatten() self.dim_latent = dim_latent self._input_shape = None self.config = config def build(self, input_shape): units = 1 for i in range(1, len(input_shape)): units *= input_shape[i] self._input_shape = input_shape self.dense_in = tf.keras.layers.Dense(units=self.dim_latent, activation=None) if self.config.use_dropout: self.drop1 = tf.keras.layers.Dropout(self.config.dropout_rate) self.dense_out = tf.keras.layers.Dense(units=units, activation=None) if self.config.use_dropout: self.drop2 = tf.keras.layers.Dropout(self.config.dropout_rate) self.built = True def call(self, inputs, generate=False, *args, **kwargs): x_shape = tf.shape(inputs) x = self.flatten(inputs) z = self.dense_in(x) if self.config.use_dropout: z = self.drop1(z, *args, **kwargs) inter_z = self.dense_out(z) if self.config.use_dropout: inter_z = self.drop2(inter_z, *args, **kwargs) output = tf.reshape(inter_z, x_shape) return output, {"z": z} def set_encodings(self, z, *args, **kwargs): 
inter_z = self.dense_out(z) if self.config.use_dropout: inter_z = self.drop2(inter_z, *args, **kwargs) output_shape = [-1] for i in range(1, len(self._input_shape)): output_shape.append(self._input_shape[i]) output = tf.reshape(inter_z, output_shape) return output class FcDetAutoencoderModel(tf.keras.Model): def __init__(self, config, dim_latent, *args, **kwargs): super().__init__(*args, **kwargs) self.config = config self.dim_latent = dim_latent self.ae_type = "deterministic" self._input_shape = None self.flatten = tf.keras.layers.Flatten() def build(self, input_shape): self._input_shape = input_shape units = input_shape[1]*input_shape[2]*input_shape[3] self.encoder = FcEncoderLayer(config=self.config) self.decoder = FcDecoderLayer(config=self.config, units_out=units) self.deterministic_layer = DeterministicLayer(config=self.config, dim_latent=self.dim_latent) self.built = True def call(self, inputs, *args, **kwargs): x = self.flatten(inputs) x = self.encoder(inputs=x, *args, **kwargs) x, output_dict = self.deterministic_layer(inputs=x, *args, **kwargs) x = self.decoder(inputs=x, *args, **kwargs) output_shape = [-1, self._input_shape[1], self._input_shape[2], self._input_shape[3]] x = tf.reshape(x, output_shape) output_dict.update({"output": x, "inputs": inputs}) return output_dict def encode(self, inputs, *args, **kwargs): x = self.flatten(inputs) x = self.encoder(inputs=x, *args, **kwargs) _, output_dict = self.deterministic_layer(inputs=x, *args, **kwargs) return output_dict def mean_encode(self, inputs, *args, **kwargs): return self.encode(inputs, *args, **kwargs) def decode(self, x, *args, **kwargs): x = self.deterministic_layer.set_encodings(x, *args, **kwargs) x = self.decoder(inputs=x, *args, **kwargs) output_shape = [-1, self._input_shape[1], self._input_shape[2], self._input_shape[3]] x = tf.reshape(x, output_shape) return {"output": x} def autoencode(self, inputs, *args, **kwargs): return self.call(inputs, *args, **kwargs) class FcVarAutoencoderModel(tf.keras.Model): def __init__(self, config, dim_latent, *args, **kwargs): super().__init__(*args, **kwargs) self.config = config self.dim_latent = dim_latent self.ae_type = "variational" self._input_shape = None self.flatten = tf.keras.layers.Flatten() def build(self, input_shape): self._input_shape = input_shape units = input_shape[1]*input_shape[2]*input_shape[3] self.encoder = FcEncoderLayer(config=self.config) self.decoder = FcDecoderLayer(config=self.config, units_out=units) self.stochastic_layer = StochasticLayer(config=self.config, dim_latent=self.dim_latent) self.built = True def call(self, inputs, *args, **kwargs): x = self.flatten(inputs) x = self.encoder(inputs=x, *args, **kwargs) x, output_dict = self.stochastic_layer(inputs=x, *args, **kwargs) x = self.decoder(inputs=x, *args, **kwargs) output_shape = [-1, self._input_shape[1], self._input_shape[2], self._input_shape[3]] x = tf.reshape(x, output_shape) output_dict.update({"output": x, "inputs": inputs}) return output_dict def encode(self, inputs, *args, **kwargs): x = self.flatten(inputs) x = self.encoder(inputs=x, *args, **kwargs) _, output_dict = self.stochastic_layer(inputs=x, *args, **kwargs) return output_dict def mean_encode(self, inputs, *args, **kwargs): x = self.flatten(inputs) x = self.encoder(inputs=x, *args, **kwargs) output_dict = self.stochastic_layer.mean_encode(inputs=x, *args, **kwargs) return output_dict def decode(self, x, *args, **kwargs): x = self.stochastic_layer.set_encodings(x, *args, **kwargs) x = self.decoder(inputs=x, *args, **kwargs) 
output_shape = [-1, self._input_shape[1], self._input_shape[2], self._input_shape[3]] x = tf.reshape(x, output_shape) return {"output": x} def generate(self, num_samples, *args, **kwargs): x, output_dict = self.stochastic_layer.generate(num_samples, *args, **kwargs) x = self.decoder(inputs=x, *args, **kwargs)
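# ---------------------------------------------------------------------------
# Illustrative sketch (standalone, not wired into the models above): the
# reparameterization trick and KL term that StochasticLayer.call() computes
# from the concatenated (mu, log_sigma2) output of its dense_in layer.
import tensorflow as tf

def reparameterize(mu_log_sigma2):
    mu, log_sigma2 = tf.split(mu_log_sigma2, num_or_size_splits=2, axis=-1)
    eps = tf.random.normal(tf.shape(mu))
    z = eps * tf.exp(log_sigma2 / 2) + mu            # z ~ N(mu, sigma^2)
    kld = 0.5 * tf.reduce_sum(                       # KL(q(z|x) || N(0, I))
        tf.square(mu) + tf.exp(log_sigma2) - log_sigma2 - 1, axis=1)
    return z, kld

mu_log_sigma2 = tf.random.normal([4, 2 * 8])         # batch of 4, latent dim 8
z, kld = reparameterize(mu_log_sigma2)
print(z.shape, kld.shape)                             # (4, 8) (4,)
# ---------------------------------------------------------------------------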
review2 = Rating.objects.create( addon=self.addon, body='review 2', user=user_factory()) review1.update(created=self.days_ago(1)) # Add a review belonging to a different add-on, a reply and a deleted # review. They should not be present in the list. review_deleted = Rating.objects.create( addon=self.addon, body='review deleted', user=review1.user) review_deleted.delete() Rating.objects.create( addon=self.addon, body='reply to review 2', reply_to=review2, user=user_factory()) Rating.objects.create( addon=addon_factory(), body='review other addon', user=review1.user) # Also add a deleted reply to the first review, it should not be shown. deleted_reply = Rating.objects.create( addon=self.addon, body='reply to review 1', reply_to=review1, user=user_factory()) deleted_reply.delete() assert Rating.unfiltered.count() == 6 response = self.client.get(self.url, {'addon': self.addon.pk}) assert response.status_code == 200 data = json.loads(response.content) assert data['count'] == 2 assert data['results'] assert len(data['results']) == 2 assert data['results'][0]['id'] == review2.pk assert data['results'][0]['reply'] is not None assert data['results'][1]['id'] == review1.pk assert data['results'][1]['reply'] is None def test_list_admin_show_deleted_if_requested(self): self.user = user_factory() self.grant_permission(self.user, 'Addons:Edit') self.client.login_api(self.user) review1 = Rating.objects.create( addon=self.addon, body='review 1', user=user_factory()) review2 = Rating.objects.create( addon=self.addon, body='review 2', user=user_factory()) review1.update(created=self.days_ago(1)) # Add a review belonging to a different add-on, a reply and a deleted # review. The deleted review should be present, not the rest. review_deleted = Rating.objects.create( addon=self.addon, body='review deleted', user=review1.user) review_deleted.update(created=self.days_ago(2)) review_deleted.delete() Rating.objects.create( addon=self.addon, body='reply to review 2', reply_to=review2, user=user_factory()) Rating.objects.create( addon=addon_factory(), body='review other addon', user=review1.user) # Also add a deleted reply to the first review, it should be shown # as a child of that review. deleted_reply = Rating.objects.create( addon=self.addon, body='reply to review 1', reply_to=review1, user=user_factory()) deleted_reply.delete() assert Rating.unfiltered.count() == 6 response = self.client.get( self.url, {'addon': self.addon.pk, 'filter': 'with_deleted'}) assert response.status_code == 200 data = json.loads(response.content) assert data['count'] == 3 assert data['results'] assert len(data['results']) == 3 assert data['results'][0]['id'] == review2.pk assert data['results'][0]['reply'] is not None assert data['results'][1]['id'] == review1.pk assert data['results'][1]['reply'] is not None assert data['results'][1]['reply']['id'] == deleted_reply.pk assert data['results'][2]['id'] == review_deleted.pk def test_list_weird_parameters(self): self.addon.update(slug=u'my-slûg') user = user_factory() Rating.objects.create(addon=self.addon, body='A review.', user=user) # No user, but addon is present. response = self.client.get( self.url, {'addon': self.addon.pk, 'user': u''}) assert response.status_code == 200 # No addon, but user is present. response = self.client.get(self.url, {'addon': u'', 'user': user.pk}) assert response.status_code == 200 # Addon parameter is utf-8. 
response = self.client.get(self.url, {'addon': u'my-slûg'}) assert response.status_code == 200 # User parameter is weird (it should be a pk, as string): 404. response = self.client.get( self.url, {'addon': self.addon.pk, 'user': u'çæ→'}) assert response.status_code == 400 data = json.loads(response.content) assert data == {'detail': 'user parameter should be an integer.'} # Version parameter is weird (it should be a pk, as string): 404. response = self.client.get( self.url, {'addon': self.addon.pk, 'version': u'çæ→'}) assert response.status_code == 400 data = json.loads(response.content) assert data == {'detail': 'version parameter should be an integer.'} def test_get_then_post_then_get_any_caching_is_cleared(self): """Make sure there is no overzealous caching going on when requesting the list of reviews for a given user+addon+version combination. Regression test for #5006.""" self.user = user_factory() self.client.login_api(self.user) # Do a get filtering on both addon and user: it should not find # anything. response = self.client.get(self.url, { 'addon': self.addon.pk, 'version': self.addon.current_version.pk, 'user': self.user.pk }) assert response.status_code == 200 data = json.loads(response.content) assert len(data['results']) == 0 assert data['count'] == 0 # Do a post to add a review by this user. response = self.client.post(self.url, { 'addon': self.addon.pk, 'body': u'test bodyé', 'score': 5, 'version': self.addon.current_version.pk}) assert response.status_code == 201 # Re-do the same get as before, should now find something since the # view is avoiding count() caching in this case. response = self.client.get(self.url, { 'addon': self.addon.pk, 'version': self.addon.current_version.pk, 'user': self.user.pk }) assert response.status_code == 200 data = json.loads(response.content) assert len(data['results']) == 1 assert data['count'] == 1 def test_no_throttle(self): self.user = user_factory() self.client.login_api(self.user) Rating.objects.create( addon=self.addon, body='review 1', user=user_factory(), rating=1) # We should be able to get as quick as we want. 
response = self.client.get(self.url, {'addon': self.addon.pk}) assert response.status_code == 200 response = self.client.get(self.url, {'addon': self.addon.pk}) assert response.status_code == 200 class TestRatingViewSetDelete(TestCase): client_class = APITestClient detail_url_name = 'rating-detail' def setUp(self): self.addon = addon_factory( guid=generate_addon_guid(), name=u'My Addôn', slug='my-addon') self.user = user_factory() self.rating = Rating.objects.create( addon=self.addon, version=self.addon.current_version, rating=1, body='My review', user=self.user) self.url = reverse_ns( self.detail_url_name, kwargs={'pk': self.rating.pk}) def test_delete_anonymous(self): response = self.client.delete(self.url) assert response.status_code == 401 def test_delete_no_rights(self): other_user = user_factory() self.client.login_api(other_user) response = self.client.delete(self.url) assert response.status_code == 403 def test_delete_admin(self): admin_user = user_factory() self.grant_permission(admin_user, 'Addons:Edit') self.client.login_api(admin_user) response = self.client.delete(self.url) assert response.status_code == 204 assert Rating.objects.count() == 0 assert Rating.unfiltered.count() == 1 def test_delete_moderator_flagged(self): self.rating.update(editorreview=True) admin_user = user_factory() self.grant_permission(admin_user, 'Ratings:Moderate') self.client.login_api(admin_user) response = self.client.delete(self.url) assert response.status_code == 204 assert Rating.objects.count() == 0 assert Rating.unfiltered.count() == 1 def test_delete_moderator_not_flagged(self): admin_user = user_factory() self.grant_permission(admin_user, 'Ratings:Moderate') self.client.login_api(admin_user) response = self.client.delete(self.url) assert response.status_code == 403 assert Rating.objects.count() == 1 def test_delete_moderator_but_addon_author(self): admin_user = user_factory() self.addon.addonuser_set.create(user=admin_user) self.grant_permission(admin_user, 'Ratings:Moderate') self.client.login_api(admin_user) response = self.client.delete(self.url) assert response.status_code == 403 assert Rating.objects.count() == 1 def test_delete_owner(self): self.client.login_api(self.user) response = self.client.delete(self.url) assert response.status_code == 204 assert Rating.objects.count() == 0 assert Rating.unfiltered.count() == 1 def test_delete_owner_reply(self): addon_author = user_factory() self.addon.addonuser_set.create(user=addon_author) self.client.login_api(addon_author) reply = Rating.objects.create( addon=self.addon, reply_to=self.rating, body=u'Reply that will be delêted...', user=addon_author) self.url = reverse_ns(self.detail_url_name, kwargs={'pk': reply.pk}) response = self.client.delete(self.url) assert response.status_code == 204 assert Rating.objects.count() == 1 assert Rating.unfiltered.count() == 2 def test_delete_404(self): self.client.login_api(self.user) self.url = reverse_ns( self.detail_url_name, kwargs={'pk': self.rating.pk + 42}) response = self.client.delete(self.url) assert response.status_code == 404 assert Rating.objects.count() == 1 def test_no_throttle(self): # Add two reviews for different versions. rating_a = self.rating version_b = version_factory(addon=self.addon) rating_b = Rating.objects.create( addon=self.addon, version=version_b, rating=2, body='Second Review to delete', user=self.user) # And confirm we can rapidly delete them. 
self.client.login_api(self.user) response = self.client.delete( reverse_ns(self.detail_url_name, kwargs={'pk': rating_a.pk})) assert response.status_code == 204 response = self.client.delete( reverse_ns(self.detail_url_name, kwargs={'pk': rating_b.pk})) assert response.status_code == 204 assert Rating.objects.count() == 0 class TestRatingViewSetEdit(TestCase): client_class = APITestClient detail_url_name = 'rating-detail' def setUp(self): self.addon = addon_factory( guid=generate_addon_guid(), name=u'My Addôn', slug='my-addon') self.user = user_factory(username='areviewuser') self.rating = Rating.objects.create( addon=self.addon, version=self.addon.current_version, rating=1, body=u'My revïew', user=self.user) self.url = reverse_ns( self.detail_url_name, kwargs={'pk': self.rating.pk}) def test_edit_anonymous(self): response = self.client.patch(self.url, {'body': u'løl!'}) assert response.status_code == 401 response = self.client.put(self.url, {'body': u'løl!'}) assert response.status_code == 405 def test_edit_no_rights(self): other_user = user_factory() self.client.login_api(other_user) response = self.client.patch(self.url, {'body': u'løl!'}) assert response.status_code == 403 response = self.client.put(self.url, {'body': u'løl!'}) assert response.status_code == 405 def test_edit_no_rights_even_reviewer(self): # Only admins can edit a review they didn't write themselves. reviewer_user = user_factory() self.grant_permission(reviewer_user, 'Addons:Review') self.client.login_api(reviewer_user) response = self.client.patch(self.url, {'body': u'løl!'}) assert response.status_code == 403 response = self.client.put(self.url, {'body': u'løl!'}) assert response.status_code == 405 def test_edit_owner_partial(self): original_created_date = self.days_ago(1) self.rating.update(created=original_created_date) self.client.login_api(self.user) response = self.client.patch(self.url, {'score': 2, 'body': u'løl!'}) assert response.status_code == 200 self.rating.reload() assert response.data['id'] == self.rating.pk assert response.data['body'] == unicode(self.rating.body) == u'løl!' 
assert response.data['score'] == self.rating.rating == 2 assert response.data['version'] == { 'id': self.rating.version.id, 'version': self.rating.version.version } assert self.rating.created == original_created_date activity_log = ActivityLog.objects.latest('pk') assert activity_log.user == self.user assert activity_log.arguments == [self.addon, self.rating] assert activity_log.action == amo.LOG.EDIT_RATING.id assert len(mail.outbox) == 0 def test_edit_owner_put_not_allowed(self): self.client.login_api(self.user) response = self.client.put(self.url, {'body': u'løl!'}) assert response.status_code == 405 def test_edit_dont_allow_version_to_be_edited(self): self.client.login_api(self.user) new_version = version_factory(addon=self.addon) response = self.client.patch(self.url, {'version': new_version.pk}) assert response.status_code == 400 assert response.data['version'] == [ u"You can't change the version of the add-on reviewed once " u"the review has been created."] def test_edit_dont_allow_addon_to_be_edited(self): self.client.login_api(self.user) new_addon = addon_factory() response = self.client.patch(self.url, {'addon': new_addon.pk}) assert response.status_code == 400 assert response.data['addon'] == [ u"You can't change the add-on of a review once it has been " u"created."] def test_edit_admin(self): original_review_user = self.rating.user admin_user = user_factory(username='mylittleadmin') self.grant_permission(admin_user, 'Addons:Edit') self.client.login_api(admin_user) response = self.client.patch(self.url, {'body': u'løl!'}) assert response.status_code == 200 self.rating.reload() assert response.data['id'] == self.rating.pk assert response.data['body'] == unicode(self.rating.body) == u'løl!' assert response.data['version'] == { 'id': self.rating.version.id, 'version': self.rating.version.version, } assert self.rating.user == original_review_user activity_log = ActivityLog.objects.latest('pk') assert activity_log.user == admin_user assert activity_log.arguments == [self.addon, self.rating] assert activity_log.action == amo.LOG.EDIT_RATING.id assert len(mail.outbox) == 0 def test_edit_reply(self): addon_author = user_factory() self.addon.addonuser_set.create(user=addon_author) self.client.login_api(addon_author) reply = Rating.objects.create( reply_to=self.rating, body=u'This is â reply', user=addon_author, addon=self.addon) self.url = reverse_ns(self.detail_url_name, kwargs={'pk': reply.pk}) response = self.client.patch(self.url, {'score': 5}) assert response.status_code == 200 # Since the review we're editing was a reply, rating' was an unknown # parameter and was ignored. reply.reload() assert reply.rating is None assert 'score' not in response.data activity_log = ActivityLog.objects.latest('pk') assert activity_log.user == addon_author
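# ---------------------------------------------------------------------------
# Illustrative sketch only: a generic Django soft-delete setup showing the
# `objects` (filtered) vs `unfiltered` manager split that these tests rely on
# when they compare Rating.objects.count() with Rating.unfiltered.count().
# This is NOT the actual addons-server Rating model, just the pattern; it
# would live in a models.py inside a configured Django app.
from django.db import models

class VisibleManager(models.Manager):
    def get_queryset(self):
        # Hide soft-deleted rows from the default manager.
        return super().get_queryset().filter(deleted=False)

class SoftDeleteModel(models.Model):
    deleted = models.BooleanField(default=False)

    objects = VisibleManager()      # default manager: excludes deleted rows
    unfiltered = models.Manager()   # includes deleted rows

    class Meta:
        abstract = True

    def delete(self, *args, **kwargs):
        # Soft delete: flag the row instead of removing it from the table.
        self.deleted = True
        self.save(update_fields=['deleted'])
# ---------------------------------------------------------------------------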
255) for _ in range(3)] c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3])) cv2.rectangle(img, c1, c2, color, thickness=tl) if label: tf = max(tl - 1, 1) # font thickness t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0] c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3 cv2.rectangle(img, c1, c2, color, -1) # filled cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA) def plot_box(bboxes, img, id=None, color=None, line_thickness=None, cfg=None): """ 显示图片img和其所有的bboxes :param bboxes: [N, 5] 表示N个bbox, 格式仅支持np.array :param img: img格式为pytorch, 需要进行转换 :param color: :param line_thickness: """ img = img.permute(0, 2, 3, 1).contiguous()[0].numpy() if isinstance(img, torch.Tensor) else img # [C,H,W] ---> [H,W,C] img_size, _, _ = img.shape bboxes[:, :4] = xywh2xyxy(bboxes[:, :4]) tl = line_thickness or round( 0.002 * max(img.shape[0:2])) + 1 # line thickness color = color or [random.randint(0, 255) for _ in range(3)] for i, x in enumerate(bboxes): c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3])) cv2.rectangle(img, c1, c2, color, thickness=tl) label = cfg["labels"][int(x[4])] if label: tf = max(tl - 1, 1) # font thickness t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[ 0] c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3 cv2.rectangle(img, c1, c2, color, -1) # filled cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [0, 0, 0], thickness=tf, lineType=cv2.LINE_AA) img = cv2.cvtColor(img * 255.0, cv2.COLOR_RGB2BGR).astype(np.float32) cv2.imwrite("../data/dataset{}.jpg".format(id), img) class CosineDecayLR(object): def __init__(self, optimizer, T_max, lr_init, lr_min=0., warmup=0): """ a cosine decay scheduler about steps, not epochs. :param optimizer: ex. optim.SGD :param T_max: max steps, and steps=epochs * batches :param lr_max: lr_max is init lr. :param warmup: in the training begin, the lr is smoothly increase from 0 to lr_init, which means "warmup", this means warmup steps, if 0 that means don't use lr warmup. 
""" super(CosineDecayLR, self).__init__() self.__optimizer = optimizer self.__T_max = T_max self.__lr_min = lr_min self.__lr_max = lr_init self.__warmup = warmup def step(self, t): if self.__warmup and t < self.__warmup: lr = self.__lr_max / self.__warmup * t else: T_max = self.__T_max - self.__warmup t = t - self.__warmup lr = self.__lr_min + 0.5 * (self.__lr_max - self.__lr_min) * ( 1 + np.cos(t / T_max * np.pi)) for param_group in self.__optimizer.param_groups: param_group["lr"] = lr class RandomHorizontalFilp(object): def __init__(self, p=0.5): self.p = p def __call__(self, img, bboxes): if random.random() < self.p: _, w_img, _ = img.shape # img = np.fliplr(img) img = img[:, ::-1, :] bboxes[:, [0, 2]] = w_img - bboxes[:, [2, 0]] return img, bboxes class RandomCrop(object): def __init__(self, p=0.5): self.p = p def __call__(self, img, bboxes): if random.random() < self.p: h_img, w_img, _ = img.shape max_bbox = np.concatenate([np.min(bboxes[:, 0:2], axis=0), np.max(bboxes[:, 2:4], axis=0)], axis=-1) max_l_trans = max_bbox[0] max_u_trans = max_bbox[1] max_r_trans = w_img - max_bbox[2] max_d_trans = h_img - max_bbox[3] crop_xmin = max(0, int(max_bbox[0] - random.uniform(0, max_l_trans))) crop_ymin = max(0, int(max_bbox[1] - random.uniform(0, max_u_trans))) crop_xmax = max(w_img, int(max_bbox[2] + random.uniform(0, max_r_trans))) crop_ymax = max(h_img, int(max_bbox[3] + random.uniform(0, max_d_trans))) img = img[crop_ymin: crop_ymax, crop_xmin: crop_xmax] bboxes[:, [0, 2]] = bboxes[:, [0, 2]] - crop_xmin bboxes[:, [1, 3]] = bboxes[:, [1, 3]] - crop_ymin return img, bboxes class RandomAffine(object): def __init__(self, p=0.5): self.p = p def __call__(self, img, bboxes): if random.random() < self.p: h_img, w_img, _ = img.shape # 得到可以包含所有bbox的最大bbox max_bbox = np.concatenate([np.min(bboxes[:, 0:2], axis=0), np.max(bboxes[:, 2:4], axis=0)], axis=-1) max_l_trans = max_bbox[0] max_u_trans = max_bbox[1] max_r_trans = w_img - max_bbox[2] max_d_trans = h_img - max_bbox[3] tx = random.uniform(-(max_l_trans - 1), (max_r_trans - 1)) ty = random.uniform(-(max_u_trans - 1), (max_d_trans - 1)) M = np.array([[1, 0, tx], [0, 1, ty]]) img = cv2.warpAffine(img, M, (w_img, h_img)) bboxes[:, [0, 2]] = bboxes[:, [0, 2]] + tx bboxes[:, [1, 3]] = bboxes[:, [1, 3]] + ty return img, bboxes class Resize(object): """ Resize the image to target size and transforms it into a color channel(BGR->RGB), as well as pixel value normalization([0,1]) """ def __init__(self, target_shape, correct_box=True): self.h_target, self.w_target = target_shape self.correct_box = correct_box def __call__(self, img, bboxes): h_org, w_org, _ = img.shape img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB).astype(np.float32) resize_ratio = min(1.0 * self.w_target / w_org, 1.0 * self.h_target / h_org) resize_w = int(resize_ratio * w_org) resize_h = int(resize_ratio * h_org) image_resized = cv2.resize(img, (resize_w, resize_h)) image_paded = np.full((self.h_target, self.w_target, 3), 128.0) dw = int((self.w_target - resize_w) / 2) dh = int((self.h_target - resize_h) / 2) image_paded[dh:resize_h + dh, dw:resize_w + dw, :] = image_resized image = image_paded / 255.0 # normalize to [0, 1] if self.correct_box: bboxes[:, [0, 2]] = bboxes[:, [0, 2]] * resize_ratio + dw bboxes[:, [1, 3]] = bboxes[:, [1, 3]] * resize_ratio + dh return image, bboxes return image class Mixup(object): def __init__(self, p=0.5): self.p = p def __call__(self, img_org, bboxes_org, img_mix, bboxes_mix): if random.random() > self.p: lam = np.random.beta(1.5, 1.5) img = lam * img_org + (1 
- lam) * img_mix bboxes_org = np.concatenate( [bboxes_org, np.full((len(bboxes_org), 1), lam)], axis=1) bboxes_mix = np.concatenate( [bboxes_mix, np.full((len(bboxes_mix), 1), 1 - lam)], axis=1) bboxes = np.concatenate([bboxes_org, bboxes_mix]) else: img = img_org bboxes = np.concatenate( [bboxes_org, np.full((len(bboxes_org), 1), 1.0)], axis=1) return img, bboxes class LabelSmooth(object): def __init__(self, delta=0.01): self.delta = delta def __call__(self, onehot, num_classes): return onehot * (1 - self.delta) + self.delta * 1.0 / num_classes def select_device(device="", apex=False, batch_size=None): # device = "cpu" or "cuda:0" cpu_request = device.lower() == "cpu" if device and not cpu_request: # if device requested other than "cpu" os.environ["CUDA_VISIBLE_DEVICES"] = device # set environment variable assert torch.cuda.is_available(), f"CUDA unavailable, invalid device {device} requested" cuda = False if cpu_request else torch.cuda.is_available() if cuda: c = 1024 ** 2 # bytes to MB gpu_count = torch.cuda.device_count() if gpu_count > 1 and batch_size: # check that batch_size is compatible with device_count assert batch_size % gpu_count == 0, f"batch-size {batch_size} not multiple of GPU count {gpu_count}" x = [torch.cuda.get_device_properties(i) for i in range(gpu_count)] s = "Using CUDA " + ("Apex " if apex else "") for i in range(0, gpu_count): if i == 1: s = " " * len(s) print( f"{s}\n\t+ device:{i} (name=`{x[i].name}`, total_memory={int(x[i].total_memory / c)}MB)") else: print("Using CPU") print("") # skip a line def load_annotations(anno_type, cfg): assert anno_type in ['train', 'test'], "You must choice one of the 'train' or 'test' for anno_type parameter" anno_path = os.path.join(cfg.DATA_ROOT, anno_type + "_annotation.txt") with open(anno_path, 'r') as f: annotations = list(filter(lambda x: len(x) > 0, f.readlines())) assert len(annotations) > 0, "No images found in {}".format(anno_path) return annotations class VocDataset(Dataset): def __init__(self, anno_file_type, image_size=416, cfg=None): self.image_size = image_size # For Multi-training self.cfg = cfg self.classes = self.cfg.CLASSES self.num_classes = len(self.classes) self.class_to_id = dict(zip(self.classes, range(self.num_classes))) self.annotations = load_annotations(anno_file_type, cfg) def __len__(self): return len(self.annotations) def __getitem__(self, item): img_org, bboxes_org = self.parse_annotation(self.annotations[item]) img_org = img_org.transpose(2, 0, 1) # HWC->CHW item_mix = random.randint(0, len(self.annotations) - 1) img_mix, bboxes_mix = self.parse_annotation( self.annotations[item_mix]) img_mix = img_mix.transpose(2, 0, 1) img, bboxes = Mixup()(img_org, bboxes_org, img_mix, bboxes_mix) del img_org, bboxes_org, img_mix, bboxes_mix label_sbbox, label_mbbox, label_lbbox, sbboxes, mbboxes, lbboxes = self.__creat_label( bboxes) img = torch.from_numpy(img).float() label_sbbox = torch.from_numpy(label_sbbox).float() label_mbbox = torch.from_numpy(label_mbbox).float() label_lbbox = torch.from_numpy(label_lbbox).float() sbboxes = torch.from_numpy(sbboxes).float() mbboxes = torch.from_numpy(mbboxes).float() lbboxes = torch.from_numpy(lbboxes).float() return img, label_sbbox, label_mbbox, label_lbbox, sbboxes, mbboxes, lbboxes def parse_annotation(self, annotation): """ Data augument. :param annotation: Image' path and bboxes' coordinates, categories. ex. [image_path xmin,ymin,xmax,ymax,class_ind xmin,ymin,xmax,ymax,class_ind ...] :return: Return the enhanced image and bboxes. 
bbox'shape is [xmin, ymin, xmax, ymax, class_ind] """ anno = annotation.strip().split(' ') img_path = anno[0] img = cv2.imread(img_path) # H*W*C and C=BGR assert img is not None, 'File Not Found ' + img_path bboxes = np.array( [list(map(float, box.split(','))) for box in anno[1:]]) img, bboxes = RandomHorizontalFilp()(np.copy(img), np.copy(bboxes)) img, bboxes = RandomCrop()(np.copy(img), np.copy(bboxes)) img, bboxes = RandomAffine()(np.copy(img), np.copy(bboxes)) img, bboxes = Resize((self.image_size, self.image_size), True)( np.copy(img), np.copy(bboxes)) return img, bboxes def __creat_label(self, bboxes): """ Label assignment. For a single picture all GT box bboxes are assigned anchor. 1、Select a bbox in order, convert its coordinates("xyxy") to "xywh"; and scale bbox' xywh by the strides. 2、Calculate the iou between the each detection layer'anchors and the bbox in turn, and select the largest anchor to predict the bbox.If the ious of
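# ---------------------------------------------------------------------------
# Illustrative sketch (standalone): the step-based warmup + cosine schedule
# that CosineDecayLR.step(t) writes into the optimizer's param_groups, shown
# here as a pure function so the curve can be inspected without an optimizer.
import numpy as np

def cosine_decay_lr(t, T_max, lr_init, lr_min=0.0, warmup=0):
    if warmup and t < warmup:
        return lr_init / warmup * t                   # linear warmup from 0
    T = T_max - warmup
    t = t - warmup
    return lr_min + 0.5 * (lr_init - lr_min) * (1 + np.cos(t / T * np.pi))

T_max, warmup = 1000, 100
for step in (0, 50, 100, 550, 1000):
    print(step, round(cosine_decay_lr(step, T_max, lr_init=1e-3, warmup=warmup), 6))
# lr ramps linearly to 1e-3 over the first 100 steps, then decays to ~0 by step 1000.
# ---------------------------------------------------------------------------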
tf.cast(drop, tf.float32) masked_x = x * (1.0 - drop_mask)[Ellipsis, None] + missing_x * drop_mask[Ellipsis, None] shuffled_x = tf.concat([masked_x[1:], masked_x[:1]], 0) switch_mask = tf.cast(switch, tf.float32) masked_x = masked_x * ( 1.0 - switch_mask)[Ellipsis, None] + shuffled_x * switch_mask[Ellipsis, None] full_mask = tf.cast(drop | switch | keep, tf.float32) return masked_x, full_mask def add_positional_encoding(self, x): if self.positional_encoding_type == 'identity': time_encoding = tf.eye( self.attention_length, batch_shape=(tf.shape(x)[0],)) x = tf.concat([time_encoding, x], -1) elif self.positional_encoding_type == 'sinusoid': def get_angles(pos, i, d_model): angle_rates = 1 / np.power(10000, (2 * (i // 2)) / np.float32(d_model)) return pos * angle_rates angle_rads = get_angles( np.arange(self.attention_length)[:, np.newaxis], np.arange(self.attention_input_dim)[np.newaxis, :], self.attention_input_dim) # apply sin to even indices in the array; 2i angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2]) # apply cos to odd indices in the array; 2i+1 angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2]) pos_encoding = angle_rads[np.newaxis, Ellipsis] x += pos_encoding elif self.positional_encoding_type == 'zero': x = x else: raise NotImplementedError return x def compute_energy(self, embeddings, other_embeddings): """Computes matrix of energies between every pair of (embedding, other_embedding).""" energies = tf.matmul(embeddings, other_embeddings, transpose_b=True) return energies def fit(self, states, actions, rewards): """Updates critic parameters. Args: states: Batch of sequences of states. actions: Batch of sequences of actions. rewards: Batch of sequences of rewards. Returns: Dictionary with information to track. """ states = states[:, :self.attention_length, :] actions = actions[:, :self.attention_length, :] rewards = rewards[:, :self.attention_length, None] batch_size = tf.shape(states)[0] with tf.GradientTape(watch_accessed_variables=False) as tape: tape.watch(self.all_variables) all_states = tf.reshape(states, [batch_size * self.attention_length, -1]) all_embeddings = self.embedder(all_states, stop_gradient=False) embeddings = tf.reshape( all_embeddings, [batch_size, self.attention_length, self.embedding_dim]) states_in = embeddings if self.embed_on_input else states actions_in = actions rewards_in = rewards if self.input_dimension_dropout > 0: states_in *= tf.cast( tf.random.uniform(tf.shape(states_in)) < self.input_dimension_dropout, tf.float32) states_in *= 1 / (1 - self.input_dimension_dropout) actions_in *= tf.cast( tf.random.uniform(tf.shape(actions_in)) < self.input_dimension_dropout, tf.float32) actions_in *= 1 / (1 - self.input_dimension_dropout) rewards_in *= tf.cast( tf.random.uniform(tf.shape(rewards_in)) < self.input_dimension_dropout, tf.float32) rewards_in *= 1 / (1 - self.input_dimension_dropout) states_in, states_mask = self._prepare_input(states_in, self.missing_state) actions_in, actions_mask = self._prepare_input(actions_in, self.missing_action) rewards_in, rewards_mask = self._prepare_input(rewards_in, self.missing_reward) attention_in = [states_in] if self.input_actions: attention_in.append(actions_in) if self.input_rewards: attention_in.append(rewards_in) attention_in = tf.concat(attention_in, -1) attention_in = self.add_positional_encoding(attention_in) attention_out = self.transformer(attention_in, training=True) # State prediction loss. 
states_mask_indices = tf.where(states_mask > 0.0) pred_embeddings = tf.gather_nd(attention_out[Ellipsis, :self.output_dim], states_mask_indices) if self.extra_embedder: true_states = tf.gather_nd(states, states_mask_indices) true_embeddings = self.extra_embedder(true_states, stop_gradient=False) else: true_embeddings = tf.gather_nd(embeddings, states_mask_indices) energies = self.compute_energy(pred_embeddings, true_embeddings) positive_loss = tf.linalg.diag_part(energies) negative_loss = tf.reduce_logsumexp(energies, axis=-1) state_loss = -positive_loss + negative_loss correct = tf.cast(positive_loss >= tf.reduce_max(energies, axis=-1), tf.float32) if self.predict_actions or self.policy_decoder_on_embeddings: if self.policy_decoder_on_embeddings: policy_decoder_in = all_embeddings all_actions = tf.reshape( actions, [batch_size * self.attention_length, self.action_dim]) else: actions_mask_indices = tf.where(actions_mask > 0.0) idx = -1 if self.predict_rewards else tf.shape(attention_out)[-1] policy_decoder_in = tf.gather_nd( attention_out[Ellipsis, self.output_dim:idx], actions_mask_indices) all_actions = tf.gather_nd(actions, actions_mask_indices) action_log_probs = self.policy_decoder.log_probs( policy_decoder_in, all_actions) _, policy_log_probs = self.policy_decoder( policy_decoder_in, sample=True, with_log_probs=True) alpha = tf.exp(self.log_alpha) alpha_loss = alpha * tf.stop_gradient(-policy_log_probs - self.target_entropy) reconstruct_loss = -action_log_probs + tf.stop_gradient( alpha) * policy_log_probs action_loss = alpha_loss + reconstruct_loss else: action_loss = 0.0 if self.predict_rewards or self.reward_decoder_on_embeddings: if self.reward_decoder_on_embeddings: reward_decoder_in = all_embeddings pred_reward = self.reward_decoder(reward_decoder_in) pred_reward = tf.reshape(pred_reward, [batch_size, self.attention_length, 1]) pred_reward = tf.gather(pred_reward, tf.where(rewards_mask > 0.0)) else: pred_reward = tf.gather(attention_out[Ellipsis, -1:], tf.where(rewards_mask > 0.0)) true_reward = tf.gather(rewards, tf.where(rewards_mask > 0.0)) reward_loss = huber(pred_reward - true_reward) else: reward_loss = 0.0 loss = tf.reduce_mean(state_loss) + tf.reduce_mean( action_loss) + tf.reduce_mean(reward_loss) grads = tape.gradient(loss, self.all_variables) self.optimizer.apply_gradients(zip(grads, self.all_variables)) return { 'embed_loss': loss, 'positive_loss': tf.reduce_mean(positive_loss), 'negative_loss': tf.reduce_mean(negative_loss), 'state_loss': tf.reduce_mean(state_loss), 'state_correct': tf.reduce_mean(correct), 'action_loss': tf.reduce_mean(action_loss), } @tf.function def update_step(self, replay_buffer_iter): states, actions, rewards, _, _ = next(replay_buffer_iter) return self.fit(states, actions, rewards) def get_input_state_dim(self): return self.embedder.embedding_dim class MomentumACLLearner(ACLLearner): """Extension of ACLLearner.""" def __init__(self, state_dim, action_spec, embedding_dim = 256, num_distributions = None, preprocess_dim = 256, hidden_dims = (256, 256), sequence_length = 2, ctx_length = None, downstream_input_mode = 'embed', learning_rate = None, num_heads = 4, drop_probability = 0.3, switch_probability = 0.15, keep_probability = 0.15, input_dimension_dropout = 0.0, input_actions = True, predict_actions = True, policy_decoder_on_embeddings = False, input_rewards = True, predict_rewards = False, reward_decoder_on_embeddings = False, embed_on_input = True, extra_embedder = True, positional_encoding_type = 'identity', direction = 'backward', 
residual_dims = (256,), tau = 0.05, target_update_period = 1): super().__init__( state_dim, action_spec, embedding_dim=embedding_dim, num_distributions=num_distributions, preprocess_dim=preprocess_dim, hidden_dims=hidden_dims, sequence_length=sequence_length, ctx_length=ctx_length, downstream_input_mode=downstream_input_mode, learning_rate=learning_rate, num_heads=num_heads, drop_probability=drop_probability, switch_probability=switch_probability, keep_probability=keep_probability, input_dimension_dropout=input_dimension_dropout, input_actions=input_actions, predict_actions=predict_actions, policy_decoder_on_embeddings=policy_decoder_on_embeddings, input_rewards=input_rewards, predict_rewards=predict_rewards, reward_decoder_on_embeddings=reward_decoder_on_embeddings, embed_on_input=embed_on_input, extra_embedder=extra_embedder, positional_encoding_type=positional_encoding_type, direction=direction) self.residual_mlp = EmbedNet( embedding_dim, embedding_dim=embedding_dim, hidden_dims=residual_dims) self.embedder_target = EmbedNet( state_dim, embedding_dim=self.embedding_dim, hidden_dims=hidden_dims) soft_update(self.embedder, self.embedder_target, tau=1.0) learning_rate = learning_rate or 3e-4 self.optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate) self.tau = tau self.target_update_period = target_update_period self.all_variables += self.residual_mlp.variables def fit(self, states, actions, rewards): """Updates critic parameters. Args: states: Batch of sequences of states. actions: Batch of sequences of actions. rewards: Batch of sequences of rewards. Returns: Dictionary with information to track. """ states = states[:, :self.attention_length, :] actions = actions[:, :self.attention_length, :] rewards = rewards[:, :self.attention_length, None] batch_size = tf.shape(states)[0] with tf.GradientTape(watch_accessed_variables=False) as tape: tape.watch(self.all_variables) all_states = tf.reshape(states, [batch_size * self.attention_length, -1]) all_embeddings = self.embedder(all_states, stop_gradient=False) all_embeddings += self.residual_mlp(all_embeddings, stop_gradient=False) embeddings = tf.reshape( all_embeddings, [batch_size, self.attention_length, self.embedding_dim]) states_in = embeddings if self.embed_on_input else states actions_in = actions rewards_in = rewards if self.input_dimension_dropout > 0: states_in *= tf.cast( tf.random.uniform(tf.shape(states_in)) < self.input_dimension_dropout, tf.float32) states_in *= 1 / (1 - self.input_dimension_dropout) actions_in *= tf.cast( tf.random.uniform(tf.shape(actions_in)) < self.input_dimension_dropout, tf.float32) actions_in *= 1 / (1 - self.input_dimension_dropout) rewards_in *= tf.cast( tf.random.uniform(tf.shape(rewards_in)) < self.input_dimension_dropout, tf.float32) rewards_in *= 1 / (1 - self.input_dimension_dropout) states_in, states_mask = self._prepare_input(states_in, self.missing_state) actions_in, actions_mask = self._prepare_input(actions_in, self.missing_action) rewards_in, rewards_mask = self._prepare_input(rewards_in, self.missing_reward) attention_in = [states_in] if self.input_actions: attention_in.append(actions_in) if self.input_rewards: attention_in.append(rewards_in) attention_in = tf.concat(attention_in, -1) attention_in = self.add_positional_encoding(attention_in) attention_out = self.transformer(attention_in, training=True) # State prediction loss. 
states_mask_indices = tf.where(states_mask > 0.0) pred_embeddings = tf.gather_nd(attention_out[Ellipsis, :self.output_dim], states_mask_indices) if self.extra_embedder: true_states = tf.gather_nd(states, states_mask_indices) true_embeddings = self.extra_embedder(true_states, stop_gradient=False) else: true_embeddings = tf.gather_nd(embeddings, states_mask_indices) true_embeddings = self.embedder_target(all_states, stop_gradient=True) true_embeddings = tf.reshape( true_embeddings, [batch_size, self.attention_length, self.embedding_dim]) true_embeddings = tf.gather_nd(true_embeddings, states_mask_indices) energies = self.compute_energy(pred_embeddings, true_embeddings) positive_loss = tf.linalg.diag_part(energies) negative_loss = tf.reduce_logsumexp(energies, axis=-1) state_loss = -positive_loss + negative_loss correct = tf.cast(positive_loss >= tf.reduce_max(energies, axis=-1), tf.float32) if self.predict_actions or self.policy_decoder_on_embeddings: if self.policy_decoder_on_embeddings: policy_decoder_in = all_embeddings all_actions = tf.reshape( actions, [batch_size * self.attention_length, self.action_dim]) else: actions_mask_indices = tf.where(actions_mask > 0.0) idx = -1 if self.predict_rewards else tf.shape(attention_out)[-1] policy_decoder_in = tf.gather_nd( attention_out[Ellipsis, self.output_dim:idx], actions_mask_indices) all_actions = tf.gather_nd(actions, actions_mask_indices) action_log_probs = self.policy_decoder.log_probs( policy_decoder_in, all_actions) _, policy_log_probs = self.policy_decoder( policy_decoder_in, sample=True, with_log_probs=True) alpha = tf.exp(self.log_alpha) alpha_loss = alpha * tf.stop_gradient(-policy_log_probs - self.target_entropy) reconstruct_loss = -action_log_probs + tf.stop_gradient( alpha) * policy_log_probs action_loss = alpha_loss + reconstruct_loss else: action_loss = 0.0 if self.predict_rewards or self.reward_decoder_on_embeddings: if self.reward_decoder_on_embeddings: reward_decoder_in = all_embeddings pred_reward = self.reward_decoder(reward_decoder_in) pred_reward = tf.reshape(pred_reward, [batch_size, self.attention_length, 1]) pred_reward = tf.gather(pred_reward, tf.where(rewards_mask > 0.0)) else: pred_reward = tf.gather(attention_out[Ellipsis, -1:], tf.where(rewards_mask > 0.0)) true_reward = tf.gather(rewards, tf.where(rewards_mask > 0.0)) reward_loss = huber(pred_reward - true_reward) else: reward_loss = 0.0 loss = tf.reduce_mean(state_loss) + tf.reduce_mean( action_loss) + tf.reduce_mean(reward_loss) grads = tape.gradient(loss, self.all_variables) self.optimizer.apply_gradients(zip(grads, self.all_variables)) if self.optimizer.iterations % self.target_update_period == 0: soft_update(self.embedder, self.embedder_target, tau=self.tau) return { 'embed_loss': loss, 'positive_loss': tf.reduce_mean(positive_loss), 'negative_loss': tf.reduce_mean(negative_loss), 'state_loss': tf.reduce_mean(state_loss), 'state_correct': tf.reduce_mean(correct), 'action_loss': tf.reduce_mean(action_loss), } class VpnLearner(tf.keras.Model): """A learner for value prediction network.""" def __init__(self, state_dim, action_dim, embedding_dim = 256, hidden_dims = (256, 256), sequence_length = 2, learning_rate = None, discount = 0.95, tau = 1.0, target_update_period = 1000): """Creates networks. Args: state_dim: State size. action_dim: Action size. embedding_dim: Embedding size. hidden_dims: List of hidden dimensions. sequence_length: Expected length of sequences provided as input learning_rate: Learning rate. 
""" super().__init__() self.input_dim = state_dim self.embedding_dim = embedding_dim self.sequence_length = sequence_length self.discount = discount self.tau = tau self.target_update_period = target_update_period self.embedder = EmbedNet( state_dim, embedding_dim=self.embedding_dim, hidden_dims=hidden_dims) self.f_value = keras_utils.create_mlp( self.embedding_dim, 1, hidden_dims=hidden_dims, activation=tf.nn.swish) self.f_value_target = keras_utils.create_mlp( self.embedding_dim, 1, hidden_dims=hidden_dims, activation=tf.nn.swish) self.f_trans = keras_utils.create_mlp( self.embedding_dim + action_dim, self.embedding_dim, hidden_dims=hidden_dims, activation=tf.nn.swish) self.f_out = keras_utils.create_mlp( self.embedding_dim + action_dim, 2, hidden_dims=hidden_dims, activation=tf.nn.swish) learning_rate = learning_rate or 1e-4 self.optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate) self.all_variables = self.variables soft_update(self.f_value, self.f_value_target, tau=1.0) @tf.function def call(self, states, actions = None, stop_gradient = True): """Returns embedding. Args: states: 2 or 3 dimensional state tensors. downstream_input_mode: mode of downstream inputs, e.g., state-ctx. stop_gradient: Whether to stop_gradient. Returns: Embedding. """ assert
<reponame>jamilatta/scielo-manager # -*- encoding: utf-8 -*- import urllib import hashlib import logging import choices import caching.base from scielomanager import tools try: from collections import OrderedDict except ImportError: from ordereddict import OrderedDict from django.db import ( models, transaction, IntegrityError, DatabaseError, ) from django.core.exceptions import ObjectDoesNotExist from django.contrib.contenttypes.models import ContentType from django.contrib.contenttypes import generic from django.contrib.auth.models import User from django.utils.translation import ugettext_lazy as _ from django.utils.translation import ugettext as __ from django.conf import settings from django.db.models.signals import post_save, pre_save from django.dispatch import receiver from django.template.defaultfilters import slugify from django.core.exceptions import ImproperlyConfigured from scielo_extensions import modelfields from tastypie.models import create_api_key import jsonfield from scielomanager.utils import base28 from . import modelmanagers User.__bases__ = (caching.base.CachingMixin, models.Model) User.add_to_class('objects', caching.base.CachingManager()) logger = logging.getLogger(__name__) EVENT_TYPES = [(ev_type, ev_type) for ev_type in ['added', 'deleted', 'updated']] ISSUE_DEFAULT_LICENSE_HELP_TEXT = _(u"If not defined, will be applied the related journal's use license. \ The SciELO default use license is BY-NC. Please visit: http://ref.scielo.org/jf5ndd (5.2.11. Política de direitos autorais) for more details.") def get_user_collections(user_id): """ Return all the collections of a given user, The returned collections are the collections where the user could have access by the collections bar. """ user_collections = User.objects.get(pk=user_id).usercollections_set.all().order_by( 'collection__name') return user_collections def get_journals_default_use_license(): """ Returns the default use license for all new Journals. This callable is passed as the default value on Journal.use_license field. The default use license is the one defined on SciELO criteria, and at the time is BY-NC. See http://ref.scielo.org/jf5ndd for more information. """ try: return UseLicense.objects.get(is_default=True) except UseLicense.DoesNotExist: raise ImproperlyConfigured("There is no UseLicense set as default") class AppCustomManager(caching.base.CachingManager): """ Domain specific model managers. """ def available(self, is_available=True): """ Filter the queryset based on its availability. """ data_queryset = self.get_query_set() if not isinstance(is_available, bool): try: if int(is_available) == 0: is_available = False else: is_available = True except (ValueError, TypeError): is_available = True data_queryset = data_queryset.filter(is_trashed=not is_available) return data_queryset class JournalCustomManager(AppCustomManager): def all_by_user(self, user, is_available=True, pub_status=None): """ Retrieves all the user's journals, contextualized by their default collection. """ default_collection = Collection.objects.get_default_by_user(user) objects_all = self.available(is_available).filter( collections=default_collection).distinct() if pub_status: if pub_status in [stat[0] for stat in choices.JOURNAL_PUBLICATION_STATUS]: objects_all = objects_all.filter(pub_status=pub_status) return objects_all def recents_by_user(self, user): """ Retrieves the recently modified objects related to the given user. 
""" default_collection = Collection.objects.get_default_by_user(user) recents = self.filter( collections=default_collection).distinct().order_by('-updated')[:5] return recents def all_by_collection(self, collection, is_available=True): objects_all = self.available(is_available).filter( collections=collection) return objects_all def by_issn(self, issn): """ Get the journal assigned to `issn`, being electronic or print. In some cases more than one instance of the same journal will be returned due to the fact that journals present in more than one collection is handled separately. """ if issn == '': return Journal.objects.none() journals = Journal.objects.filter( models.Q(print_issn=issn) | models.Q(eletronic_issn=issn) ) return journals class SectionCustomManager(AppCustomManager): def all_by_user(self, user, is_available=True): default_collection = Collection.objects.get_default_by_user(user) objects_all = self.available(is_available).filter( journal__collections=default_collection).distinct() return objects_all class IssueCustomManager(AppCustomManager): def all_by_collection(self, collection, is_available=True): objects_all = self.available(is_available).filter( journal__collections=collection) return objects_all class InstitutionCustomManager(AppCustomManager): """ Add capabilities to Institution subclasses to retrieve querysets based on user's collections. """ def all_by_user(self, user, is_available=True): default_collection = Collection.objects.get_default_by_user(user) objects_all = self.available(is_available).filter( collections__in=[default_collection]).distinct() return objects_all class CollectionCustomManager(AppCustomManager): def all_by_user(self, user): """ Returns all the Collections related to the given user. """ collections = self.filter(usercollections__user=user).order_by( 'name') return collections def get_default_by_user(self, user): """ Returns the Collection marked as default by the given user. If none satisfies this condition, the first instance is then returned. Like any manager method that does not return Querysets, `get_default_by_user` raises DoesNotExist if there is no result for the given parameter. """ collections = self.filter(usercollections__user=user, usercollections__is_default=True).order_by('name') if not collections.count(): try: collection = self.all_by_user(user)[0] except IndexError: raise Collection.DoesNotExist() else: collection.make_default_to_user(user) return collection return collections[0] def get_managed_by_user(self, user): """ Returns all collections managed by a given user. """ collections = self.filter(usercollections__user=user, usercollections__is_manager=True).order_by('name') return collections class RegularPressReleaseCustomManager(caching.base.CachingManager): def by_journal_pid(self, journal_pid): """ Returns all PressReleases related to a Journal, given its PID. """ journals = Journal.objects.filter( models.Q(print_issn=journal_pid) | models.Q(eletronic_issn=journal_pid)) preleases = self.filter(issue__journal__in=journals.values('id')).select_related('translations') return preleases def all_by_journal(self, journal): """ Returns all PressReleases related to a Journal """ preleases = self.filter(issue__journal=journal) return preleases def by_issue_pid(self, issue_pid): """ Returns all PressReleases related to an Issue, given its PID. 
""" issn_slice = slice(0, 9) year_slice = slice(9, 13) order_slice = slice(13, None) issn = issue_pid[issn_slice] year = issue_pid[year_slice] order = int(issue_pid[order_slice]) preleases_qset = self.by_journal_pid(issn) return preleases_qset.filter(issue__publication_year=year).filter(issue__order=order) class AheadPressReleaseCustomManager(caching.base.CachingManager): def by_journal_pid(self, journal_pid): """ Returns all PressReleases related to a Journal, given its PID. """ preleases = self.filter(models.Q(journal__print_issn=journal_pid) | models.Q(journal__eletronic_issn=journal_pid)) return preleases class Language(caching.base.CachingMixin, models.Model): """ Represents ISO 639-1 Language Code and its language name in English. Django automaticaly translates language names, if you write them right. http://en.wikipedia.org/wiki/ISO_639-1_language_matrix """ objects = caching.base.CachingManager() nocacheobjects = models.Manager() iso_code = models.CharField(_('ISO 639-1 Language Code'), max_length=2) name = models.CharField(_('Language Name (in English)'), max_length=64) def __unicode__(self): return __(self.name) class Meta: ordering = ['name'] class UserProfile(caching.base.CachingMixin, models.Model): objects = caching.base.CachingManager() nocacheobjects = models.Manager() user = models.OneToOneField(User) email = models.EmailField(_('E-mail'), blank=False, unique=True, null=False) @property def gravatar_id(self): return hashlib.md5(self.email.lower().strip()).hexdigest() @property def avatar_url(self): params = urllib.urlencode({'s': 18, 'd': 'mm'}) return '{0}/avatar/{1}?{2}'.format(getattr(settings, 'GRAVATAR_BASE_URL', 'https://secure.gravatar.com'), self.gravatar_id, params) @property def get_default_collection(self): """ Return the default collection for this user """ return Collection.objects.get_default_by_user(self.user) def save(self, force_insert=False, force_update=False): self.user.email = self.email self.user.save() return super(UserProfile, self).save(force_insert, force_update) class Collection(caching.base.CachingMixin, models.Model): objects = CollectionCustomManager() nocacheobjects = models.Manager() collection = models.ManyToManyField(User, related_name='user_collection', through='UserCollections', null=True, blank=True, ) name = models.CharField(_('Collection Name'), max_length=128, db_index=True, ) name_slug = models.SlugField(unique=True, db_index=True, blank=True, null=True) url = models.URLField(_('Instance URL'), ) logo = models.ImageField(_('Logo'), upload_to='img/collections_logos', null=True, blank=True, ) acronym = models.CharField(_('Sigla'), max_length=16, db_index=True, blank=True, ) country = models.CharField(_('Country'), max_length=32,) state = models.CharField(_('State'), max_length=32, null=False, blank=True,) city = models.CharField(_('City'), max_length=32, null=False, blank=True,) address = models.TextField(_('Address'),) address_number = models.CharField(_('Number'), max_length=8,) address_complement = models.CharField(_('Complement'), max_length=128, null=False, blank=True,) zip_code = models.CharField(_('Zip Code'), max_length=16, null=True, blank=True, ) phone = models.CharField(_('Phone Number'), max_length=16, null=False, blank=True, ) fax = models.CharField(_('Fax Number'), max_length=16, null=False, blank=True, ) email = models.EmailField(_('Email'), ) def __unicode__(self): return unicode(self.name) class Meta: ordering = ['name'] permissions = (("list_collection", "Can list Collections"),) def save(self, *args, **kwargs): 
self.name_slug = slugify(self.name) super(Collection, self).save(*args, **kwargs) def add_user(self, user, is_default=False, is_manager=False): """ Add the user to the current collection. """ UserCollections.objects.create(collection=self, user=user, is_default=is_default, is_manager=is_manager) def remove_user(self, user): """ Removes the user from the current collection. If the user isn't already related to the given collection, it will do nothing, silently. """ try: uc = UserCollections.objects.get(collection=self, user=user) except UserCollections.DoesNotExist: return None else: uc.delete() def make_default_to_user(self, user): """ Makes the current collection, the user's default. """ UserCollections.objects.filter(user=user).update(is_default=False) uc, created = UserCollections.objects.get_or_create( collection=self, user=user) uc.is_default = True uc.save() def is_default_to_user(self, user): """ Returns a boolean value depending if the current collection is set as default to the given user. """ try: uc = UserCollections.objects.get(collection=self, user=user) return uc.is_default except UserCollections.DoesNotExist: return False def is_managed_by_user(self, user): """ Returns a boolean value depending if the current collection is managed by the given user. """ try: uc = UserCollections.objects.get(collection=self, user=user) return uc.is_manager except UserCollections.DoesNotExist: return False class UserCollections(caching.base.CachingMixin, models.Model): objects = caching.base.CachingManager() nocacheobjects = models.Manager() user = models.ForeignKey(User) collection = models.ForeignKey(Collection) is_default = models.BooleanField(_('Is default'), default=False, null=False, blank=False) is_manager = models.BooleanField(_('Is manager of the collection?'), default=False, null=False, blank=False) class Meta: unique_together = ("user", "collection", ) class Institution(caching.base.CachingMixin, models.Model): #Custom manager objects = AppCustomManager() nocacheobjects = models.Manager() created = models.DateTimeField(auto_now_add=True) updated = models.DateTimeField(auto_now=True) name = models.CharField(_('Institution Name'), max_length=256, db_index=True) complement = models.TextField(_('Institution Complements'), blank=True, default="") acronym = models.CharField(_('Sigla'), max_length=16, db_index=True, blank=True) country = models.CharField(_('Country'), max_length=32) state = models.CharField(_('State'), max_length=32, null=False, blank=True) city = models.CharField(_('City'), max_length=32, null=False, blank=True) address = models.TextField(_('Address')) address_number = models.CharField(_('Number'), max_length=8) address_complement = models.CharField(_('Address Complement'), max_length=128, null=False, blank=True) zip_code = models.CharField(_('Zip Code'), max_length=16, null=True, blank=True) phone = models.CharField(_('Phone Number'), max_length=16, null=False, blank=True) fax = models.CharField(_('Fax Number'), max_length=16, null=False, blank=True) cel = models.CharField(_('Cel Number'), max_length=16, null=False, blank=True) email = models.EmailField(_('E-mail')) is_trashed = models.BooleanField(_('Is trashed?'), default=False, db_index=True) def __unicode__(self): return u'%s' % (self.name) class Meta: ordering = ['name'] class Sponsor(Institution): objects = InstitutionCustomManager() nocacheobjects = models.Manager() userobjects = modelmanagers.SponsorManager() collections = models.ManyToManyField(Collection) class Meta: permissions = (("list_sponsor", "Can list 
Sponsors"),) class SubjectCategory(caching.base.CachingMixin, models.Model): #Custom manager objects = JournalCustomManager() nocacheobjects = models.Manager() term = models.CharField(_('Term'), max_length=256, db_index=True) def __unicode__(self): return self.term class StudyArea(caching.base.CachingMixin, models.Model): objects = caching.base.CachingManager() nocacheobjects = models.Manager() study_area = models.CharField(_('Study Area'), max_length=256, choices=sorted(choices.SUBJECTS, key=lambda SUBJECTS: SUBJECTS[1])) def __unicode__(self): return self.study_area class Journal(caching.base.CachingMixin, models.Model): """ Represents a Journal that is managed
| | Cf. option | | | | | "ad_weight". | | +------------------+-----------------+------------------+------------------+ | compiler | OT_STRING | Just-in-time | casadi::Function | | | | compiler plugin | Internal | | | | to be used. | | +------------------+-----------------+------------------+------------------+ | derivative_of | OT_FUNCTION | The function is | casadi::Function | | | | a derivative of | Internal | | | | another | | | | | function. The | | | | | type of | | | | | derivative | | | | | (directional | | | | | derivative, | | | | | Jacobian) is | | | | | inferred from | | | | | the function | | | | | name. | | +------------------+-----------------+------------------+------------------+ | discrete | OT_BOOLVECTOR | Indicates which | casadi::Conic | | | | of the variables | | | | | are discrete, | | | | | i.e. integer- | | | | | valued | | +------------------+-----------------+------------------+------------------+ | enable_fd | OT_BOOL | Enable | casadi::Function | | | | derivative | Internal | | | | calculation by | | | | | finite | | | | | differencing. | | | | | [default: | | | | | false]] | | +------------------+-----------------+------------------+------------------+ | enable_forward | OT_BOOL | Enable | casadi::Function | | | | derivative | Internal | | | | calculation | | | | | using generated | | | | | functions for | | | | | Jacobian-times- | | | | | vector products | | | | | - typically | | | | | using forward | | | | | mode AD - if | | | | | available. | | | | | [default: true] | | +------------------+-----------------+------------------+------------------+ | enable_jacobian | OT_BOOL | Enable | casadi::Function | | | | derivative | Internal | | | | calculation | | | | | using generated | | | | | functions for | | | | | Jacobians of all | | | | | differentiable | | | | | outputs with | | | | | respect to all | | | | | differentiable | | | | | inputs - if | | | | | available. | | | | | [default: true] | | +------------------+-----------------+------------------+------------------+ | enable_reverse | OT_BOOL | Enable | casadi::Function | | | | derivative | Internal | | | | calculation | | | | | using generated | | | | | functions for | | | | | transposed | | | | | Jacobian-times- | | | | | vector products | | | | | - typically | | | | | using reverse | | | | | mode AD - if | | | | | available. | | | | | [default: true] | | +------------------+-----------------+------------------+------------------+ | fd_method | OT_STRING | Method for | casadi::Function | | | | finite | Internal | | | | differencing | | | | | [default | | | | | 'central'] | | +------------------+-----------------+------------------+------------------+ | fd_options | OT_DICT | Options to be | casadi::Function | | | | passed to the | Internal | | | | finite | | | | | difference | | | | | instance | | +------------------+-----------------+------------------+------------------+ | gather_stats | OT_BOOL | Deprecated | casadi::Function | | | | option | Internal | | | | (ignored): | | | | | Statistics are | | | | | now always | | | | | collected. 
| | +------------------+-----------------+------------------+------------------+ | input_scheme | OT_STRINGVECTOR | Deprecated | casadi::Function | | | | option (ignored) | Internal | +------------------+-----------------+------------------+------------------+ | inputs_check | OT_BOOL | Throw exceptions | casadi::Function | | | | when the | Internal | | | | numerical values | | | | | of the inputs | | | | | don't make sense | | +------------------+-----------------+------------------+------------------+ | jac_penalty | OT_DOUBLE | When requested | casadi::Function | | | | for a number of | Internal | | | | forward/reverse | | | | | directions, it | | | | | may be cheaper | | | | | to compute first | | | | | the full | | | | | jacobian and | | | | | then multiply | | | | | with seeds, | | | | | rather than | | | | | obtain the | | | | | requested | | | | | directions in a | | | | | straightforward | | | | | manner. Casadi | | | | | uses a heuristic | | | | | to decide which | | | | | is cheaper. A | | | | | high value of | | | | | 'jac_penalty' | | | | | makes it less | | | | | likely for the | | | | | heurstic to | | | | | chose the full | | | | | Jacobian | | | | | strategy. The | | | | | special value -1 | | | | | indicates never | | | | | to use the full | | | | | Jacobian | | | | | strategy | | +------------------+-----------------+------------------+------------------+ | jit | OT_BOOL | Use just-in-time | casadi::Function | | | | compiler to | Internal | | | | speed up the | | | | | evaluation | | +------------------+-----------------+------------------+------------------+ | jit_options | OT_DICT | Options to be | casadi::Function | | | | passed to the | Internal | | | | jit compiler. | | +------------------+-----------------+------------------+------------------+ | max_num_dir | OT_INT | Specify the | casadi::Function | | | | maximum number | Internal | | | | of directions | | | | | for derivative | | | | | functions. | | | | | Overrules the | | | | | builtin optimize | | | | | d_num_dir. | | +------------------+-----------------+------------------+------------------+ | output_scheme | OT_STRINGVECTOR | Deprecated | casadi::Function | | | | option (ignored) | Internal | +------------------+-----------------+------------------+------------------+ | print_time | OT_BOOL | print | casadi::Function | | | | information | Internal | | | | about execution | | | | | time | | +------------------+-----------------+------------------+------------------+ | regularity_check | OT_BOOL | Throw exceptions | casadi::Function | | | | when NaN or Inf | Internal | | | | appears during | | | | | evaluation | | +------------------+-----------------+------------------+------------------+ | user_data | OT_VOIDPTR | A user-defined | casadi::Function | | | | field that can | Internal | | | | be used to | | | | | identify the | | | | | function or pass | | | | | additional | | | | | information | | +------------------+-----------------+------------------+------------------+ | verbose | OT_BOOL | Verbose | casadi::Function | | | | evaluation for | Internal | | | | debugging | | +------------------+-----------------+------------------+------------------+ >Input scheme: casadi::ConicInput (CONIC_NUM_IN = 12) +--------------+--------+--------------------------------------------------+ | Full name | Short | Description | +==============+========+==================================================+ | CONIC_H | h | The square matrix H: sparse, (n x n). Only the | | | | lower triangular part is actually used. 
The | | | | matrix is assumed to be symmetrical. | +--------------+--------+--------------------------------------------------+ | CONIC_G | g | The vector g: dense, (n x 1) | +--------------+--------+--------------------------------------------------+ | CONIC_A | a | The matrix A: sparse, (nc x n) - product with x | | | | must be dense. | +--------------+--------+--------------------------------------------------+ | CONIC_LBA | lba | dense, (nc x
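# --- Illustrative sketch (added for clarity; not part of the generated CasADi docs) ---
# The option and input-scheme tables above belong to CasADi's Conic (QP)
# interface: a problem min_x 1/2 x'Hx + g'x subject to lba <= Ax <= uba is
# constructed with conic() and evaluated by passing the named inputs from the
# CONIC scheme. The snippet below is a minimal sketch of that flow; the
# 'qpoases' plugin is an assumption (which solver plugins are available depends
# on how CasADi was built) and the numeric values are arbitrary.
from casadi import DM, conic

H = 2 * DM.eye(2)        # CONIC_H: symmetric quadratic term (n x n)
g = DM.zeros(2)          # CONIC_G: linear term (n x 1)
A = DM.ones(1, 2)        # CONIC_A: constraint matrix (nc x n)
lba = 10.0               # CONIC_LBA: lower bound on A @ x

# Options from the table above are passed as a plain dict.
opts = {'print_time': True, 'verbose': False}
S = conic('S', 'qpoases', {'h': H.sparsity(), 'a': A.sparsity()}, opts)

sol = S(h=H, g=g, a=A, lba=lba)
print('x_opt =', sol['x'])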
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Tue Jun 4 10:34:22 2019 A class based formulation of other analyses. It is structured as: Dataset _| |_ | | Analysis Forecast ________________|________________ | | | SubxForecast EC45Forecast Seas5Forecast Dataset initialises the majority of the variables, handles data loading, copying and subsetting, and provides deseasonalising and data reduction methods. Analysis adds a preprocessing method for era5 data, and some additional variable setup Forecast adds an error correction method, and forecast-specific variable setup SubxForecast, EC45Forecast, and Seas5Forecast add filetype specific data processing. @author: josh """ import iris import copy as cp import datetime as dt import iris.coord_categorisation as iccat from iris.analysis.cartography import cosine_latitude_weights import numpy as np import cf_units import os class Dataset: def __init__(self,field,dates,leads=None): """ Dataset is the base class shared by all analysis and forecast data sets. It defines all functions that are generic between datasets. Not normally used directly. Args: * field - A string used to identify which fields to load from file. *date - a list or tuple of 2 datetime.datetime objects specifying the first and last datetime to include in the data *leads - used by the Forecast class only, a list or tuple of 2 floats, specifying minimum and maximum lead times in days to include. """ self.field=field self.dates=dates self._d_l,self._d_u=dates self.leads=leads #Only data of the same forecast hour is currently supported. assert dates[0].hour==dates[1].hour self.hour=[dates[0].hour] #Name of the primary time coordinate self.T="time" #The expected position of the primary time coordinate in the cube self.t=0 #The day of year associated with 'dates' self.calendar_bounds=[d.timetuple().tm_yday for d in dates] self.type=Dataset #A dictionary that can contain any number of iris CubeLists, each #labelled with a keyword. The load_data method generates a "data" and #a "clim" CubeList self.data={} #Used by the get_climatology method self.dist_means=None self.distribution=None #The time unit to use self.U=cf_units.Unit(f"Days since {cf_units.EPOCH}",\ calendar=cf_units.CALENDAR_GREGORIAN) #Constraints applied to the data at different points. 
self.constraints={ #keep only data with a valid time coordinate "load":iris.Constraint(cube_func=lambda cube: cube.coords(self.T)!=[]), #keep only data that falls within the calendar_bounds "calendar":iris.Constraint(coord_values={"day_of_year":lambda cell:\ self._in_calendar_bounds(cell)}), #keep only data for the right hour "hour":iris.Constraint(coord_values={"hour":lambda cell:\ np.isin(cell,self.hour)[0]}), #keep only data that falls within the dates "data":iris.Constraint(coord_values={self.T:lambda cell:\ self._d_l<=cell<=self._d_u}), #keep only data that falls outside the dates "clim":iris.Constraint(coord_values={self.T:lambda cell:\ (self._d_l>cell)or (cell>self._d_u)}) } self._setup() def _setup(self): """empty method used by derived classes.""" pass def set_path(self,path): """set the path from which to load data""" if os.path.isdir(path): self.path=path else: raise(ValueError("Not a valid path.")) def copy(self): """A method which returns a copy of the Dataset""" copy=self.type(self.field,self.dates,self.leads) copy.dist_means=self.dist_means copy.distribution=self.distribution copy.data=cp.deepcopy(self.data) return copy def add_constraints(self,constr_dict): """add a dictionary of constraints 'constr_dict' to the constraints attribute. Any previously defined keywords will be overwritten.""" for key in constr_dict: self.constraints[key]=constr_dict[key] def load_data(self,strict=True): """Load data from self.path as a list of iris cubes, preprocess it, and split it into two CubeLists "data" and "clim". """ CL=iris.cube.CubeList() fs=[self.path+f for f in os.listdir(self.path) if f.endswith(".nc")] for f in fs: CL.append(iris.load_cube(f,constraint=self.constraints["load"])) self.data=CL self._clean_loaded_data() a=self.data.extract(self.constraints["data"]) c=self.data.extract(self.constraints["clim"]) if strict: if a is None: raise(ValueError("No data after applying constraints.")) if c is None: raise(ValueError("No climatology data after applying constraints.")) self.data={"data":a,"clim":c} def _clean_loaded_data(self): """empty method used by derived classes.""" pass def _in_calendar_bounds(self,x): """Evaluates whether a real number x lies between the calendar_bounds of the dataset, wrapping around the end of the year if necessary.""" c0,c1=self.calendar_bounds if c1<c0: ans=(x<=c1) or (x>=c0) else: ans=(x<=c1) and (x>=c0) return ans def restrict_area(self,region): """A convenience method that restricts the spatial extent of the Dataset to one of a few preset domains, defined by a string "region". """ if region.lower()=="europe": lons=[-15,20] lats=[32,60] elif region.lower()=="france": lons=[-5,8] lats=[42,51] elif region.lower()=="north_atlantic": lons=[-80,40] lats=[30,90] else: raise(ValueError(f"Unrecognised region {region}.")) #We use this over intersection, because it works for cubelists area_constr=iris.Constraint(longitude=lambda x: lons[0]<=x<=lons[1],\ latitude=lambda x: lats[0]<=x<=lats[1]) for key in self.data: self.data[key]=self.data[key].extract(area_constr) def add_cat_coord(self,iccat_function,coordname,base_coord): """Adds a categorical coordinate to all cubes in Dataset.data, defined by 'iccat_function' relative to 'base_coord', and called 'coordname'. 
Note that the name of the new coord is defined internally by iccat_function; coordname serves only to graciously handle the case when that coordinate already exists.""" for key in self.data: for i,entry in enumerate(self.data[key]): if entry.coords(coordname)==[]: iccat_function(entry,base_coord) def change_units(self,unit_str=None,cf_unit=None): """Changes the units of all cubes in the Dataset to a new unit given either by a valid cf_units.Unit string specifier 'unit_str', or a cf_units.Unit object, 'cf_unit'.""" if unit_str is not None and cf_unit is not None: raise(ValueError("Only one unit can be provided.")) elif unit_str is not None: unit=cf_units.Unit(unit_str) elif cf_unit is not None: unit=cf_unit else: raise(ValueError("A unit must be provided.")) for key in self.data: for i,entry in enumerate(self.data[key]): entry.convert_units(unit) def change_dates(self,newdates): """ Redefines the 'dates' attribute to the list of 2 datetimes 'newdates', reapplying the "data" and "clim" constraints to match **currently quite slow for large cubelists** """ self.dates=newdates self._d_l,self._d_u=self.dates self.calendar_bounds=[d.timetuple().tm_yday for d in self.dates] CL_data=iris.cube.CubeList() CL_clim=iris.cube.CubeList() for key in self.data: a=self.data[key].extract(self.constraints["data"]) if a != []: CL_data.append(a) a=self.data[key].extract(self.constraints["clim"]) if a != []: CL_clim.append(a) CL_data=iris.cube.CubeList([c for C in CL_data for c in C]) CL_clim=iris.cube.CubeList([c for C in CL_clim for c in C]) self.data["data"]=CL_data.concatenate() self.data["clim"]=CL_clim.concatenate() def change_calendar(self,newcalendar): for key in self.data: for i,entry in enumerate(self.data[key]): newunit=cf_units.Unit(\ entry.coord("time").units.origin,calendar=newcalendar) self.data[key][i].coord("time").unit=newunit def aggregate_by(self,coords,bins,aggregator=iris.analysis.MEAN): """Aggregates the coordinates of all cubes in Dataset into user defined bins. Args: *coords - A list of strings which are the coordinates to be aggregated over. *bins - A corresponding list of lists 'bins'. bins[i] should contain the bounding values over which to group coords[i]. Kwargs: *aggregator -A valid iris.analysis.Aggregator object which specifies how to aggregate entries together. """ binlabels=[] for j,coord in enumerate(coords): binlabels.append(f"bin{j}") for key in self.data: for i,entry in enumerate(self.data[key]): for j,(coord,b) in enumerate(zip(coords,bins)): #remove potential old bins: if self.data[key][i].coords(f"bin{j}")!=[]: self.data[key][i].remove_coord(f"bin{j}") if self.data[key][i].coords(coord)==[]: raise(ValueError("No such coordinate in cube!")) label=np.digitize(entry.coord(coord).points,b) coord_dim=entry.coord_dims(entry.coord(coord)) entry.add_aux_coord(iris.coords.AuxCoord(label,\ var_name=f"bin{j}"),data_dims=coord_dim) self.data[key][i]=entry.aggregated_by(binlabels,aggregator) for j,coord in enumerate(coords): if self.data[key][i].coords(coord)!=[]: self.data[key][i].remove_coord(f"bin{j}") def collapse_over(self,coord,aggregator=iris.analysis.MEAN): """Collapses all cubes in Dataset over a single coordinate. Args: *coords - A string which is the coordinate to collapse. Kwargs: *aggregator -A valid iris.analysis.Aggregator object which specifies how to collapse the coordinate. 
""" for key in self.data: for i,entry in enumerate(self.data[key]): self.data[key][i]=self.data[key][i].collapsed(coord,aggregator) def apply_coslat_mean(self,mask=None): """Collapses the latitude and longitude coordinates of all cubes in Dataset, using a cosine latitude weighting. Kwargs: *mask: A cube with matching latitude and longitude coordinates to the cubes in Dataset. Each gridpoint in 'mask' should vary between 0 (totally masked) to 1 (totally unmasked). """ for key in self.data: for i,entry in enumerate(self.data[key]): weights = cosine_latitude_weights(entry) #include the land sea mask in the weighting if one was passed. if mask is not None: weights=weights*mask.data self.data[key][i]=entry.collapsed(["latitude","longitude"],\ iris.analysis.MEAN,weights=weights) def regrid_to(self,dataset=None,cube=None,regridder=iris.analysis.Linear()): """regrids every cube in Dataset to match either those of another Dataset object, or an iris.Cube object.""" if cube is None and dataset is None: raise(ValueError("No reference for regridding provided!")) elif cube is None: ref_cube=dataset.data["data"][0] else: ref_cube=cube for key in self.data: for i,entry in enumerate(self.data[key]): self.data[key][i]=entry.regrid(ref_cube,regridder) def apply(self,func,*args,in_place=True,keys=None,**kwargs): """A method which applies a function to every cube in Dataset Args: *func - A function of the type func(cube,*args,**kwargs). Kwargs: in_place - A boolean, specifying whether func returns an output or not. If True, cube is set equal to func(cube), unless the output is None, in which case cube is removed from the CubeList. """ if keys is None: keys=self.data for key in keys: for i,entry in enumerate(self.data[key]): result=func(entry,*args,**kwargs) if in_place: pass else: if result is not None: self.data[key][i]=result else: self.data[key].remove(self.data[key][i]) def apply_constraint(self,constraint,keys=None): """Apply a constraint to all cubes in Dataset""" if keys is None: keys=self.data for key in
<reponame>fplk/mcs-scene-generator import argparse import glob import logging import os import shapely from shapely import affinity import machine_common_sense as mcs import optimal_path DEBUG_DIRECTORY = './' PERFORMER_AGENT_MAX_REACH = 1 PERFORMER_AGENT_MASS = 2 TROPHY_SIZE = (0.19, 0.14) def action_to_string(action_data): """Return the given action data as a string.""" action_text = action_data['action'] for key, value in action_data['params'].items(): action_text += ',' + key + '=' + value return action_text def action_list_to_single_string(action_list): """Return the given action data list as a single string.""" return ';'.join([ action_to_string(action_data) for action_data in action_list ]) def find_distance_to(step_metadata, object_metadata): """Find and return the distance from the performer agent's current location to the object with the given object output metadata.""" bounds = shapely.geometry.box( object_metadata.position['x'] - (TROPHY_SIZE[0] / 2.0), object_metadata.position['z'] - (TROPHY_SIZE[1] / 2.0), object_metadata.position['x'] + (TROPHY_SIZE[0] / 2.0), object_metadata.position['z'] + (TROPHY_SIZE[1] / 2.0) ) bounds = affinity.rotate(bounds, -object_metadata.rotation['y']) distance = shapely.geometry.Point( step_metadata.position['x'], step_metadata.position['z'] ).distance(bounds) return distance def find_path_list(scene_data, debug_plots): """Find and return the list of each possible best path.""" target_dict = find_target_dict(scene_data) path_list = optimal_path.find_possible_best_path_list( scene_data['performerStart'], target_dict, [object_dict for object_dict in scene_data['objects'] if ( # Ignore the target object. object_dict['id'] != target_dict['id'] and # Ignore any object inside a container. (not object_dict.get('locationParent', None)) and # Ignore any object light enough that won't obstruct the performer. (object_dict['mass'] > PERFORMER_AGENT_MASS) )], (DEBUG_DIRECTORY + scene_data['name']) if debug_plots else None ) for path in path_list: if 'locationParent' in target_dict: optimal_path.open_container_and_pickup_target( path, target_dict['id'], [object_dict for object_dict in scene_data['objects'] if ( object_dict['id'] == target_dict['locationParent'] )][0] ) else: optimal_path.pickup_target(path, target_dict['id']) return path_list def find_target_dict(scene_data): """Find and return the target dict from the scene data.""" return scene_data['objects'][0] def read_path_file(folder, name): """Read and return an action path from file.""" path = optimal_path.ShortestPath([], None, None) with open(folder + '/' + name + '.txt', 'r') as action_file: for line in action_file: line_data = line.strip().split(',') action_data = { 'action': line_data[0], 'params': {} } for key_value in line_data[1:]: key_value_data = key_value.split('=') action_data['params'][key_value_data[0]] = key_value_data[1] path.action_list.append(action_data) return [path] def run_scene_with_action_list(scene_data, controller, action_list): """Run the MCS scene with the given data using the given MCS controller and MCS action data list. Return the reward, obstructed status, and modified action data list.""" modified_action_list = action_list # Start the scene. step_metadata = controller.start_scene(scene_data) opened = False target_dict = find_target_dict(scene_data) container_id = target_dict.get('locationParent', None) # Run each action from the scene's shortest path. 
for index, action_data in enumerate(action_list): action = action_data['action'] params = action_data['params'] step_metadata = controller.step(action, **params) # If the path was obstructed, just quit now. if step_metadata.return_status == 'OBSTRUCTED': return 0, index, action_list # Try to open the target's container if it's within reach and visible. if container_id and not opened: done, step_metadata, modified_action_list, opened = try_early_open( controller, step_metadata, action_list, index, container_id ) # Try to pickup the target object if it's within reach and visible. else: done, step_metadata, modified_action_list = ( try_early_pickup(controller, step_metadata, action_list, index) ) # If trying to pickup the target early worked, end the scene now. if done: break # End the scene. controller.end_scene("", 1) # Return the reward received from the last action. return step_metadata.reward, -1, modified_action_list def save_shortest_path(output_folder, output_filename, action_list): """Save the action list to file.""" os.makedirs(output_folder, exist_ok=True) output_filename = output_folder + '/' + output_filename + '.txt' with open(output_filename, 'w') as output_file: for action_data in action_list: output_file.write(action_to_string(action_data) + '\n') def try_early_open(controller, step_metadata, action_list, index, object_id): """Try to open the container early if it's visible and within reach; then, try to pickup the target; if successful, return the modified action data list.""" for object_metadata in step_metadata.object_list: # If this is the output metadata for the container and it's visible... if object_metadata.uuid == object_id and object_metadata.visible: distance = find_distance_to(step_metadata, object_metadata) # If it's within reach... if distance <= PERFORMER_AGENT_MAX_REACH: # Run an open action. step_metadata = controller.step( 'OpenObject', objectId=object_metadata.uuid ) # If successful, modify the action list with the open action. if step_metadata.return_status == 'SUCCESSFUL': action_list = action_list[:(index + 1)] + [{ 'action': 'OpenObject', 'params': {'objectId': object_metadata.uuid} }] # If out-of-reach... elif step_metadata.return_status == 'OUT_OF_REACH': # Run a move action. step_metadata = controller.step('MoveAhead') # If obstructed, the early open failed, so return. if step_metadata.return_status == 'OBSTRUCTED': return False, step_metadata, action_list, False # Else run an open action again. step_metadata = controller.step( 'OpenObject', objectId=object_metadata.uuid ) # If the open action is obstructed or out-of-reach, the # early open failed, so undo the move and return. if ( step_metadata.return_status == 'OUT_OF_REACH' or step_metadata.return_status == 'OBSTRUCTED' ): step_metadata = controller.step('MoveBack') return False, step_metadata, action_list, False # Else the early open was successful so modify the action # list with the new move and open actions. action_list = action_list[:(index + 1)] + [{ 'action': 'MoveAhead', 'params': {} }, { 'action': 'OpenObject', 'params': {'objectId': object_metadata.uuid} }] # Else, assume the early open failed and return. else: return False, step_metadata, action_list, False # If the early open was successful, try an early pickup. done, step_metadata, action_list = try_early_pickup( controller, step_metadata, action_list, index ) return done, step_metadata, action_list, True # Return failed (object was not visible or not within reach). 
return False, step_metadata, action_list, False def try_early_pickup(controller, step_metadata, action_list, index): """Try to pickup the target early if it's visible and within reach; if successful, return the modified action data list.""" for object_metadata in step_metadata.object_list: # If this is the output metadata for the target and it's visible... if object_metadata.shape == 'trophy' and object_metadata.visible: distance = find_distance_to(step_metadata, object_metadata) # If it's within reach... if distance <= PERFORMER_AGENT_MAX_REACH: # Run a pickup action. step_metadata = controller.step( 'PickupObject', objectId=object_metadata.uuid ) # If successful, modify the action list with the pickup action. if step_metadata.return_status == 'SUCCESSFUL': action_list = action_list[:(index + 1)] + [{ 'action': 'PickupObject', 'params': {'objectId': object_metadata.uuid} }] # If out-of-reach... elif step_metadata.return_status == 'OUT_OF_REACH': # Run a move action. step_metadata = controller.step('MoveAhead') # If obstructed, the early pickup failed, so return. if step_metadata.return_status == 'OBSTRUCTED': return False, step_metadata, action_list # Else run a pickup action again. step_metadata = controller.step( 'PickupObject', objectId=object_metadata.uuid ) # If the pickup action is obstructed or out-of-reach, the # early pickup failed, so undo the move and return. if ( step_metadata.return_status == 'OUT_OF_REACH' or step_metadata.return_status == 'OBSTRUCTED' ): step_metadata = controller.step('MoveBack') return False, step_metadata, action_list # Else the early pickup was successful so modify the action # list with the new move and pickup actions. action_list = action_list[:(index + 1)] + [{ 'action': 'MoveAhead', 'params': {} }, { 'action': 'PickupObject', 'params': {'objectId': object_metadata.uuid} }] # Else, assume the early pickup failed and return. else: return False, step_metadata, action_list # Return success. return True, step_metadata, action_list # Return failed (object was not visible or not within reach). return False, step_metadata, action_list def main(args): # Identify all the _debug.json MCS scene files. filename_list = glob.glob(args.file_path_prefix + '*_debug.json') filename_list.sort() if len(filename_list) == 0: print(f'No files ending in _debug.json with prefix: ' f'{args.file_path_prefix}') return # Start a single MCS controller for testing all the MCS scene files. controller = mcs.create_controller(args.mcs_unity_build, debug=True, history_enabled=False) finished_file_list = [] failed_file_list = [] for filename in filename_list: print('**************************************') print(f'>>>>> {filename}') obstructed_path_text_list = [] reward = None # Load the scene data from its JSON file. scene_data, status = mcs.load_config_json_file(filename) if status is not None: print(status) continue # Find each possible best path for the scene. path_list = ( read_path_file(args.action_file_folder, scene_data.name) if args.read_existing else find_path_list(scene_data, args.debug_plots) ) if args.debug_actions: for i, path in enumerate(path_list): save_shortest_path( DEBUG_DIRECTORY + scene_data.name + '/', scene_data.name + '_' + str(i), path.action_list ) for i, path in enumerate(path_list): print(f'>>>>> Shortest Path {i}: {len(path_list[0].action_list)}') # If this path starts with a series of actions that was already # tried and returned obstructed, then skip this path. 
path_text = action_list_to_single_string(path.action_list) obstructed = False for obstructed_path_text in obstructed_path_text_list: if path_text.startswith(obstructed_path_text): obstructed = True break if obstructed: print(f'>>>>> Skipping Obstructed Path {i}') continue # Test the path to see if it will return a positive reward. reward, obstructed_step, modified_action_list = ( run_scene_with_action_list( scene_data, controller, path.action_list ) ) # If the path was obstructed, then try the next path. if obstructed_step >= 0: reward = None obstructed_path_text_list.append(action_list_to_single_string( path.action_list[:(obstructed_step + 1)] )) continue if reward: print(f'>>>>>
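# --- Illustrative sketch (added for clarity; not part of the original script) ---
# find_distance_to() above measures the gap between the performer agent and the
# trophy by building the trophy's rectangular footprint in the x/z plane,
# rotating it by the object's yaw, and taking the shapely point-to-polygon
# distance. The yaw is negated, presumably because the simulator's rotation
# sense is opposite to shapely's counter-clockwise convention. The helper below
# restates that computation with plain floats; the function name is an
# assumption made for this sketch.
import shapely.geometry
from shapely import affinity


def distance_to_rotated_footprint(agent_x, agent_z, obj_x, obj_z, obj_yaw_deg,
                                  size=TROPHY_SIZE):
    """Distance from the agent's (x, z) position to the object's footprint."""
    half_x, half_z = size[0] / 2.0, size[1] / 2.0
    footprint = shapely.geometry.box(obj_x - half_x, obj_z - half_z,
                                     obj_x + half_x, obj_z + half_z)
    # shapely rotates counter-clockwise in degrees about the geometry's centre
    # by default; negate to match the convention used in find_distance_to().
    footprint = affinity.rotate(footprint, -obj_yaw_deg)
    return shapely.geometry.Point(agent_x, agent_z).distance(footprint)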
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # This file is part of the bapsflib package, a Python toolkit for the # BaPSF group at UCLA. # # http://plasma.physics.ucla.edu/ # # Copyright 2017-2018 <NAME> and contributors # # License: Standard 3-clause BSD; see "LICENSES/LICENSE.txt" for full # license terms and contributor agreement. # import astropy.units as u import h5py import numpy as np import os import unittest as ut from unittest import mock from bapsflib._hdf.maps import HDFMap from bapsflib._hdf.maps.digitizers.sis3301 import HDFMapDigiSIS3301 from ..file import File from ..hdfreadcontrols import HDFReadControls from ..hdfreaddata import ( build_sndr_for_simple_dset, condition_shotnum, do_shotnum_intersection, HDFReadData, ) from . import TestBase, with_bf class TestHDFReadData(TestBase): """ Test Case for :class:`~bapsflib._hdf.utils.hdfreaddata.HDFReadData` """ # # Notes: # - tests are currently performed on digitizer 'SIS 3301' def setUp(self): super().setUp() def tearDown(self): super().tearDown() @with_bf @mock.patch("bapsflib._hdf.utils.hdfreaddata.HDFReadControls") @mock.patch("bapsflib._hdf.utils.hdfreaddata.condition_controls") @mock.patch.object( HDFMap, "controls", new_callable=mock.PropertyMock, return_value={"control": None} ) def test_adding_controls(self, _bf: File, mock_cmap, mock_cc, mock_cdata): """Test adding control device data to digitizer data.""" # setup HDF5 sn_size = 50 self.f.add_module("SIS 3301", {"n_configs": 1, "sn_size": sn_size, "nt": 1000}) _mod = self.f.modules["SIS 3301"] digi = "SIS 3301" adc = "SIS 3301" config_name = _mod.knobs.active_config[0] bc_arr = _mod.knobs.active_brdch bc_indices = np.where(bc_arr) brd = bc_indices[0][0] ch = bc_indices[1][0] _bf._map_file() # re-map file digi_path = "Raw data + config/SIS 3301" dset_name = f"{config_name} [{brd}:{ch}]" dset_path = f"{digi_path}/{dset_name}" dset = _bf.get(dset_path) dheader = _bf.get(f"{dset_path} headers") shotnumkey = "Shot" # setup mock control data cdata = np.empty( 20, dtype=[ ("shotnum", np.uint32, 1), ("xyz", np.float32, 3), ("freq", np.float32, 1), ], ) cdata["shotnum"] = np.arange(41, 61, 1, dtype=np.uint32) cdata["xyz"][..., 0] = np.arange(0.0, 20.0, 1.0, dtype=np.float32) cdata["xyz"][..., 1] = np.arange(0.0, 30.0, 1.5, dtype=np.float32) cdata["xyz"][..., 2] = np.arange(0.0, -40.0, -2.0, dtype=np.float32) cdata["freq"] = np.arange(20.0, 120.0, 5.0, dtype=np.float32) # setup mock condition_controls mock_cc.return_value = [("control", "config01")] # -- `intersection_set=True` ---- # shotnum exists in both digitizer and control dataset shotnum = 45 indices = [44] m_info = { "controls": { "control": { "device group path": "Raw data + config/control", "contype": "motion+", "configuration name": "config01", } } } m_cdata = np.reshape(cdata[4], 1).view(HDFReadControls) m_cdata._info = m_info mock_cdata.return_value = m_cdata.view(HDFReadControls) data = HDFReadData( _bf, brd, ch, config_name=config_name, adc=adc, digitizer=digi, shotnum=shotnum, add_controls=[("control", "config01")], intersection_set=True, ) self.assertDataObj(data, _bf, motion_added=True) self.assertTrue( np.array_equiv(data["shotnum"], np.array([shotnum], dtype=np.uint32)) ) self.assertDataArrayValues(data, dset, indices) self.assertControlInData( m_cdata, data, np.array([shotnum], dtype=np.uint32).reshape(1) ) self.assertTrue(mock_cdata.called) self.assertTrue(mock_cc.called) mock_cdata.reset_mock() mock_cc.reset_mock() # some shot numbers are missing from the control dataset shotnum = [20, 45] indices = 
[44] m_info = { "controls": { "control": { "device group path": "Raw data + config/control", "contype": "motion+", "configuration name": "config01", } } } fields = list(cdata.dtype.names) fields.remove("xyz") m_cdata = np.reshape(cdata[fields][4], (1,)) m_cdata = m_cdata.view(HDFReadControls) m_cdata._info = m_info mock_cdata.return_value = m_cdata.view(HDFReadControls) data = HDFReadData( _bf, brd, ch, config_name=config_name, adc=adc, digitizer=digi, shotnum=shotnum, add_controls=[("control", "config01")], intersection_set=True, ) self.assertDataObj(data, _bf, motion_added=True) self.assertTrue(np.array_equiv(data["shotnum"], np.array([45], dtype=np.uint32))) self.assertDataArrayValues(data, dset, indices) self.assertControlInData( m_cdata, data, np.array([45], dtype=np.uint32).reshape(1) ) self.assertTrue(mock_cdata.called) self.assertTrue(mock_cc.called) mock_cdata.reset_mock() mock_cc.reset_mock() # control does not have 'xyz' shotnum = 45 indices = [44] m_info = { "controls": { "control": { "device group path": "Raw data + config/control", "contype": "motion+", "configuration name": "config01", } } } m_cdata = np.reshape(cdata[4], 1).view(HDFReadControls) m_cdata._info = m_info mock_cdata.return_value = m_cdata.view(HDFReadControls) data = HDFReadData( _bf, brd, ch, config_name=config_name, adc=adc, digitizer=digi, shotnum=shotnum, add_controls=[("control", "config01")], intersection_set=True, ) self.assertDataObj(data, _bf, motion_added=True) self.assertTrue( np.array_equiv(data["shotnum"], np.array([shotnum], dtype=np.uint32)) ) self.assertDataArrayValues(data, dset, indices) self.assertControlInData( m_cdata, data, np.array([shotnum], dtype=np.uint32).reshape(1) ) self.assertTrue(mock_cdata.called) self.assertTrue(mock_cc.called) mock_cdata.reset_mock() mock_cc.reset_mock() # -- `intersection_set=False` ---- # some shot numbers are missing from the control dataset shotnum = [20, 45] indices = [19, 44] m_info = { "controls": { "control": { "device group path": "Raw data + config/control", "contype": "motion+", "configuration name": "config01", } } } m_cdata = np.empty(1, dtype=cdata.dtype).view(HDFReadControls) m_cdata[0]["shotnum"] = 20 m_cdata[0]["xyz"] = np.nan m_cdata[0]["freq"] = np.nan m_cdata = np.append(m_cdata, np.reshape(cdata[4], 1)) m_cdata = m_cdata.view(HDFReadControls) m_cdata._info = m_info mock_cdata.return_value = m_cdata.view(HDFReadControls) data = HDFReadData( _bf, brd, ch, config_name=config_name, adc=adc, digitizer=digi, shotnum=shotnum, add_controls=[("control", "config01")], intersection_set=False, ) self.assertDataObj(data, _bf, motion_added=True) self.assertTrue( np.array_equiv(data["shotnum"], np.array([shotnum], dtype=np.uint32)) ) self.assertDataArrayValues(data, dset, indices) self.assertControlInData(m_cdata, data, np.array(shotnum, dtype=np.uint32)) self.assertTrue(mock_cdata.called) self.assertTrue(mock_cc.called) mock_cdata.reset_mock() mock_cc.reset_mock() @with_bf def test_kwarg_adc(self, _bf: File): """Test handling of keyword `adc`.""" # Handling should be done by mapping function # `construct_dset_name` which is covered by the associated # mapping test # # add a digitizer self.f.add_module("SIS 3301", {"n_configs": 1, "sn_size": 50}) _mod = self.f.modules["SIS 3301"] digi = "SIS 3301" adc = "SIS 3301" config_name = _mod.knobs.active_config[0] bc_arr = _mod.knobs.active_brdch bc_indices = np.where(bc_arr) brd = bc_indices[0][0] ch = bc_indices[1][0] _bf._map_file() # re-map file _map = _bf.file_map.digitizers[digi] with mock.patch.object( 
HDFMapDigiSIS3301, "construct_dataset_name", wraps=_map.construct_dataset_name ) as mock_cdn: # everything is good data = HDFReadData( _bf, brd, ch, adc=adc, digitizer=digi, config_name=config_name ) self.assertTrue(mock_cdn.called) self.assertDataObj(data, _bf) self.assertEqual(data.info["adc"], adc) mock_cdn.reset_mock() # not a configuration name with self.assertRaises(ValueError): data = HDFReadData( _bf, brd, ch, adc="not an adc", digitizer=digi, config_name=config_name, ) self.assertTrue(mock_cdn.called) mock_cdn.reset_mock() # `adc` None data = HDFReadData(_bf, brd, ch, digitizer=digi, config_name=config_name) self.assertTrue(mock_cdn.called) self.assertDataObj(data, _bf) self.assertEqual(data.info["adc"], adc) mock_cdn.reset_mock() @with_bf def test_kwarg_board(self, _bf: File): """Test handling of argument `board`.""" # Handling should be done by mapping function # `construct_dset_name` which is covered by the associated # mapping test # # add a digitizer self.f.add_module("SIS 3301", {"n_configs": 1, "sn_size": 50}) _mod = self.f.modules["SIS 3301"] digi = "SIS 3301" adc = "SIS 3301" config_name = _mod.knobs.active_config[0] bc_arr = _mod.knobs.active_brdch bc_indices = np.where(bc_arr) brd = bc_indices[0][0] ch = bc_indices[1][0] # re-map file _bf._map_file() # run assertions _map = _bf.file_map.digitizers[digi] with mock.patch.object( HDFMapDigiSIS3301, "construct_dataset_name", wraps=_map.construct_dataset_name ) as mock_cdn: # everything is good data = HDFReadData( _bf, brd, ch, adc=adc, digitizer=digi, config_name=config_name ) self.assertTrue(mock_cdn.called) self.assertDataObj(data, _bf) self.assertEqual(data.info["board"], brd) mock_cdn.reset_mock() # not a configuration name with self.assertRaises(ValueError): data = HDFReadData( _bf, -1, ch, adc=adc, digitizer=digi, config_name=config_name ) self.assertTrue(mock_cdn.called) mock_cdn.reset_mock() @with_bf def test_kwarg_channel(self, _bf: File): """Test handling of argument `channel`.""" # Handling should be done by mapping function # `construct_dset_name` which is covered by the associated # mapping test # # add a digitizer self.f.add_module("SIS 3301", {"n_configs": 1, "sn_size": 50}) _mod = self.f.modules["SIS 3301"] digi = "SIS 3301" adc = "SIS 3301" config_name = _mod.knobs.active_config[0] bc_arr = _mod.knobs.active_brdch bc_indices = np.where(bc_arr) brd = bc_indices[0][0] ch = bc_indices[1][0] _bf._map_file() # re-map file _map = _bf.file_map.digitizers[digi] with mock.patch.object( HDFMapDigiSIS3301, "construct_dataset_name", wraps=_map.construct_dataset_name ) as mock_cdn: # everything is good data = HDFReadData( _bf, brd, ch, adc=adc, digitizer=digi, config_name=config_name ) self.assertTrue(mock_cdn.called) self.assertDataObj(data, _bf) self.assertEqual(data.info["channel"], ch) mock_cdn.reset_mock() # not a configuration name with self.assertRaises(ValueError): data = HDFReadData( _bf, brd, -1, adc=adc, digitizer=digi, config_name=config_name ) self.assertTrue(mock_cdn.called) mock_cdn.reset_mock() @with_bf def test_kwarg_config_name(self, _bf: File): """Test handling of keyword `digitizer`.""" # Handling should be done by mapping function # `construct_dset_name` which is covered by the associated # mapping test # # add a digitizer self.f.add_module("SIS 3301", {"n_configs": 1, "sn_size": 50}) _mod = self.f.modules["SIS 3301"] digi = "SIS 3301" adc = "SIS 3301" config_name = _mod.knobs.active_config[0] bc_arr = _mod.knobs.active_brdch bc_indices = np.where(bc_arr) brd = bc_indices[0][0] ch = bc_indices[1][0] 
_bf._map_file() # re-map file _map = _bf.file_map.digitizers[digi] with mock.patch.object( HDFMapDigiSIS3301, "construct_dataset_name", wraps=_map.construct_dataset_name ) as mock_cdn: # everything is good data = HDFReadData( _bf, brd, ch, adc=adc, digitizer=digi, config_name=config_name ) self.assertTrue(mock_cdn.called) self.assertDataObj(data, _bf) self.assertEqual(data.info["configuration name"], config_name) mock_cdn.reset_mock() # not a configuration name with self.assertRaises(ValueError): data = HDFReadData( _bf, brd, ch, adc=adc, digitizer=digi, config_name="not a config" ) self.assertTrue(mock_cdn.called) mock_cdn.reset_mock() # `config_name` None with self.assertWarns(UserWarning): data = HDFReadData(_bf, brd, ch, adc=adc, digitizer=digi) self.assertTrue(mock_cdn.called) self.assertDataObj(data, _bf) self.assertEqual(data.info["configuration name"], config_name) mock_cdn.reset_mock() @with_bf def test_kwarg_digitizer(self, _bf: File): """Test handling of keyword `digitizer`.""" # Note: cases were an exception is raise is covered by # `test_raise_errors` # # add a digitizer self.f.add_module("SIS 3301", {"n_configs": 1, "sn_size": 50}) _mod = self.f.modules["SIS 3301"] digi = "SIS 3301" adc = "SIS 3301" config_name = _mod.knobs.active_config[0] bc_arr = _mod.knobs.active_brdch bc_indices = np.where(bc_arr) brd = bc_indices[0][0] ch = bc_indices[1][0] _bf._map_file() # re-map file # `digitizer` is None and "main" digitizer was identified with self.assertWarns(UserWarning): data = HDFReadData(_bf, brd, ch, adc=adc, config_name=config_name) self.assertDataObj(data, _bf) self.assertEqual(data.info["digitizer"], digi) # specified `digitizer` is VALID data = HDFReadData(_bf, brd, ch, digitizer=digi, adc=adc, config_name=config_name) self.assertDataObj(data, _bf) self.assertEqual(data.info["digitizer"], digi) @with_bf def test_kwarg_keep_bits(self, _bf: File): """Test behavior of keyword `keep_bits`.""" # setup sn_size = 50 self.f.add_module("SIS 3301", {"n_configs": 1, "sn_size": sn_size, "nt": 1000}) _mod = self.f.modules["SIS 3301"] digi = "SIS
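# ---------------------------------------------------------------------------
# Aside: the keyword tests above all lean on the same spy pattern,
# mock.patch.object(..., wraps=...) around `construct_dataset_name`, so the
# real mapping code still runs while the test can assert it was called.
# A minimal, self-contained sketch of that pattern follows; the `Greeter`
# class and its method are made up for illustration and are not part of
# bapsflib.
import unittest
from unittest import mock


class Greeter:
    """Stand-in class used only to illustrate the wrap-and-spy pattern."""

    def greet(self, name):
        return "hello " + name


class TestGreeter(unittest.TestCase):
    def test_greet_is_spied_but_still_works(self):
        greeter = Greeter()

        # `wraps=` keeps the real implementation while recording every call,
        # just like the tests above wrap the digitizer's dataset-name builder.
        with mock.patch.object(Greeter, "greet", wraps=greeter.greet) as spy:
            result = greeter.greet("world")

        self.assertTrue(spy.called)
        self.assertEqual(result, "hello world")


if __name__ == "__main__":
    unittest.main()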
        # Find the height using 'mask' on the same subsetted data
        PC_Height = Height[space[0]:space[1]][mask]

        # Determine the horizontal distance between RUAO and Point Charge.
        if method == 'static':
            Cloud_Height[i] = PC_Height
        elif method == 'dynamic':
            Cloud_Height[i] = ((Radiosonde_Data['Units']['range'][space[0]:space[1]][mask]/1000)**2
                               + PC_Height**2)**0.5

    # Convert heights from km to m
    Cloud_Height *= 1000

    # [3] POINT CHARGE AREA
    # Calculate the area that each point charge corresponds to. Here
    # we use the vertical height gained between sign changes.
    Cloud_AscentArea = np.array([Height[space[1]] - Height[space[0]]
                                 for space in Cloud_SignChange], dtype=np.float64)*1000

    # [4] CLOUD CHARGE
    # Now calculate the charge within the area.
    Cloud_Charge = Cloud_SpaceCharge * (4/3)*np.pi*Cloud_AscentArea**3  # * 3

    # [5] CLOUD VELOCITY
    # Calculate the velocity of each point charge.
    Cloud_Velocity = np.array([np.nanmean(Wind[space[0]:space[1]])
                               for space in Cloud_SignChange], dtype=np.float64)  # /4.5

    # [6] CLOUD TIME
    # Specify the time range in seconds to calculate the electric field over.
    # The time range revolves around the first point charge specified,
    # therefore, Cloud_Time is typically specified with +- bounds.
    Cloud_Time = np.arange(-3000, 3000, 1)

    # [7] CLOUD TIME DIFFERENCE
    # Get the time for each point charge.
    Cloud_TimeDiff = np.zeros(Cloud_SignChange.shape[0])
    if method == 'dynamic':
        for i, space in enumerate(Cloud_SignChange):
            # Get the local index of the absolute maximum space charge density
            mask = np.argmax(np.abs(SpaceCharge[space[0]:space[1]]))
            # Find the time using 'mask' on the same subsetted data
            Cloud_TimeDiff[i] = Time[space[0]:space[1]][mask]
        Cloud_TimeDiff -= Cloud_TimeDiff[0]

    ############################################################################
    """Calculate the electric field"""

    # [8] ELECTRIC FIELD CALCULATION
    # Now that Cloud_Time, Cloud_TimeDiff, Cloud_Velocity, Cloud_Height and
    # Cloud_Charge have been calculated, the electric field can be found.
    # Pre-allocate one field time series per point charge.
    Cloud_ElectricField = np.zeros((Cloud_SignChange.shape[0], Cloud_Time.shape[0]))
    for i, (time_diff, height, velocity, charge) in enumerate(
            zip(Cloud_TimeDiff, Cloud_Height, Cloud_Velocity, Cloud_Charge)):
        #Cloud_ElectricField[i] = (gu.cosarctan(((Cloud_Time+time_diff)*velocity)/height)*charge)/(k*height**2)
        Cloud_ElectricField[i] = (gu.cosarctan(((Cloud_Time-time_diff)*velocity)/height)*charge)/(k*height**2)

    # Add the background electric field to the calculations. For now we can
    # assume the background electric field is 100 V/m.
Cloud_ElectricField_Total = PG0 + np.nansum(Cloud_ElectricField, axis=0) ############################################################################ """Determine the time stamp data for the flight""" # Get the time when the radiosonde reached the cloud base Cloud_BaseTime = LaunchTime + np.timedelta64(int(Radiosonde_Data['Units']['time'][CloudIndex[0]]), 's') # Calculate datetimes for each calculation in Cloud_ElectricField_Total Cloud_DateTime = Cloud_BaseTime + Cloud_Time.astype('timedelta64[s]') if verbose: print("Number of Point Charges =", Cloud_SignChange.shape[0]) return Cloud_DateTime, Cloud_ElectricField, Cloud_ElectricField_Total def Superplotter(self): """ This function will plot the data from a single radiosonde flight """ if self.verbose: gu.cprint("[INFO] You are running Superplotter from the DEV release", type='bold') ############################################################################ """Prerequisites""" # Time Controls t_begin = time.time() # Make data local to method Radiosonde_File = self.Radiosonde_File[self.sensor_package] ############################################################################ """[Step 1] Plot radiosonde data""" # Specify plot title plot_title = 'Radiosonde Flight No.' + self.sensor_package + ' (' + self.Launch_Datetime[self.sensor_package].strftime("%d/%m/%Y %H%MUTC") + ')' # Set-up radiosonde plotter Superplotter = SPRadiosonde(self.Radiosonde_Data, numplots=7, which_ascents=(self.sensor_package,), plot_title=plot_title, height_range=self.height_range, calibrate=self.calibrate) if self.calibrate in ['Counts', 'Volts']: Superplotter.Charge(linear=True, log=False) Superplotter.Charge(linear=False, log=True) else: Superplotter.Charge(type='space_charge') if int(self.sensor_package) in [1,2,3,4,5]: # Plot cloud sensor data Superplotter.Cloud(ir=True, cyan=True) # Plot liquid water sensor data if int(self.sensor_package) < 3: Superplotter.Liquid_Water(type='Liquid_Water', point=False) else: Superplotter.Liquid_Water(type='Liquid_Water', point=True) # Plot calibrated liquid water sensor data if self.calibrate in ['Units']: if int(self.sensor_package) < 3: Superplotter.Liquid_Water(type='SLWC', point=False) else: Superplotter.Liquid_Water(type='SLWC', point=True) else: # Plot cloud sensor data Superplotter.Cloud() # Plot turbulence sensor data if self.calibrate in ['Units']: Superplotter.Turbulence(type='Turbulence') Superplotter.Turbulence(type='Eddy Dissipation Rate') else: Superplotter.Turbulence(type='Turbulence') # Plot the processed Liquid_Water data #if (self.calibrate == "units") & (int(self.sensor_package) < 8): Superplotter.ch(14, 'SLWC $(g$ $m^{-3})$', 'Supercooled Liquid\nWater Concentration', check=1112, point=True) # Plot the cloud boundaries if specified if self.Clouds_ID is not None: Superplotter.Cloud_Boundaries(self.Clouds_ID, self.LayerType, CloudOnly=True) ############################################################################ """[Step 2] Save plot and return""" # Specify the directory the plots are stored in path = os.path.dirname(Radiosonde_File).replace(self.Storage_Path + self.Processed_Data_Path,"") # Find any other plots stored in this directory previous_plots = glob.glob(self.Storage_Path + self.Radiosonde_Plots_Path + path + "/*") # Find the biggest 'v' number in plots plot_version = [] for plots in previous_plots: try: plot_version.append(int(os.path.basename(plots)[34:37])) except ValueError: plot_version.append(int(os.path.basename(plots)[34:36])) plot_version = str(np.max(plot_version)+1) if 
len(plot_version) != 0 else '1' # Create full directory and file name Save_Location = self.Storage_Path + self.Radiosonde_Plots_Path + path + '/' + path + '_v' + plot_version.rjust(2,'0') + '_' + str(self.height_range[0]).rjust(2,'0') + 'km_to_' + str(self.height_range[1]).rjust(2,'0') + 'km.png' # Ensure the directory exists on file system and save to that location gu.ensure_dir(os.path.dirname(Save_Location)) Superplotter.savefig(Save_Location) if self.verbose is True: print("[INFO] Superplotter completed successfully (In %.2fs)" % (time.time()-t_begin)) def Tephigram(self, plot_tephigram=False, plot_larkhill=False): """ The Radiosonde_Tephigram function will plot a tephigram from the dry bulb temperature, T_dry and the Dew point Temperature, T_dew for pressure values, P at each corresponding height. Certain tephigram outputs are available from this function including: 1) Lower Condensation Level (LCL) in m 2) Level of Free Convection (LFC) in m 3) Environmental Level (EL) in m 4) Convective Available Potential Energy (CAPE) in J/kg 5) Convective INhibition (CIN) in J/kg Parameters ---------- plot_tephigram : bool, optional, default is False Specify True to plot a tephigram of the sounding data. Otherwise just calculate the sounding indices plot_larkhill : bool, optional, default is False Specify True to add the sounding from Camborne at the closest time to the launch time. Only used if plot_tephigram is True. Outputs ------- References ---------- <NAME>., 2010. Water in the Atmosphere. In: Thermal Physics of the Atmosphere. Oxford: Wiley & Sons, pp. 93-109 <NAME>. 2018. Tephigram. Original Matlab code found in Matlab_Code directory <NAME>. 2018. Tephigram. Original Python code found in the same directory. """ if self.verbose is True: gu.cprint("[INFO] You are running Radiosonde_Tephigram from the STABLE release", type='bold') ############################################################################ """Prerequisites""" # Time Controls t_begin = time.time() # Set-up data importer EPCC_Data = EPCC_Importer() ############################################################################ """[Step 1] Calibrate bespoke sensors""" # Return Data (make local to function only. i.e. 
DON'T use self.Radiosonde_Data) Radiosonde_Data = self.Radiosonde_Data[self.sensor_package]['Counts'].copy() Radiosonde_File = self.Radiosonde_File[self.sensor_package] # Extract data into easy to read variables Z = Radiosonde_Data['height'][1:] Tdry = Radiosonde_Data['Tdry'][1:] Tdew = Radiosonde_Data['Tdew'][1:] Pres = Radiosonde_Data['P'][1:] RH = Radiosonde_Data['RH'][1:]/100; RH -= np.max(RH) - 0.01 Wind_Mag = (Radiosonde_Data['u'][1:]**2 + Radiosonde_Data['v'][1:]**2)**0.5 Wind_Dir = np.arctan2(Radiosonde_Data['u'][1:], Radiosonde_Data['v'][1:]) * 180 / np.pi ############################################################################ """[Step 2] Create Tephigram""" if plot_tephigram is True: if self.verbose is True: print("[INFO] Plotting Tephigram...") print("plot_larkhill", plot_larkhill) # Unpack variables Z_Plot = Radiosonde_Data['height'] Tdry_Plot = Radiosonde_Data['Tdry'] Tdew_Plot = Radiosonde_Data['Tdew'] Pres_Plot = Radiosonde_Data['P'] # Subset the tephigram to specified location locator = gu.argneararray(Z_Plot, np.array(self.height_range)*1000) anchor = np.array([(Pres_Plot[locator]),(Tdry_Plot[locator])]).T Pres_Plot_Antinan, Tdry_Plot_Antinan, Tdew_Plot_Antinan = gu.antinan(np.array([Pres_Plot, Tdry_Plot, Tdew_Plot]), unpack=True) # Group the dews, temps and wind profile measurements dews = zip(Pres_Plot_Antinan, Tdew_Plot_Antinan) temps = zip(Pres_Plot_Antinan, Tdry_Plot_Antinan) barb_vals = zip(Pres,Wind_Dir,Pres_Plot) # Create Tephigram plot Tephigram = SPTephigram() # Plot Reading sounding data profile_t1 = Tephigram.plot(temps, color="red", linewidth=1, label='Reading Dry Bulb Temperature', zorder=5) profile_d1 = Tephigram.plot(dews, color="blue", linewidth=1, label='Reading Dew Bulb Temperature', zorder=5) # Plot Larkhill sounding data if plot_larkhill is True: # Determine ULS data ULS_File = sorted(glob.glob(PhD_Global.Raw_Data_Path + 'Met_Data/ULS/03743/*')) # Check any files were found if len(ULS_File) > 0: ULS_Date = np.zeros(len(ULS_File), dtype=object) for i, file in enumerate(ULS_File): try: ULS_Date[i] = datetime.strptime(os.path.basename(file), '%Y%m%d_%H_03743_UoW_ULS.csv') except: ULS_Date[i] = datetime(1900,1,1) # Find Nearest Upper Level Sounding Flight to Radiosonde Flight ID = gu.argnear(ULS_Date, self.Launch_Datetime[self.sensor_package]) # Check the amount of time between Reading and Larkhill # soundings does not exceed 24 hrs. 
                    # Use total_seconds() so whole days are counted as well.
                    if np.abs(self.Launch_Datetime[self.sensor_package] - ULS_Date[ID]).total_seconds() < 86400:
                        print("[INFO] Radiosonde Launch Time:", self.Launch_Datetime[self.sensor_package],
                              "Larkhill Launch Time:", ULS_Date[ID])

                        # Import Larkhill Radiosonde Data
                        press_larkhill, temps_larkhill, dews_larkhill = EPCC_Data.ULS_Calibrate(
                            ULS_File[ID], unpack=True, PRES=True, TEMP=True, DWPT=True)

                        # Match Larkhill pressures with Reading pressures
                        mask = [gu.argnear(press_larkhill, Pres_Plot[0]), gu.argnear(press_larkhill, Pres_Plot[-1])]
                        press_larkhill = press_larkhill[mask[0]:mask[1]]
                        temps_larkhill = temps_larkhill[mask[0]:mask[1]]
                        dews_larkhill = dews_larkhill[mask[0]:mask[1]]

                        dews_larkhill = zip(press_larkhill, dews_larkhill)
                        temps_larkhill = zip(press_larkhill, temps_larkhill)

                        # Plot Larkhill sounding data
                        profile_t1 = Tephigram.plot(temps_larkhill, color="red", linestyle=':', linewidth=1,
                                                    label='Larkhill Dry Bulb Temperature', zorder=5)
                        profile_d1 = Tephigram.plot(dews_larkhill, color="blue", linestyle=':', linewidth=1,
                                                    label='Larkhill Dew Bulb Temperature', zorder=5)
                    else:
                        #warnings.warn("[WARNING] No Larkhill (03743) sounding data was found within 24hrs of the ascent!", ImportWarning)
                        gu.cprint("[WARNING] No Larkhill (03743) sounding data was found within 24hrs of the ascent!", type='warning')
                else:
                    #warnings.warn("[WARNING] No Larkhill (03743) sounding data was found!", ImportWarning)
                    gu.cprint("[WARNING] No Larkhill (03743) sounding data was found!", type='warning')

            # Add extra information to Tephigram plot
            # Tephigram.axes.set(title=Title, xlabel="Potential Temperature $(^\circ C)$", ylabel="Dry Bulb Temperature $(^\circ C)$")
            Title = 'Radiosonde Tephigram Flight No.' + str(self.sensor_package) + ' (' + self.Launch_Datetime[self.sensor_package].strftime("%d/%m/%Y %H%MUTC") + ')' if self.GPS_File is not None else 'Radiosonde Tephigram Flight (N/A)'
            Tephigram.axes.set(title=Title)

            # [OPTIONAL] Add wind profile information to Tephigram.
# profile_t1.barbs(barb_vals) ############################################################################ """Save plot to file""" # Specify the directory the plots are stored in path = os.path.dirname(Radiosonde_File).replace(self.Storage_Path + self.Processed_Data_Path,"") # Find any other plots stored in this directory previous_plots = glob.glob(self.Storage_Path + self.Tephigram_Plots_Path + path + "/*") # Find the biggest 'v' number in plots plot_version = [] for plots in previous_plots: try: plot_version.append(int(os.path.basename(plots)[34:37])) except ValueError: plot_version.append(int(os.path.basename(plots)[34:36])) plot_version = str(np.max(plot_version)+1) if len(plot_version) != 0 else '1' # Create full directory and file name Save_Location = self.Storage_Path + self.Tephigram_Plots_Path + path + '/' + path + '_v' + plot_version.rjust(2,'0') + '_' + str(self.height_range[0]).rjust(2,'0') + 'km_to_' + str(self.height_range[1]).rjust(2,'0') + 'km.png' # Ensure the directory exists on file system and save to that location gu.ensure_dir(os.path.dirname(Save_Location)) print("Save_Location", Save_Location) Tephigram.savefig(Save_Location) ############################################################################ """[Step 3] Calculate Stability Indices""" print("[INFO] Calculating Stability Indices...") # Common Pressure Levels P_500 = gu.argnear(Pres, 500) P_700 = gu.argnear(Pres, 700) P_850 = gu.argnear(Pres, 850) # Showalter stability index #S = Tdry[P_500] - Tl # K-Index K = (Tdry[P_850] - Tdry[P_500]) + Tdew[P_850] - (Tdry[P_700] - Tdew[P_700]) # Cross Totals Index CT = Tdew[P_850] - Tdry[P_500] # Vertical Totals Index VT = Tdry[P_850] - Tdry[P_500] # Total Totals Index TT = VT + CT # SWEAT Index ms2kn = 1.94384 # Conversion between m/s to knots SW_1 = 20*(TT-49) SW_2 = 12*Tdew[P_850] SW_3 = 2*Wind_Mag[P_850]*ms2kn SW_4 = Wind_Mag[P_500]*ms2kn SW_5 = 125*(np.sin(Wind_Dir[P_500]-Wind_Dir[P_850]) + 0.2) # Condition SWEAT Term 1 from several conditions SW_1 = 0 if SW_1 < 49 else SW_1 # Condition SWEAT Term 5 with
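# ---------------------------------------------------------------------------
# Aside: the electric-field loop in [8] above evaluates, for every point
# charge, E_i(t) = cosarctan(((t - dt_i) * v_i) / h_i) * Q_i / (k * h_i**2).
# The sketch below restates that per-charge expression as a standalone
# function so it can be sanity-checked in isolation.  It assumes
# `gu.cosarctan(x)` is cos(arctan(x)) = 1/sqrt(1 + x**2) and that `k` is the
# electrostatic factor 4*pi*epsilon_0; both are guesses about the helpers
# used above, not definitions taken from this file.
import numpy as np

EPSILON_0 = 8.854187817e-12        # permittivity of free space, F/m
k_guess = 4 * np.pi * EPSILON_0    # assumed meaning of `k` in the loop above


def cosarctan(x):
    """cos(arctan(x)) written without the explicit trig round-trip."""
    return 1.0 / np.sqrt(1.0 + x**2)


def point_charge_field(t, time_diff, height, velocity, charge, k=k_guess):
    """Ground-level field time series for one drifting point charge."""
    horizontal = (t - time_diff) * velocity     # horizontal offset in metres
    return cosarctan(horizontal / height) * charge / (k * height**2)


# Example: a single -0.5 C charge at 2 km drifting at 10 m/s, sampled over
# the same +/- 3000 s window used for Cloud_Time above.
t = np.arange(-3000, 3000, 1.0)
field = point_charge_field(t, time_diff=0.0, height=2000.0, velocity=10.0, charge=-0.5)
print(field.min(), field.max())     # peak magnitude occurs at the overhead pass (t = 0)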
import bottle import bottle.ext.sqlite from bottle import Bottle, abort, response, run, static_file, template, debug, redirect, request import json import sys app = Bottle() #install db connection plugin = bottle.ext.sqlite.Plugin(dbfile='./db/dietetika.sqlite') app.install(plugin) app.catchall = False # statične datoteka, torej css/js/fonti @app.get('/fonts/<filename:path>') def fonts(filename): return static_file(filename, root='static/fonts/') @app.get('/css/<filename:path>') def css(filename): return static_file(filename, root='static/css/') @app.get('/js/<filename:path>') def js(filename): return static_file(filename, root='static/js/') # routing @app.route('/') def index(): return template('index.tpl') @app.route('/entry') def domov(): redirect('/') # prikaz zivil @app.route('/consumables') def consumables(db): is_ajax = request.query.isAjax # komunicira s strežnikom, ne rabimo refreshat if is_ajax == '1': #če je ena vrne podatek autocomplete query = request.query.query #kar smo napisali v okno q = """SELECT id, title FROM consumable WHERE title LIKE :title""" title_string = "%{title}%".format(title = query) c = db.execute(q, {'title': title_string}) results = c.fetchall() results_dict = [x['title'] for x in results] #json prebere returned_dict = {'suggestions': results_dict} return json.dumps(returned_dict) else: #vrne stran #uporaben ko so posredovani search parameteri query_dict = {} title = request.query.title consumable_type_id = request.query.consumable_type_select q = """SELECT c.id, c.title, c.calories, ct.title as consumable_type_title, count(chn.nutrient_id) as nutrient_count FROM consumable c LEFT JOIN consumable_type ct ON (c.consumable_type_id = ct.id) LEFT JOIN consumable_has_nutrient chn ON (chn.consumable_id = c.id) WHERE 1 """ if title: #če smo kej zapisali v search title_string = "%{title}%".format(title=title) query_dict['title'] = title_string q += " AND c.title LIKE :title " if consumable_type_id: #iščemo tip query_dict['consumable_type_id'] = consumable_type_id q += " AND c.consumable_type_id = :consumable_type_id " q += """GROUP BY c.id ORDER BY c.id""" c = db.execute(q, query_dict) r_consumables = c.fetchall() q = """SELECT * FROM consumable_type""" c = db.execute(q) consumable_types = c.fetchall() return template("consumables-list.tpl", consumables = r_consumables, consumable_types = consumable_types, status_text = None) # prikaz podatkov o zivilu @app.route('/consumable/<consumable_id>') #<> obvezen parameter def consumable(consumable_id, db): q = """SELECT c.id, c.title, ct.title as consumable_type_title, c.calories FROM consumable c LEFT JOIN consumable_type ct ON (c.consumable_type_id = ct.id) WHERE c.id = ?""" c = db.execute(q, (consumable_id,)) consumable = c.fetchone() q_nutrients = """SELECT n.id, n.title, nt.title as nutrient_type_title, chn.value FROM consumable_has_nutrient chn LEFT JOIN nutrient n ON (chn.nutrient_id = n.id) LEFT JOIN nutrient_type nt ON (n.nutrient_type_id = nt.id) WHERE chn.consumable_id = ? """ c = db.execute(q_nutrients, (consumable_id,)) nutrients = c.fetchall() return template("consumable-details.tpl", c = consumable, n = nutrients) # brisanje zivila @app.route('/consumable-delete/<id>') def consumable_delete(id, db): q = "DELETE FROM consumable WHERE id = ? 
" c = db.execute(q,(id,)) if c.rowcount == 1: #koliko vrstic se je spremenilo redirect('/consumables?changes=deleted') else: redirect('/consumable/{id}'.format(id=id)) # prikaz podatkov za urejanje zivila @app.route('/consumable-edit/<id>') def consumable_edit(id, db): # consumable data gets send to a form # nutrients are generated using ajax to keep track of removed modify_type = 'edit' # consumable types q = """SELECT * FROM consumable_type""" c = db.execute(q) consumable_types = c.fetchall() # nutrients q = """SELECT * FROM nutrient""" c = db.execute(q) nutrients = c.fetchall() # consumable info q = """SELECT c.id, c.title, c.calories, c.consumable_type_id FROM consumable c WHERE c.id = ? """ c = db.execute(q, (id,)) consumable = c.fetchone() # consumable has nutrients q = """SELECT n.id, n.title, chn.value FROM consumable_has_nutrient chn LEFT JOIN nutrient n ON (chn.nutrient_id = n.id) WHERE chn.consumable_id = ? """ c = db.execute(q, (id,)) consumable_nutrients = c.fetchall() consumable_nutrients = json.dumps([dict(x) for x in consumable_nutrients]) return template('consumable.tpl', modify_type = modify_type, ct = consumable_types, n = nutrients, cn = consumable_nutrients, c = consumable, e = None) # shranjevanje v bazo urejanje zivila # ajax in json @app.route('/consumable-edit/<id>', method='POST', sqlite = {'autocommit': False}) def consumable_edit(id, db): try: db.isolation_level = "DEFERRED" req_json = request.json q = """UPDATE consumable SET title = ?, consumable_type_id = ?, calories = ? WHERE id = ?""" db.execute(q, (req_json['title'], req_json['consumable_type_id'], req_json['calories'], id)) valid_nutrient_ids = [] for nutrient in req_json['nutrients']: # 1.check if combination exists in this case change value q = """SELECT * FROM consumable_has_nutrient WHERE consumable_id = ? AND nutrient_id = ?; """ c = db.execute(q, (id, nutrient['id'])) consumable_nutrient = c.fetchone() valid_nutrient_ids.append(nutrient['id']) if (consumable_nutrient is not None): q = """UPDATE consumable_has_nutrient SET value = ? WHERE consumable_id = ? AND nutrient_id = ?;""" db.execute(q, (nutrient['value'], id, nutrient['id'])) else: q = """INSERT INTO consumable_has_nutrient (consumable_id, nutrient_id, value) VALUES (?, ?, ?);""" db.execute(q, (id, nutrient['id'], nutrient['value'])) # 2. delete those that dont fix # valid_nutrient_ids = '(' + valid_nutrient_ids + ')' q = """DELETE FROM consumable_has_nutrient WHERE consumable_id = ? 
AND nutrient_id NOT IN (%s)""" % ("?," * len(valid_nutrient_ids))[:-1] valid_nutrient_ids.insert(0, id) c = db.execute(q, valid_nutrient_ids) db.commit() return json.dumps('Živilo uspešno posodobljeno.') except db.IntegrityError: db.rollback() response.status = 500 return 'Živilo s takšnim imenom že obstaja' except db.Error: e = sys.exc_info()[0] db.rollback() response.status = 500 return str(e) except: db.rollback() e = sys.exc_info()[0] response.status = 500 return str(e) #prikaz vnosnih polj za vnos zivila @app.route('/consumables-enter') def consumable_enter(db): # db manipulation gets through ajax # this is just to show template modify_type = 'enter' #zapis v html je data- modity_type za enter in edit je isti template # consumable types q = """SELECT * FROM consumable_type""" c = db.execute(q) consumable_types = c.fetchall() # nutrients q = """SELECT * FROM nutrient""" c = db.execute(q) nutrients = c.fetchall() consumable_nutrients = None #tuki še ne obstaja consumable = None return template('consumable.tpl', modify_type = modify_type, ct = consumable_types, n = nutrients, cn = consumable_nutrients, c = consumable, e = None) # ajax create za vnos zivila @app.route('/consumables-enter', method = 'POST', sqlite = {'autocommit': False}) def consumable_enter_post(db): #dietetika js try: db.isolation_level = "DEFERRED" req_json = request.json # 1. add consumable q = """INSERT INTO consumable (title, consumable_type_id, calories) VALUES ( ?, ?, ? )""" c = db.execute(q, (req_json['title'], req_json['consumable_type_id'], req_json['calories'])) if(c.rowcount > 0): # consumable id consumable_id = c.lastrowid #zadnji vnešeni # 2. add nutriens if exist if(req_json['nutrients'] and len(req_json['nutrients']) > 0): for nutrient in req_json['nutrients']: q = """INSERT INTO consumable_has_nutrient (consumable_id, nutrient_id, value) VALUES (?, ?, ?)""" db.execute(q, (consumable_id, nutrient['id'], nutrient['value'])) c = db.commit() return json.dumps("Živilo uspešno ustvarjeno.") except db.IntegrityError: db.rollback() response.status = 500 return "Živilo s takšnim imenom že obstaja" except db.Error: db.rollback() #skensli vse kar je do zdej vnešeno e = sys.exc_info()[0] response.status = 500 #napaka na serverju return str(e) #console error except Exception as e: db.rollback() response.status = 500 return str(e) # prikaz hranil @app.route('/nutrients') def nutrients_list(db): # check if ajax or normal ( json / template ) is_ajax = request.query.isAjax if is_ajax == '1': q = """SELECT * FROM nutrient n""" c = db.execute(q) nutrients = c.fetchall() return json.dumps( [dict(ix) for ix in nutrients] ) else: q = """SELECT n.id, n.title, nt.title as nutrient_type_title FROM nutrient n LEFT JOIN nutrient_type nt ON (n.nutrient_type_id = nt.id)""" c = db.execute(q) nutrients = c.fetchall() return template('nutrient-list.tpl', nutrients = nutrients) #prikaz podatkov o hranilu @app.route('/nutrient/<id>') def nutrient_details(id, db): q = """SELECT n.id, n.title, nt.id as nutrient_type_id, nt.title as nutrient_type_title FROM nutrient n LEFT JOIN nutrient_type nt ON (n.nutrient_type_id = nt.id) WHERE n.id = ?""" c = db.execute(q, (id,)) nutrient = c.fetchone() return template('nutrient-details.tpl', n = nutrient) # brisanje hranila @app.route('/nutrient-delete/<id>') def nutrient_delete(id, db): q = """DELETE FROM nutrient WHERE id = ?""" db.execute(q, (id,)) redirect('/nutrients?changes=deleted') # prikaz vnostnih polj pri vnosu hranila @app.route('/nutrients-enter') def nutrient_enter(db): q = """SELECT * 
FROM nutrient_type;""" #pokažemo v dropdownu c = db.execute(q) nutrient_types = c.fetchall() return template('nutrient.tpl', nt = nutrient_types, n = None, e = None) # shranjevanje podatkov v bazo pri vnosu novega hranila @app.route('/nutrients-enter', method='POST') #isto kot app.post je na isti strani def nutrient_enter_post(db): try: title = request.forms.title #glede na name #ker je INT FK ga je potrebno pretvorit iz stringa v int, drugače ne primerja prov v integrity err templateu nutrient_type_id = int(request.forms.nutrient_type_id) nutrient = {'title': title, 'nutrient_type_id': nutrient_type_id} q = """INSERT INTO nutrient (title, nutrient_type_id) VALUES (:title, :nutrient_type_id);""" c = db.execute(q, nutrient) if (c.rowcount > 0): redirect('/nutrients?changes=saved') except db.IntegrityError: q = """SELECT * FROM nutrient_type;""" #pokažemo v dropdownu c = db.execute(q) nutrient_types = c.fetchall() return template('nutrient.tpl', nt = nutrient_types, n = nutrient, e = "Hranilo s takšnim imenom že obstaja") # prikaz podatkov pri urejanju hranila @app.route('/nutrient-edit/<id>') def nutrient_edit(id, db): #id rabimo da iščemo v bazi #nutrient details q = """SELECT * FROM nutrient WHERE id = ?; """ c = db.execute(q, (id,)) nutrient = c.fetchone() #nutrient types q = """SELECT * FROM nutrient_type;""" c = db.execute(q) nutrient_types = c.fetchall() return template('nutrient.tpl', n = nutrient, nt = nutrient_types, e = None) # shranjevanje v bazo pri urejanju hranila @app.route('/nutrient-edit/<id>', method='POST') def nutrient_edit_post(id, db): try: title = request.forms.title nutrient_type_id = int(request.forms.nutrient_type_id) #string v int ( ni potrebno za bazo ampak ce pride do integrity err in b) nutrient = {'title': title, 'nutrient_type_id': nutrient_type_id, 'id': id} q = """UPDATE nutrient SET title = :title, nutrient_type_id = :nutrient_type_id WHERE id = :id;""" c = db.execute(q, nutrient) if (c.rowcount > 0): redirect('/nutrients?changes=updated') except db.IntegrityError: q = """SELECT * FROM nutrient_type;""" #pokažemo v dropdownu c = db.execute(q) nutrient_types = c.fetchall() return template('nutrient.tpl', n = nutrient, nt = nutrient_types, e = 'Hranilo s takim imenom že obstaja') #tipi zivil @app.route('/consumable-types') def consumable_types(db): q = """SELECT * FROM consumable_type""" c = db.execute(q) consumable_types = c.fetchall() return template('consumable-type-list.tpl', consumable_types = consumable_types) #brisanje tipa zivila @app.route('/consumable-type-delete/<id>') def consumable_types_delete(id, db): q = """DELETE FROM consumable_type WHERE id =:id;""" c = db.execute(q, {'id': id}) if (c.rowcount > 0): redirect('/consumable-types?changes=deleted') #prikaz tipov zivila @app.route('/consumable-type/<id>') def consumable_types_details(id, db): q = """SELECT * FROM consumable_type WHERE id = ?;""" c = db.execute(q, (id,)) consumable_type = c.fetchone() return template('consumable-types-details.tpl', ct = consumable_type) #prikaz podatkov za urejanje tipa zivila @app.route('/consumable-type-edit/<id>') def consumable_types_edit(id, db): q = """SELECT * FROM consumable_type WHERE id = :id;""" c = db.execute(q, {'id': id}) consumable_type = c.fetchone() return template('consumable-types.tpl', ct = consumable_type, e = None) #shranjevanje sprememb pri urejanju tipa zivila @app.route('/consumable-type-edit/<id>', method='POST') def consumable_types_edit_post(id, db): try: title = request.forms.title consumable_type = {'title': title, 'id': id} q = 
"""UPDATE consumable_type SET title = :title WHERE id = :id;""" c = db.execute(q, consumable_type) if (c.rowcount > 0): redirect('/consumable-types?changes=updated') except db.IntegrityError as e: return template('consumable-types.tpl', ct = consumable_type, e = "Tip živila s takim imenom že obstaja") #prikaz vnosnih polj za vnos tipa zivila @app.route('/consumable-types-enter') def
<reponame>adolgert/cascade<filename>src/cascade/model/priors.py from copy import copy from functools import total_ordering import numpy as np import scipy.stats as stats from cascade.core import getLoggers CODELOG, MATHLOG = getLoggers(__name__) # A description of how dismod interprets these distributions and their parameters can be found here: # https://bradbell.github.io/dismod_at/doc/prior_table.htm class PriorError(ValueError): """Wrong value passed into the priors.""" @total_ordering class _Prior: """The base for all Priors """ density = None def __init__(self, name=None): self.name = name def _parameters(self): raise NotImplementedError() def parameters(self): return dict(density=self.density, **self._parameters()) def assign(self, **kwargs): """Create a new distribution with modified parameters.""" modified = copy(self) if set(kwargs.keys()) - set(self.__dict__.keys()): missing = list(sorted(set(kwargs.keys()) - set(self.__dict__.keys()))) raise AttributeError(f"The prior doesn't have these attributes {missing}.") modified.__dict__.update(kwargs) return modified def __hash__(self): return hash((frozenset(self.parameters().items()), self.name)) def __eq__(self, other): if not isinstance(other, _Prior): return NotImplemented return self.name == other.name and self.parameters() == other.parameters() def __lt__(self, other): if not isinstance(other, _Prior): return NotImplemented self_dict = sorted([(k, v) for k, v in dict(name=self.name, **self.parameters()).items() if v is not None]) other_dict = sorted([(k, v) for k, v in dict(name=other.name, **other.parameters()).items() if v is not None]) return self_dict < other_dict def __repr__(self): return f"<{type(self).__name__} {self.parameters()}>" def _validate_bounds(lower, mean, upper): any_nones = lower is None or mean is None or upper is None any_invalid = any_nones or np.isnan(lower) or np.isnan(mean) or np.isnan(upper) if any_invalid: raise PriorError(f"Bounds contain invalid values: lower={lower} mean={mean} upper={upper}") if not lower <= mean <= upper: raise PriorError(f"Bounds are inconsistent: lower={lower} mean={mean} upper={upper}") def _validate_standard_deviation(standard_deviation): if standard_deviation is None or np.isnan(standard_deviation) or standard_deviation < 0: raise PriorError(f"Standard deviation must be positive: standard deviation={standard_deviation}") def _validate_nu(nu): if nu is None or np.isnan(nu) or nu <= 2: raise PriorError(f"Nu must be greater than 2: nu={nu}") class Uniform(_Prior): density = "uniform" def __init__(self, lower, upper, mean=None, eta=None, name=None): """ Args: lower (float): Lower bound upper (float): Upper bound mean (float): Doesn't make sense, but it's used to seed solver. eta (float): Used for logarithmic distributions. name (str): A name in case this is a pet prior. """ super().__init__(name=name) if mean is None: mean = (upper + lower) / 2 _validate_bounds(lower, mean, upper) self.lower = lower self.upper = upper self.mean = mean self.eta = eta def mle(self, draws): """Using draws, assign a new mean, guaranteed between lower and upper. Args: draws (np.ndarray): 1D array of floats. Returns: Uniform: A new distribution with the mean set to the mean of draws. """ return self.assign(mean=min(self.upper, max(self.lower, np.mean(draws)))) def rvs(self, size=1, random_state=None): """Sample from this distribution. Args: size (int): Number of random variates, default 1. random_state (numpy.random.RandomState): For repeatable draws. Returns: np.ndarray: Of size=size with floats. 
""" return stats.uniform.rvs(loc=self.lower, scale=self.upper - self.lower, size=size, random_state=random_state) def _parameters(self): return {"lower": self.lower, "upper": self.upper, "mean": self.mean, "eta": self.eta} class Constant(_Prior): density = "uniform" def __init__(self, mean, name=None): """ Args: mean (float): The const value. name (str): A name for this prior, e.g. Susan. """ super().__init__(name=name) self.mean = mean def mle(self, _=None): """Don't change the const value. It is unaffected by this call.""" return copy(self) def rvs(self, size=1, random_state=None): """Sample from this distribution. Args: size (int): Number of random variates, default 1. random_state (numpy.random.RandomState): For repeatable draws. Returns: np.ndarray: Of size=size with floats. """ return np.full((size,), self.mean, dtype=np.float) def _parameters(self): return {"lower": self.mean, "upper": self.mean, "mean": self.mean} class Gaussian(_Prior): r"""A Gaussian is .. math:: f(x) = \frac{1}{2\pi \sigma^2} e^{-(x-\mu)^2/(2\sigma^2)} where :math:`\sigma` is the variance and :math:`\mu` the mean. Args: mean (float): This is :math:`\mu`. standard_deviation (float): This is :math:`\sigma`. lower (float): lower limit. upper (float): upper limit. eta (float): Offset for calculating standard deviation. name (str): Name for this prior. """ density = "gaussian" def __init__(self, mean, standard_deviation, lower=float("-inf"), upper=float("inf"), eta=None, name=None): super().__init__(name=name) _validate_bounds(lower, mean, upper) _validate_standard_deviation(standard_deviation) self.lower = lower self.upper = upper self.mean = mean self.standard_deviation = standard_deviation self.eta = eta def mle(self, draws): """Assign new mean and stdev, with mean clamped between upper and lower. Args: draws (np.ndarray): A 1D array of floats. Returns: Gaussian: With mean and stdev set, where mean is between upper and lower, by force. Upper and lower are unchanged. """ # The mean and standard deviation for Dismod-AT match the location # and scale used by Scipy. mean, std = stats.norm.fit(draws) return self.assign( mean=min(self.upper, max(self.lower, mean)), standard_deviation=std ) def rvs(self, size=1, random_state=None): """Sample from this distribution. Args: size (int): Number of random variates, default 1. random_state (numpy.random.RandomState): For repeatable draws. Returns: np.ndarray: Of size=size with floats. """ vals = np.empty((0,), dtype=np.float) while vals.shape[0] < size: redraw_cnt = size - vals.shape[0] + 10 draws = stats.norm.rvs( loc=self.mean, scale=self.standard_deviation, size=redraw_cnt, random_state=random_state) draws = draws[(self.lower < draws) & (draws < self.upper)] vals = np.concatenate([vals, draws]) return vals[:size] def _parameters(self): return { "lower": self.lower, "upper": self.upper, "mean": self.mean, "std": self.standard_deviation, "eta": self.eta, } class Laplace(Gaussian): r""" This version of the Laplace distribution is parametrized by its variance instead of by scaling of the axis. Usually, the Laplace distribution is .. math:: f(x) = \frac{1}{2b}e^{-|x-\mu|/b} where :math:`\mu` is the mean and :math:`b` is the scale, but the variance is :math:`\sigma^2=2b^2`, so the Dismod-AT version looks like .. math:: f(x) = \frac{1}{\sqrt{2\pi\sigma^2}}e^{-\sqrt{2}|x-\mu|/\sigma}. The standard deviation assigned is :math:`\sigma`. """ density = "laplace" def mle(self, draws): """Assign new mean and stdev, with mean clamped between upper and lower. 
Args: draws (np.ndarray): A 1D array of floats. Returns: Gaussian: With mean and stdev set, where mean is between upper and lower, by force. Upper and lower are unchanged. """ mean, scale = stats.laplace.fit(draws) return self.assign( mean=min(self.upper, max(self.lower, mean)), standard_deviation=scale * np.sqrt(2) # This is the adjustment. ) def rvs(self, size=1, random_state=None): """Sample from this distribution. Args: size (int): Number of random variates, default 1. random_state (numpy.random.RandomState): For repeatable draws. Returns: np.ndarray: Of size=size with floats. """ vals = np.empty((0,), dtype=np.float) while vals.shape[0] < size: redraw_cnt = size - vals.shape[0] + 10 draws = stats.laplace.rvs( loc=self.mean, scale=self.standard_deviation / np.sqrt(2), size=redraw_cnt, random_state=random_state) draws = draws[(self.lower < draws) & (draws < self.upper)] vals = np.concatenate([vals, draws]) return vals[:size] class StudentsT(_Prior): r""" This Students-t must have :math:`\nu>2`. Students-t distribution is usually .. math:: f(x,\nu) = \frac{\Gamma((\nu+1)/2)}{\sqrt{\pi\nu}\Gamma(\nu)}(1+x^2/\nu)^{-(\nu+1)/2} with mean 0 for :math:`\nu>1`. The variance is :math:`\nu/(\nu-2)` for :math:`\nu>2`. Dismod-AT rewrites this using :math:`\sigma^2=\nu/(\nu-2)` to get .. math:: f(x) = \frac{\Gamma((\nu+1)/2)}{\sqrt(\pi\nu)\Gamma(\nu/2)} \left(1 + (x-\mu)^2/(\sigma^2(\nu-2))\right)^{-(\nu+1)/2} """ density = "students" def __init__(self, mean, standard_deviation, nu, lower=float("-inf"), upper=float("inf"), eta=None, name=None): super().__init__(name=name) _validate_bounds(lower, mean, upper) _validate_standard_deviation(standard_deviation) _validate_nu(nu) self.lower = lower self.upper = upper self.mean = mean self.standard_deviation = standard_deviation self.nu = nu self.eta = eta def mle(self, draws): """Assign new mean and stdev, with mean clamped between upper and lower. Args: draws (np.ndarray): A 1D array of floats. Returns: Gaussian: With mean and stdev set, where mean is between upper and lower, by force. Upper and lower are unchanged. """ # This fixes the nu value. nu, mean, scale = stats.t.fit(draws, fix_df=self.nu) return self.assign( mean=min(self.upper, max(self.lower, mean)), standard_deviation=scale * np.sqrt(nu / (nu - 2)) ) def rvs(self, size=1, random_state=None): """Sample from this distribution. Args: size (int): Number of random variates, default 1. random_state (numpy.random.RandomState): For repeatable draws. Returns: np.ndarray: Of size=size with floats. """ vals = np.empty((0,), dtype=np.float) std_scale = np.sqrt(self.nu / (self.nu - 2)) while vals.shape[0] < size: redraw_cnt = size - vals.shape[0] + 10 draws = stats.t.rvs( loc=self.mean, scale=self.standard_deviation / std_scale, df=self.nu, size=redraw_cnt, random_state=random_state) draws = draws[(self.lower < draws) & (draws < self.upper)] vals = np.concatenate([vals, draws]) return vals[:size] def _parameters(self): return { "lower": self.lower, "upper": self.upper, "mean": self.mean, "std": self.standard_deviation, "nu": self.nu, "eta": self.eta, } class LogGaussian(_Prior): r""" Dismod-AT parametrizes the Log-Gaussian with the standard deviation as .. 
math::

        f(x) = \frac{1}{\sqrt{2\pi\sigma^2}} e^{-\log((x-\mu)/\sigma)^2/2}

    """
    density = "log_gaussian"

    def __init__(self, mean, standard_deviation, eta, lower=float("-inf"), upper=float("inf"), name=None):
        super().__init__(name=name)
        _validate_bounds(lower, mean, upper)
        _validate_standard_deviation(standard_deviation)
        self.lower = lower
        self.upper = upper
        self.mean = mean
        self.standard_deviation = standard_deviation
        self.eta = eta

    def mle(self, draws):
        """Assign new mean and stdev, with mean clamped between upper and lower.

        This does a fit using a normal distribution.

        Args:
            draws (np.ndarray): A 1D array of floats.

        Returns:
            Gaussian: With mean and stdev set, where mean is between upper
            and lower, by
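# ---------------------------------------------------------------------------
# Aside: the Laplace class above stores the Dismod-AT standard deviation
# sigma, while scipy's laplace distribution is parametrized by the scale b,
# with sigma**2 = 2*b**2.  The short numerical check below (a sketch using
# only numpy and scipy) shows why mle() multiplies the fitted scale by
# sqrt(2) and rvs() divides the stored standard deviation by sqrt(2).
import numpy as np
import scipy.stats as stats

rng = np.random.RandomState(0)
draws = stats.laplace.rvs(loc=1.0, scale=0.5, size=200000, random_state=rng)

loc, scale = stats.laplace.fit(draws)
sigma = scale * np.sqrt(2)      # Dismod-AT style standard deviation

print(round(loc, 3), round(scale, 3), round(sigma, 3))
print(round(draws.std(), 3))    # empirical std agrees with sigma, since var = 2*b**2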
= None self.y_pos = None self.width = None self.height = None self.titlebar_height = None self.geom_update[int, int, int, int, int].connect(self.save_geometry) self.final_save.connect(self.app.final_save) self.shell_dock.visibilityChanged.connect(self.on_shelldock_toggled) # Notebook and Plot Tab Area signals # make the right click on the notebook tab and plot tab area tab raise a menu self.notebook.tabBar.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu) self.plot_tab_area.tabBar.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu) self.on_tab_setup_context_menu() # activate initial state self.on_detachable_tab_rmb_click(self.app.defaults["global_tabs_detachable"]) # status bar activation/deactivation self.infobar.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu) self.build_infobar_context_menu() def set_ui_title(self, name): """ Sets the title of the main window. :param name: String that store the project path and project name :return: None """ title = 'FlatCAM %s %s - %s - [%s] %s' % ( self.app.version, ('BETA' if self.app.beta else ''), platform.architecture()[0], self.app.engine, name) self.setWindowTitle(title) def save_geometry(self, x, y, width, height, notebook_width): """ Will save the application geometry and positions in the defaults dicitionary to be restored at the next launch of the application. :param x: X position of the main window :param y: Y position of the main window :param width: width of the main window :param height: height of the main window :param notebook_width: the notebook width is adjustable so it get saved here, too. :return: None """ self.app.defaults["global_def_win_x"] = x self.app.defaults["global_def_win_y"] = y self.app.defaults["global_def_win_w"] = width self.app.defaults["global_def_win_h"] = height self.app.defaults["global_def_notebook_width"] = notebook_width self.app.preferencesUiManager.save_defaults() def restore_main_win_geom(self): try: self.setGeometry(self.app.defaults["global_def_win_x"], self.app.defaults["global_def_win_y"], self.app.defaults["global_def_win_w"], self.app.defaults["global_def_win_h"]) self.splitter.setSizes([self.app.defaults["global_def_notebook_width"], 0]) except KeyError as e: log.debug("appGUI.MainGUI.restore_main_win_geom() --> %s" % str(e)) def restore_toolbar_view(self): """ Some toolbars may be hidden by user and here we restore the state of the toolbars visibility that was saved in the defaults dictionary. 
:return: None """ tb = self.app.defaults["global_toolbar_view"] if tb & 1: self.toolbarfile.setVisible(True) else: self.toolbarfile.setVisible(False) if tb & 2: self.toolbaredit.setVisible(True) else: self.toolbaredit.setVisible(False) if tb & 4: self.toolbarview.setVisible(True) else: self.toolbarview.setVisible(False) if tb & 8: self.toolbartools.setVisible(True) else: self.toolbartools.setVisible(False) if tb & 16: self.exc_edit_toolbar.setVisible(True) else: self.exc_edit_toolbar.setVisible(False) if tb & 32: self.geo_edit_toolbar.setVisible(True) else: self.geo_edit_toolbar.setVisible(False) if tb & 64: self.grb_edit_toolbar.setVisible(True) else: self.grb_edit_toolbar.setVisible(False) # if tb & 128: # self.ui.grid_toolbar.setVisible(True) # else: # self.ui.grid_toolbar.setVisible(False) # Grid Toolbar is controlled by its own setting if tb & 256: self.toolbarshell.setVisible(True) else: self.toolbarshell.setVisible(False) def on_tab_setup_context_menu(self): initial_checked = self.app.defaults["global_tabs_detachable"] action_name = str(_("Detachable Tabs")) action = QtWidgets.QAction(self) action.setCheckable(True) action.setText(action_name) action.setChecked(initial_checked) self.notebook.tabBar.addAction(action) self.plot_tab_area.tabBar.addAction(action) try: action.triggered.disconnect() except TypeError: pass action.triggered.connect(self.on_detachable_tab_rmb_click) def on_detachable_tab_rmb_click(self, checked): self.notebook.set_detachable(val=checked) self.app.defaults["global_tabs_detachable"] = checked self.plot_tab_area.set_detachable(val=checked) self.app.defaults["global_tabs_detachable"] = checked def build_infobar_context_menu(self): delta_coords_action_name = str(_("Delta Coordinates Toolbar")) delta_coords_action = QtWidgets.QAction(self) delta_coords_action.setCheckable(True) delta_coords_action.setText(delta_coords_action_name) delta_coords_action.setChecked(self.app.defaults["global_delta_coordsbar_show"]) self.infobar.addAction(delta_coords_action) delta_coords_action.triggered.connect(self.toggle_delta_coords) coords_action_name = str(_("Coordinates Toolbar")) coords_action = QtWidgets.QAction(self) coords_action.setCheckable(True) coords_action.setText(coords_action_name) coords_action.setChecked(self.app.defaults["global_coordsbar_show"]) self.infobar.addAction(coords_action) coords_action.triggered.connect(self.toggle_coords) grid_action_name = str(_("Grid Toolbar")) grid_action = QtWidgets.QAction(self) grid_action.setCheckable(True) grid_action.setText(grid_action_name) grid_action.setChecked(self.app.defaults["global_gridbar_show"]) self.infobar.addAction(grid_action) grid_action.triggered.connect(self.toggle_gridbar) status_action_name = str(_("Status Toolbar")) status_action = QtWidgets.QAction(self) status_action.setCheckable(True) status_action.setText(status_action_name) status_action.setChecked(self.app.defaults["global_statusbar_show"]) self.infobar.addAction(status_action) status_action.triggered.connect(self.toggle_statusbar) def toggle_coords(self, checked): self.app.defaults["global_coordsbar_show"] = checked self.coords_toolbar.setVisible(checked) def toggle_delta_coords(self, checked): self.app.defaults["global_delta_coordsbar_show"] = checked self.delta_coords_toolbar.setVisible(checked) def toggle_gridbar(self, checked): self.app.defaults["global_gridbar_show"] = checked self.grid_toolbar.setVisible(checked) def toggle_statusbar(self, checked): self.app.defaults["global_statusbar_show"] = checked self.status_toolbar.setVisible(checked) 
def eventFilter(self, obj, event): """ Filter the ToolTips display based on a Preferences setting :param obj: :param event: QT event to filter :return: """ if self.app.defaults["global_toggle_tooltips"] is False: if event.type() == QtCore.QEvent.ToolTip: return True else: return False return False def on_preferences_open_folder(self): """ Will open an Explorer window set to the folder path where the FlatCAM preferences files are usually saved. :return: None """ if sys.platform == 'win32': subprocess.Popen('explorer %s' % self.app.data_path) elif sys.platform == 'darwin': os.system('open "%s"' % self.app.data_path) else: subprocess.Popen(['xdg-open', self.app.data_path]) self.app.inform.emit('[success] %s' % _("FlatCAM Preferences Folder opened.")) def on_gui_clear(self, signal=None, forced_clear=False): """ Will clear the settings that are stored in QSettings. """ log.debug("Clearing the settings in QSettings. GUI settings cleared.") theme_settings = QtCore.QSettings("Open Source", "FlatCAM") theme_settings.setValue('theme', 'white') del theme_settings resource_loc = self.app.resource_location response = None bt_yes = None if forced_clear is False: msgbox = QtWidgets.QMessageBox() msgbox.setText(_("Are you sure you want to delete the GUI Settings? \n")) msgbox.setWindowTitle(_("Clear GUI Settings")) msgbox.setWindowIcon(QtGui.QIcon(resource_loc + '/trash32.png')) msgbox.setIcon(QtWidgets.QMessageBox.Question) bt_yes = msgbox.addButton(_('Yes'), QtWidgets.QMessageBox.YesRole) bt_no = msgbox.addButton(_('No'), QtWidgets.QMessageBox.NoRole) msgbox.setDefaultButton(bt_no) msgbox.exec_() response = msgbox.clickedButton() if forced_clear is True or response == bt_yes: qsettings = QSettings("Open Source", "FlatCAM") for key in qsettings.allKeys(): qsettings.remove(key) # This will write the setting to the platform specific storage. 
del qsettings def populate_toolbars(self): """ Will populate the App Toolbars with their actions :return: None """ self.app.log.debug(" -> Add actions to new Toolbars") # ######################################################################## # ##################### File Toolbar ##################################### # ######################################################################## self.file_open_gerber_btn = self.toolbarfile.addAction( QtGui.QIcon(self.app.resource_location + '/flatcam_icon32.png'), _("Open Gerber")) self.file_open_excellon_btn = self.toolbarfile.addAction( QtGui.QIcon(self.app.resource_location + '/drill32.png'), _("Open Excellon")) self.toolbarfile.addSeparator() self.file_open_btn = self.toolbarfile.addAction( QtGui.QIcon(self.app.resource_location + '/folder32.png'), _("Open Project")) self.file_save_btn = self.toolbarfile.addAction( QtGui.QIcon(self.app.resource_location + '/project_save32.png'), _("Save Project")) # ######################################################################## # ######################### Edit Toolbar ################################# # ######################################################################## self.editgeo_btn = self.toolbaredit.addAction( QtGui.QIcon(self.app.resource_location + '/edit32.png'), _("Editor")) self.update_obj_btn = self.toolbaredit.addAction( QtGui.QIcon(self.app.resource_location + '/close_edit_file32.png'), _("Save Object and close the Editor") ) self.toolbaredit.addSeparator() self.copy_btn = self.toolbaredit.addAction( QtGui.QIcon(self.app.resource_location + '/copy_file32.png'), _("Copy")) self.delete_btn = self.toolbaredit.addAction( QtGui.QIcon(self.app.resource_location + '/trash32.png'), _("Delete")) self.toolbaredit.addSeparator() self.distance_btn = self.toolbaredit.addAction( QtGui.QIcon(self.app.resource_location + '/distance32.png'), _("Distance Tool")) self.distance_min_btn = self.toolbaredit.addAction( QtGui.QIcon(self.app.resource_location + '/distance_min32.png'), _("Distance Min Tool")) self.origin_btn = self.toolbaredit.addAction( QtGui.QIcon(self.app.resource_location + '/origin32.png'), _('Set Origin')) self.move2origin_btn = self.toolbaredit.addAction( QtGui.QIcon(self.app.resource_location + '/origin2_32.png'), _('Move to Origin')) self.jmp_btn = self.toolbaredit.addAction( QtGui.QIcon(self.app.resource_location + '/jump_to16.png'), _('Jump to Location')) self.locate_btn = self.toolbaredit.addAction( QtGui.QIcon(self.app.resource_location + '/locate32.png'), _('Locate in Object')) # ######################################################################## # ########################## View Toolbar# ############################### # ######################################################################## self.replot_btn = self.toolbarview.addAction( QtGui.QIcon(self.app.resource_location + '/replot32.png'), _("Replot")) self.clear_plot_btn = self.toolbarview.addAction( QtGui.QIcon(self.app.resource_location + '/clear_plot32.png'), _("Clear Plot")) self.zoom_in_btn = self.toolbarview.addAction( QtGui.QIcon(self.app.resource_location + '/zoom_in32.png'), _("Zoom In")) self.zoom_out_btn = self.toolbarview.addAction( QtGui.QIcon(self.app.resource_location + '/zoom_out32.png'), _("Zoom Out")) self.zoom_fit_btn = self.toolbarview.addAction( QtGui.QIcon(self.app.resource_location + '/zoom_fit32.png'), _("Zoom Fit")) # ######################################################################## # ########################## Shell Toolbar# ############################## # 
######################################################################## self.shell_btn = self.toolbarshell.addAction( QtGui.QIcon(self.app.resource_location + '/shell32.png'), _("Command Line")) self.new_script_btn = self.toolbarshell.addAction( QtGui.QIcon(self.app.resource_location + '/script_new24.png'), '%s ...' % _('New Script')) self.open_script_btn = self.toolbarshell.addAction( QtGui.QIcon(self.app.resource_location + '/open_script32.png'), '%s ...' % _('Open Script')) self.run_script_btn = self.toolbarshell.addAction( QtGui.QIcon(self.app.resource_location + '/script16.png'), '%s ...' % _('Run Script')) # ######################################################################### # ######################### Tools Toolbar ################################# # ######################################################################### self.dblsided_btn = self.toolbartools.addAction( QtGui.QIcon(self.app.resource_location + '/doubleside32.png'), _("2-Sided Tool")) self.align_btn = self.toolbartools.addAction( QtGui.QIcon(self.app.resource_location + '/align32.png'), _("Align Objects Tool")) self.extract_btn = self.toolbartools.addAction( QtGui.QIcon(self.app.resource_location + '/extract_drill32.png'), _("Extract Drills Tool")) self.cutout_btn = self.toolbartools.addAction( QtGui.QIcon(self.app.resource_location + '/cut16_bis.png'), _("Cutout Tool")) self.ncc_btn = self.toolbartools.addAction( QtGui.QIcon(self.app.resource_location + '/ncc16.png'), _("NCC Tool")) self.paint_btn = self.toolbartools.addAction( QtGui.QIcon(self.app.resource_location + '/paint20_1.png'), _("Paint Tool")) self.isolation_btn = self.toolbartools.addAction( QtGui.QIcon(self.app.resource_location + '/iso_16.png'), _("Isolation Tool")) self.drill_btn = self.toolbartools.addAction( QtGui.QIcon(self.app.resource_location + '/drilling_tool32.png'), _("Drilling Tool")) self.toolbartools.addSeparator() self.panelize_btn = self.toolbartools.addAction( QtGui.QIcon(self.app.resource_location + '/panelize32.png'), _("Panel Tool")) self.film_btn = self.toolbartools.addAction( QtGui.QIcon(self.app.resource_location + '/film16.png'), _("Film Tool")) self.solder_btn = self.toolbartools.addAction( QtGui.QIcon(self.app.resource_location + '/solderpastebis32.png'), _("SolderPaste Tool")) self.sub_btn = self.toolbartools.addAction( QtGui.QIcon(self.app.resource_location + '/sub32.png'), _("Subtract Tool")) self.rules_btn = self.toolbartools.addAction( QtGui.QIcon(self.app.resource_location + '/rules32.png'), _("Rules Tool")) self.optimal_btn = self.toolbartools.addAction( QtGui.QIcon(self.app.resource_location + '/open_excellon32.png'), _("Optimal Tool")) self.toolbartools.addSeparator() self.calculators_btn = self.toolbartools.addAction( QtGui.QIcon(self.app.resource_location + '/calculator24.png'), _("Calculators Tool")) self.transform_btn = self.toolbartools.addAction( QtGui.QIcon(self.app.resource_location + '/transform.png'), _("Transform Tool")) self.qrcode_btn = self.toolbartools.addAction( QtGui.QIcon(self.app.resource_location + '/qrcode32.png'), _("QRCode Tool")) self.copperfill_btn = self.toolbartools.addAction( QtGui.QIcon(self.app.resource_location + '/copperfill32.png'), _("Copper Thieving Tool")) self.fiducials_btn = self.toolbartools.addAction( QtGui.QIcon(self.app.resource_location + '/fiducials_32.png'), _("Fiducials Tool")) self.cal_btn = self.toolbartools.addAction( QtGui.QIcon(self.app.resource_location + '/calibrate_32.png'), _("Calibration Tool")) self.punch_btn = self.toolbartools.addAction( 
QtGui.QIcon(self.app.resource_location + '/punch32.png'), _("Punch Gerber Tool")) self.invert_btn = self.toolbartools.addAction( QtGui.QIcon(self.app.resource_location + '/invert32.png'), _("Invert Gerber Tool")) self.corners_tool_btn = self.toolbartools.addAction( QtGui.QIcon(self.app.resource_location + '/corners_32.png'), _("Corner Markers Tool")) self.etch_btn = self.toolbartools.addAction( QtGui.QIcon(self.app.resource_location + '/etch_32.png'), _("Etch Compensation Tool")) # ######################################################################## # ################### Excellon Editor Toolbar ############################ # ######################################################################## self.select_drill_btn = self.exc_edit_toolbar.addAction( QtGui.QIcon(self.app.resource_location + '/pointer32.png'), _("Select")) self.add_drill_btn = self.exc_edit_toolbar.addAction( QtGui.QIcon(self.app.resource_location + '/plus16.png'), _('Add Drill')) self.add_drill_array_btn = self.exc_edit_toolbar.addAction( QtGui.QIcon(self.app.resource_location + '/addarray16.png'), _('Add Drill Array')) self.resize_drill_btn = self.exc_edit_toolbar.addAction( QtGui.QIcon(self.app.resource_location + '/resize16.png'), _('Resize Drill')) self.add_slot_btn = self.exc_edit_toolbar.addAction( QtGui.QIcon(self.app.resource_location + '/slot26.png'), _('Add Slot')) self.add_slot_array_btn = self.exc_edit_toolbar.addAction( QtGui.QIcon(self.app.resource_location + '/slot_array26.png'), _('Add Slot Array')) self.exc_edit_toolbar.addSeparator() self.copy_drill_btn = self.exc_edit_toolbar.addAction( QtGui.QIcon(self.app.resource_location + '/copy32.png'), _('Copy Drill')) self.delete_drill_btn = self.exc_edit_toolbar.addAction( QtGui.QIcon(self.app.resource_location + '/trash32.png'), _("Delete Drill")) self.exc_edit_toolbar.addSeparator() self.move_drill_btn = self.exc_edit_toolbar.addAction( QtGui.QIcon(self.app.resource_location + '/move32.png'), _("Move Drill")) # ######################################################################## # ################### Geometry Editor Toolbar ############################ # ######################################################################## self.geo_select_btn = self.geo_edit_toolbar.addAction( QtGui.QIcon(self.app.resource_location + '/pointer32.png'), _("Select")) self.geo_add_circle_btn = self.geo_edit_toolbar.addAction( QtGui.QIcon(self.app.resource_location + '/circle32.png'), _('Add Circle')) self.geo_add_arc_btn = self.geo_edit_toolbar.addAction( QtGui.QIcon(self.app.resource_location + '/arc32.png'), _('Add Arc')) self.geo_add_rectangle_btn = self.geo_edit_toolbar.addAction( QtGui.QIcon(self.app.resource_location + '/rectangle32.png'), _('Add Rectangle')) self.geo_edit_toolbar.addSeparator() self.geo_add_path_btn = self.geo_edit_toolbar.addAction( QtGui.QIcon(self.app.resource_location + '/path32.png'), _('Add Path')) self.geo_add_polygon_btn = self.geo_edit_toolbar.addAction( QtGui.QIcon(self.app.resource_location + '/polygon32.png'), _('Add Polygon')) self.geo_edit_toolbar.addSeparator() self.geo_add_text_btn = self.geo_edit_toolbar.addAction( QtGui.QIcon(self.app.resource_location + '/text32.png'), _('Add Text')) self.geo_add_buffer_btn = self.geo_edit_toolbar.addAction( QtGui.QIcon(self.app.resource_location + '/buffer16-2.png'), _('Add Buffer')) self.geo_add_paint_btn = self.geo_edit_toolbar.addAction( QtGui.QIcon(self.app.resource_location + '/paint20_1.png'), _('Paint Shape')) self.geo_eraser_btn = self.geo_edit_toolbar.addAction( 
            QtGui.QIcon(self.app.resource_location + '/eraser26.png'), _('Eraser'))
        self.geo_edit_toolbar.addSeparator()
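# ------------------------------------------------------------------------
# A minimal, standalone sketch of how toolbar QAction objects like the ones
# created above are normally wired to handlers via Qt signals/slots.
# Assumptions: PyQt5 is used here purely for illustration (the application
# above may target a different Qt binding), and the window/handler names
# (SketchWindow, on_open_project, ...) are invented, not taken from the
# original file.
import sys

from PyQt5 import QtWidgets


class SketchWindow(QtWidgets.QMainWindow):

    def __init__(self):
        super().__init__()
        self.toolbarfile = self.addToolBar("File")
        # addAction() returns a QAction; keeping the reference lets the
        # caller connect it, toggle setEnabled(), or change its text later.
        self.file_open_btn = self.toolbarfile.addAction("Open Project")
        self.file_save_btn = self.toolbarfile.addAction("Save Project")
        # triggered fires when the toolbar button is clicked.
        self.file_open_btn.triggered.connect(self.on_open_project)
        self.file_save_btn.triggered.connect(self.on_save_project)

    def on_open_project(self):
        print("open project requested")

    def on_save_project(self):
        print("save project requested")


if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    win = SketchWindow()
    win.show()
    sys.exit(app.exec_())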
<reponame>dbrandt/spambayes-lite """Core Web Interface Classes: CoreUserInterface - Interface class for basic (non-plugin) display Abstract: This module implements a browser based Spambayes user interface for the the core server. Users may use it to interface with various plugins. The following functions are currently included: [From the base class UserInterface] onClassify - classify a given message onWordquery - query a word from the database onTrain - train a message or mbox onSave - save the database and possibly shutdown [Here] onHome - a home page with various options onUpload - upload a message for later training (used by proxytee.py) onReview - show messages in corpii onView - view a message from one of the corpii onShowclues - show clues for a message To do: Web training interface: o Review already-trained messages, and purge them. o Add a Today button on the Review page. User interface improvements: o Can it cleanly dynamically update its status display while having a POP3 conversation? Hammering reload sucks. o Suggestions? """ # This module is part of the spambayes project, which is Copyright 2002-2007 # The Python Software Foundation and is covered by the Python Software # Foundation license. # This module was forked from ProxyUI.py to provide a basic user interface # for core_server.py. __author__ = "<NAME> <<EMAIL>>" __credits__ = "<NAME>, <NAME>, <NAME>, all the Spambayes folk." import sys import cgi import time import types import bisect from spambayes import UserInterface from spambayes.Options import options, load_options, get_pathname_option, _ ## no i18n yet... ##from spambayes import i18n from spambayes import storage from spambayes import Stats from spambayes.FileCorpus import FileMessageFactory, GzipFileMessageFactory from spambayes.FileCorpus import ExpiryFileCorpus import spambayes.message # These are the options that will be offered on the configuration page. If # the option is None, then the entry is a header and the following options # will appear in a new box on the configuration page. These are also used # to generate http request parameters and template fields/variables. parm_ini_map = ( ('Storage Options', None), ('Storage', 'persistent_storage_file'), ('Storage', 'messageinfo_storage_file'), ('Storage', 'cache_messages'), ('Storage', 'no_cache_bulk_ham'), ('Storage', 'no_cache_large_messages'), ('Statistics Options', None), ('Categorization', 'ham_cutoff'), ('Categorization', 'spam_cutoff'), ) # Like the above, but these are the options that will be offered on the # advanced configuration page. 
adv_map = ( (_('Statistics Options'), None), ('Classifier', 'max_discriminators'), ('Classifier', 'minimum_prob_strength'), ('Classifier', 'unknown_word_prob'), ('Classifier', 'unknown_word_strength'), ('Classifier', 'use_bigrams'), (_('Header Options'), None), ('Headers', 'include_score'), ('Headers', 'header_score_digits'), ('Headers', 'header_score_logarithm'), ('Headers', 'include_thermostat'), ('Headers', 'include_evidence'), ('Headers', 'clue_mailheader_cutoff'), (_('Storage Options'), None), ('Storage', 'persistent_use_database'), ('Storage', 'cache_expiry_days'), ('Storage', 'cache_use_gzip'), ('Storage', 'ham_cache'), ('Storage', 'spam_cache'), ('Storage', 'unknown_cache'), (_('Tokenising Options'), None), ('Tokenizer', 'mine_received_headers'), ('Tokenizer', 'replace_nonascii_chars'), ('Tokenizer', 'summarize_email_prefixes'), ('Tokenizer', 'summarize_email_suffixes'), (_('Training Options'), None), ('Hammie', 'train_on_filter'), (_('Interface Options'), None), ('html_ui', 'display_headers'), ('html_ui', 'display_received_time'), ('html_ui', 'display_score'), ('html_ui', 'display_adv_find'), ('html_ui', 'default_ham_action'), ('html_ui', 'default_spam_action'), ('html_ui', 'default_unsure_action'), ('html_ui', 'ham_discard_level'), ('html_ui', 'spam_discard_level'), ('html_ui', 'allow_remote_connections'), ('html_ui', 'http_authentication'), ('html_ui', 'http_user_name'), ('html_ui', 'http_password'), ('globals', 'language'), ) class AlreadyRunningException(Exception): "exception may be raised if we are already running and check such things." pass class CoreUserInterface(UserInterface.UserInterface): """Serves the HTML user interface for the core server.""" def __init__(self, state): UserInterface.UserInterface.__init__(self, state.bayes, parm_ini_map, adv_map, state.lang_manager, state.stats) self.state = state self.app_for_version = "SpamBayes Proxy" if not state.can_stop: self.html._readonly = False self.html.shutdownTableCell = "&nbsp;" self.html._readonly = True def onHome(self): """Serve up the homepage.""" self.state.buildStatusStrings() stateDict = self.state.__dict__.copy() stateDict.update(self.state.bayes.__dict__) statusTable = self.html.statusTable.clone() findBox = self._buildBox(_('Word query'), 'query.gif', self.html.wordQuery) if not options["html_ui", "display_adv_find"]: del findBox.advanced content = (self._buildBox(_('Status and Configuration'), 'status.gif', statusTable % stateDict)+ self._buildBox(_('Train on proxied messages'), 'train.gif', self.html.reviewText) + self._buildTrainBox() + self._buildClassifyBox() + findBox + self._buildBox(_('Find message'), 'query.gif', self.html.findMessage) ) self._writePreamble(_("Home")) self.write(content) self._writePostamble(help_topic="home_proxy") def onUpload(self, filename): """Save a message for later training - used by Skip's proxytee.py.""" # Convert platform-specific line endings into unix-style. filename = filename.replace('\r\n', '\n').replace('\r', '\n') # Get a message list from the upload and write it into the cache. messages = self._convertUploadToMessageList(filename) for m in messages: messageName = self.state.getNewMessageName() message = self.state.unknownCorpus.makeMessage(messageName, m) self.state.unknownCorpus.addMessage(message) # Return a link Home. self.write(_("<p>OK. Return <a href='home'>Home</a>.</p>")) def _buildReviewKeys(self, timestamp): """Builds an ordered list of untrained message keys, ready for output in the Review list. 
Returns a 5-tuple: the keys, the formatted date for the list (eg. "Friday, November 15, 2002"), the start of the prior page or zero if there isn't one, likewise the start of the given page, and likewise the start of the next page.""" # Fetch all the message keys allKeys = self.state.unknownCorpus.keys() # We have to sort here to split into days. # Later on, we also sort the messages that will be on the page # (by whatever column we wish). allKeys.sort() # The default start timestamp is derived from the most recent message, # or the system time if there are no messages (not that it gets used). if not timestamp: if allKeys: timestamp = self._keyToTimestamp(allKeys[-1]) else: timestamp = time.time() start, end, date = self._getTimeRange(timestamp) # Find the subset of the keys within this range. startKeyIndex = bisect.bisect(allKeys, "%d" % long(start)) endKeyIndex = bisect.bisect(allKeys, "%d" % long(end)) keys = allKeys[startKeyIndex:endKeyIndex] keys.reverse() # What timestamps to use for the prior and next days? If there any # messages before/after this day's range, use the timestamps of those # messages - this will skip empty days. prior = end = 0 if startKeyIndex != 0: prior = self._keyToTimestamp(allKeys[startKeyIndex-1]) if endKeyIndex != len(allKeys): end = self._keyToTimestamp(allKeys[endKeyIndex]) # Return the keys and their date. return keys, date, prior, start, end def onReview(self, **params): """Present a list of message for (re)training.""" # Train/discard sumbitted messages. self._writePreamble("Review") id = '' numTrained = 0 numDeferred = 0 if params.get('go') != _('Refresh'): for key, value in params.items(): if key.startswith('classify:'): old_class, id = key.split(':')[1:3] if value == _('spam'): targetCorpus = self.state.spamCorpus stats_as_ham = False elif value == _('ham'): targetCorpus = self.state.hamCorpus stats_as_ham = True elif value == _('discard'): targetCorpus = None try: self.state.unknownCorpus.removeMessage( self.state.unknownCorpus[id]) except KeyError: pass # Must be a reload. else: # defer targetCorpus = None numDeferred += 1 if targetCorpus: sourceCorpus = None if self.state.unknownCorpus.get(id) is not None: sourceCorpus = self.state.unknownCorpus elif self.state.hamCorpus.get(id) is not None: sourceCorpus = self.state.hamCorpus elif self.state.spamCorpus.get(id) is not None: sourceCorpus = self.state.spamCorpus if sourceCorpus is not None: try: # fromCache is a fix for sf #851785. # See the comments in Corpus.py targetCorpus.takeMessage(id, sourceCorpus, fromCache=True) if numTrained == 0: self.write(_("<p><b>Training... ")) self.flush() numTrained += 1 self.stats.RecordTraining(\ stats_as_ham, old_class=old_class) except KeyError: pass # Must be a reload. # Report on any training, and save the database if there was any. if numTrained > 0: plural = '' if numTrained == 1: response = "Trained on one message. " else: response = "Trained on %d messages. " % (numTrained,) self._doSave() self.write(response) self.write("<br>&nbsp;") title = "" keys = [] sourceCorpus = self.state.unknownCorpus # If any messages were deferred, show the same page again. if numDeferred > 0: start = self._keyToTimestamp(id) # Else after submitting a whole page, display the prior page or the # next one. Derive the day of the submitted page from the ID of the # last processed message. 
elif id: start = self._keyToTimestamp(id) unused, unused, prior, unused, next = self._buildReviewKeys(start) if prior: start = prior else: start = next # Else if they've hit Previous or Next, display that page. elif params.get('go') == _('Next day'): start = self._keyToTimestamp(params['next']) elif params.get('go') == _('Previous day'): start = self._keyToTimestamp(params['prior']) # Else if an id has been specified, just show that message # Else if search criteria have been specified, show the messages # that match those criteria. elif params.get('find') is not None: prior = next = 0 keys = set() # so we don't end up with duplicates push = keys.add try: max_results = int(params['max_results']) except ValueError: max_results = 1 key = params['find'] if 'ignore_case' in params: ic = True else: ic = False error = False if key == "": error = True page = _("<p>You must enter a search string.</p>") else: if len(keys) < max_results and \ 'id' in params: if self.state.unknownCorpus.get(key): push((key, self.state.unknownCorpus)) elif self.state.hamCorpus.get(key): push((key, self.state.hamCorpus)) elif self.state.spamCorpus.get(key): push((key, self.state.spamCorpus)) if 'subject' in params or 'body' in params or \ 'headers' in params: # This is an expensive operation, so let the user know # that something is happening. self.write(_('<p>Searching...</p>'))
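# ------------------------------------------------------------------------
# The review page above leans on helpers such as _keyToTimestamp() and
# _getTimeRange() from the UserInterface base class, which are not shown in
# this excerpt. Below is a rough, standalone sketch of that idea: message
# keys that begin with a Unix timestamp are grouped into one-day pages with
# bisect. The helper names and the "<seconds>-<counter>" key layout are
# assumptions for illustration, not the exact SpamBayes implementation.
import bisect
import time


def key_to_timestamp(key):
    # Assumed key layout: "<unix-seconds>-<counter>".
    return int(key.split("-")[0])


def day_range(timestamp):
    # Midnight-to-midnight range (local time) containing `timestamp`.
    day = time.localtime(timestamp)
    start = time.mktime((day.tm_year, day.tm_mon, day.tm_mday,
                         0, 0, 0, 0, 0, -1))
    return start, start + 24 * 60 * 60


def keys_for_day(all_keys, timestamp):
    # all_keys must be sorted; bisect picks out the slice for one day,
    # mirroring the startKeyIndex/endKeyIndex logic in _buildReviewKeys().
    start, end = day_range(timestamp)
    lo = bisect.bisect(all_keys, "%d" % int(start))
    hi = bisect.bisect(all_keys, "%d" % int(end))
    return all_keys[lo:hi]


if __name__ == "__main__":
    now = int(time.time())
    demo_keys = sorted("%d-%03d" % (now + i * 3600, i) for i in range(30))
    print(keys_for_day(demo_keys, now))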
= ftype[indx] cfieldid = self.fields['fieldid'][indx] if(minimize_time): ifield = np.where(self.field_array['fieldid'] == cfieldid)[0] if((len(ifield) == 0)): field_minimum_float[cfieldid] = 0.0 elif (self.field_array['cadence'][ifield[0]].strip() == 'none'): field_minimum_float[cfieldid] = 0.0 else: field_minimum_float[cfieldid] = 0.95 else: if(cftype in self.observe_all_fields): field_minimum_float[cfieldid] = 0.99 else: field_minimum_float[cfieldid] = 0. total = self.slots.slots / self.slots.duration * self.slots.fclear solver = pywraplp.Solver("allocate_lst", pywraplp.Solver.GLOP_LINEAR_PROGRAMMING) solver.SetNumThreads(64) # Set up variables; these variables ('vars') will correspond to # the number of exposures in each slot for each field-cadence. objective = solver.Objective() for fieldid in self.allocinfo: for cadence in self.allocinfo[fieldid]: ccadence = self.allocinfo[fieldid][cadence] ccadence['nvars'] = 0 for ilst in range(self.slots.nlst): for iskybrightness in range(self.slots.nskybrightness): name = "{f}-{c}-{slst}-{ssb}".format(f=fieldid, c=cadence, slst=ilst, ssb=iskybrightness) if(ccadence['slots'][ilst, iskybrightness]): var = solver.NumVar(0.0, ccadence['needed'], name) if(minimize_time is False): objective.SetCoefficient(var, ccadence['value'] / ccadence['needed']) ccadence['vars'][ilst * self.slots.nskybrightness + iskybrightness] = var ccadence['nvars'] = ccadence['nvars'] + 1 # Cadences have limits, which limit the total number of exposures # in the cadence to the total needed, and as needed in each lunation. cadence_constraints = [] for fieldid in self.allocinfo: for cadence in self.allocinfo[fieldid]: ccadence = self.allocinfo[fieldid][cadence] if(cadence in self.observe_all_cadences): minval = float(ccadence['needed'] * 0.95) else: minval = float(0.) cadence_constraint = solver.Constraint(minval, ccadence['needed']) # set total cadence constraint for iskybrightness in range(self.slots.nskybrightness): for ilst in range(self.slots.nlst): if(ccadence['slots'][ilst, iskybrightness]): var = ccadence['vars'][ilst * self.slots.nskybrightness + iskybrightness] cadence_constraint.SetCoefficient(var, 1.) cadence_constraints.append(cadence_constraint) # set ratio of dark and bright so there is enough dark Asb = np.array([ccadence['needed_sb'][1], - ccadence['needed_sb'][0]]) solver_inf = solver.infinity() cadence_constraint_sb = solver.Constraint(0., solver_inf) for iskybrightness in range(self.slots.nskybrightness): for ilst in range(self.slots.nlst): if(ccadence['slots'][ilst, iskybrightness]): var = ccadence['vars'][ilst * self.slots.nskybrightness + iskybrightness] cadence_constraint_sb.SetCoefficient(var, Asb[iskybrightness]) cadence_constraints.append(cadence_constraint_sb) # Constraints on numbers mint_constraints = dict() mint_index = dict() for cname in self.minimum_ntargets: index = np.where(self.cartons == cname)[0] mint_constraints[cname] = solver.Constraint(float(self.minimum_ntargets[cname]), float(10000000.)) mint_index[cname] = index[0] # Maximum of one cadence per field. Note that because this is an # LP and not an integer problem, this constraint involves the # definition of "fractional" cadences. From the LP solution, # we will pick the largest value cadence. field_constraints = [] for fieldid in self.allocinfo: if(len(self.allocinfo[fieldid]) > 0): field_constraint = solver.Constraint(field_minimum_float[fieldid], 1.) 
for cadence in self.allocinfo[fieldid]: ccadence = self.allocinfo[fieldid][cadence] for ilst in range(self.slots.nlst): for iskybrightness in range(self.slots.nskybrightness): if(ccadence['slots'][ilst, iskybrightness]): var = ccadence['vars'][ilst * self.slots.nskybrightness + iskybrightness] invneeded = 1. / ccadence['needed'] field_constraint.SetCoefficient(var, invneeded) for cname in self.minimum_ntargets: imint = mint_index[cname] mint_constraints[cname].SetCoefficient( var, invneeded * float(ccadence['ngot_pct'][imint])) field_constraints.append(field_constraint) # Constrain sum of each slot to be less than total. Here the # units are still in numbers of exposures, but we multiply by # a scaling factor (xfactor) that depends on airmass to account # for the cost of high airmass observations. slot_constraints = [[0] * self.slots.nskybrightness] * self.slots.nlst for ilst in range(self.slots.nlst): for iskybrightness in range(self.slots.nskybrightness): slot_constraints[ilst][iskybrightness] = solver.Constraint(0., float(total[ilst, iskybrightness])) for fieldid in self.allocinfo: ifield = np.where(fieldid == self.fields['fieldid'])[0] field_racen = self.fields['racen'][ifield] field_deccen = self.fields['deccen'][ifield] for cadence in self.allocinfo[fieldid]: ccadence = self.allocinfo[fieldid][cadence] if(ccadence['slots'][ilst, iskybrightness]): xfactor = self.xfactor(racen=field_racen, deccen=field_deccen, cadence=cadence, skybrightness=self.slots.skybrightness[iskybrightness + 1], lst=self.slots.lst[ilst]) slot_constraints[ilst][iskybrightness].SetCoefficient(ccadence['vars'][ilst * self.slots.nskybrightness + iskybrightness], float(xfactor)) if(minimize_time is True): objective.SetCoefficient(ccadence['vars'][ilst * self.slots.nskybrightness + iskybrightness], float(xfactor)) # Solve the problem if(minimize_time is True): objective.SetMinimization() else: objective.SetMaximization() status = solver.Solve() if(status != 0): print("Solver failed: {status}.".format(status=status)) return(status) else: print("Solver succeeded.") # Extract the solution. # Here var is a number of exposures, and so is allocation. for fieldid in self.allocinfo: for cadence in self.allocinfo[fieldid]: ccadence = self.allocinfo[fieldid][cadence] ccadence['allocation'] = np.zeros((self.slots.nlst, self.slots.nskybrightness), dtype=np.float32) for ilst in range(self.slots.nlst): for iskybrightness in range(self.slots.nskybrightness): if(ccadence['slots'][ilst, iskybrightness]): var = ccadence['vars'][ilst * self.slots.nskybrightness + iskybrightness] ccadence['allocation'][ilst, iskybrightness] = var.solution_value() # Decide on which cadences to pick. 
field_array_dtype = [('fieldid', np.int32), ('racen', np.float64), ('deccen', np.float64), ('cadence', np.unicode_, 30), ('nfilled', np.int32), ('needed', np.int32), ('xfactor', np.float32, (self.slots.nlst, self.slots.nskybrightness)), ('slots_exposures', np.float32, (self.slots.nlst, self.slots.nskybrightness)), ('slots_time', np.float32, (self.slots.nlst, self.slots.nskybrightness))] field_array = np.zeros(len(self.allocinfo), dtype=field_array_dtype) for findx, fieldid in zip(np.arange(len(self.allocinfo)), self.allocinfo): field_array['fieldid'][findx] = fieldid ifield = np.where(fieldid == self.fields['fieldid'])[0] field_array['racen'][findx] = self.fields['racen'][ifield] field_array['deccen'][findx] = self.fields['deccen'][ifield] ncadence = len(self.allocinfo[fieldid]) cadence_totals = np.zeros(ncadence, dtype=np.float32) slots_totals = np.zeros((self.slots.nlst, self.slots.nskybrightness), dtype=np.float32) for indx, cadence in zip(np.arange(ncadence), self.allocinfo[fieldid]): ccadence = self.allocinfo[fieldid][cadence] slots_totals = slots_totals + ccadence['allocation'] cadence_totals[indx] = ccadence['allocation'].sum() field_total = cadence_totals.sum() field_array['cadence'][findx] = 'none' field_array['slots_exposures'][findx] = ( np.zeros((self.slots.nlst, self.slots.nskybrightness), dtype=np.float32)) field_array['slots_time'][findx] = ( np.zeros((self.slots.nlst, self.slots.nskybrightness), dtype=np.float32)) if(field_total > 0.): cadence_totals = cadence_totals / field_total cadence_cumulative = cadence_totals.cumsum() choose = np.random.random() icadence = np.where(cadence_cumulative > choose)[0][0] cadence = list(self.allocinfo[fieldid].keys())[icadence] field_array['cadence'][findx] = cadence field_array['needed'][findx] = ( self.allocinfo[fieldid][cadence]['needed']) normalize = field_array['needed'][findx] / slots_totals.sum() field_array['slots_exposures'][findx] = (slots_totals * normalize) field_array['nfilled'][findx] = np.int32( field_array['slots_exposures'][findx, :, :].sum() + 0.001) fscadence = np.array([x.strip() for x in self.field_slots['cadence']]) for findx in np.arange(len(field_array), dtype=np.int32): field = field_array[findx] fcadence = field['cadence'].strip() if(fcadence != 'none'): islots = np.where((self.field_slots['fieldid'] == field['fieldid']) & (fscadence == fcadence))[0][0] curr_slots = self.field_slots[islots]['slots'] for ilst in np.arange(self.slots.nlst, dtype=np.int32): lst = self.slots.lst[ilst] for isb in np.arange(self.slots.nskybrightness, dtype=np.int32): skybrightness = self.slots.skybrightness[isb + 1] if(curr_slots[ilst, isb]): xfactor = self.xfactor(racen=field['racen'], deccen=field['deccen'], cadence=fcadence, skybrightness=skybrightness, lst=lst) field['slots_time'][ilst, isb] = field['slots_exposures'][ilst, isb] * xfactor * self.slots.duration field['xfactor'][ilst, isb] = xfactor self.field_array = field_array return(status) def tofits(self, filename=None): """Write field allocation array to a FITS file Parameters ---------- filename : str file name to write to Notes ------ Writes all array attributes as a binary table. 
""" hdr = robostrategy.header.rsheader() hdr.append({'name':'EPOVER', 'value':self.epoch_overhead, 'comment':'Epoch overhead assumed (hours)'}) fitsio.write(filename, self.field_array, header=hdr, clobber=True, extname='ALLOCATE') self.slots.tofits(filename=filename, clobber=False) fitsio.write(filename, self.fields, extname='FIELDS', clobber=False) fitsio.write(filename, self.field_slots, extname='FSLOTS', clobber=False) fitsio.write(filename, self.field_options, extname='OPTIONS', clobber=False) cartons_arr = np.zeros(len(self.cartons), dtype=[('carton', 'U50')]) cartons_arr['carton'] = self.cartons cadences_arr = np.zeros(len(self.cadences), dtype=[('cadences', 'U50')]) cadences_arr['cadences'] = self.cadences fitsio.write(filename, cartons_arr, extname='CARTONS', clobber=False) fitsio.write(filename, cadences_arr, extname='CADENCES', clobber=False) return def fromfits(self, filename=None): """Read field allocation array from a FITS file Parameters ---------- filename : str file name to write to Notes ------ Reads all attributes from a binary FITS table. """ self.field_array, hdr = fitsio.read(filename, header=True, ext=1) if('EPOVER' in hdr): self.epoch_overhead = np.float32(hdr['EPOVER']) self.slots = robostrategy.slots.Slots() self.slots.fromfits(filename, ext=2) self.fields = fitsio.read(filename, ext=3) self.field_slots = fitsio.read(filename, ext=4) self.field_options = fitsio.read(filename, ext=5) self.cartons = fitsio.read(filename, ext=6) return def _available_lst(self): available = self.slots.slots * self.slots.fclear return(available) def _used_lst(self): used = self.field_array['slots_time'][:, :, :].sum(axis=0) return(used) def _got_ra(self): got = np.zeros((self.slots.nlst, self.slots.nskybrightness), dtype=np.float32) for iskybrightness in np.arange(self.slots.nskybrightness): nfilled = self.field_array['slots_time'][:, :, iskybrightness].sum(axis=1) rahist, rabinedges = np.histogram(self.field_array['racen'] / 15., range=[0., 24.], weights=nfilled, bins=self.slots.nlst) got[:, iskybrightness] = rahist return(got) def _used_lst_cadence(self, iskybrightness=None): used = np.zeros(self.field_array['slots_time'][0, :, 0].shape, dtype=np.float32) for ifield, field in enumerate(self.field_array): issky = (self.cadencelist.cadences[field['cadence']].skybrightness == self.slots.skybrightness[iskybrightness + 1]) nsky = self.cadencelist.cadences[field['cadence']].nexp[issky].sum() if(nsky > 0): avgx = ((field['xfactor'] * field['slots_time']).sum() / field['slots_time'].sum()) skytime = nsky * self.slots.duration * avgx curr_used = field['slots_time'][:, iskybrightness] curr_used = skytime * curr_used / curr_used.sum() used = used + curr_used return(used) def plot_full(self, iskybrightness=None, title=None): """Plot the LST distributions for the allocations Parameters ---------- iskybrightness : ndarray of np.int32 indices of the sky brightness classes to plot title : str title to put on plot """ available = self._available_lst() used = self._used_lst() got = self._got_ra() useddark = None if(iskybrightness is None): used = used.sum(axis=1) available = available.sum(axis=1) got = got.sum(axis=1) else: used = used[:, iskybrightness] available = available[:, iskybrightness] got = got[:, iskybrightness] if(iskybrightness == 0): useddark = self._used_lst_cadence(iskybrightness=0) plt.plot(used, color='red', linewidth=3, alpha=0.6, label='Hours used per LST ({t:>3.1f} h)'.format(t=used.sum())) plt.plot(available, color='red', linewidth=1, label='Hours available per LST ({t:>.1f} 
                 h)'.format(t=available.sum()))
        print(used.sum())
        print(available.sum())
        plt.plot(got, color='blue', linewidth=3, alpha=0.6,
                 label='Hours observed per RA ({t:>.1f} h)'.format(t=got.sum()))
        print(got.sum())
        if(useddark is not None):
            plt.plot(useddark, color='green', linewidth=3, alpha=0.6,
                     label='Dark cadence used per LST ({t:>.1f} h)'.format(t=useddark.sum()))
            print(useddark.sum())
        plt.xlabel('LST or RA (hours)')
        plt.ylabel('Exposure hours')
        plt.ylim(np.array([-0.05, 1.2]) *
                 np.array([got.max(), used.max(), available.max()]).max())
        plt.legend(loc=1)
        if(title is not None):
            plt.title(title)
        return

    def _convert_radec(self, m, ra, dec):
        return m(((360. - ra) + 180.) % 360., dec, inverse=False)

    def plot_fields(self, indx=None, label=False, linear=False,
                    colorbar=True, lon_0=270., darkorbright=None, **kwargs):
        """Plot the RA/Dec distribution of fields allocated

        Parameters
        ----------

        indx : ndarray of np.int32
            indices of fields to plot
        """
        if(indx is None):
            indx = np.arange(len(self.field_array), dtype=np.int32)

        if basemap is None:
            raise ImportError('basemap was not imported. Is it installed?')

        m = basemap.Basemap(projection='moll', lon_0=lon_0, resolution='c',
                            celestial=True)

        # draw parallels and meridians.
        m.drawparallels(np.arange(-90., 120., 30.), linewidth=0.5,
                        labels=[1, 0, 0, 0], labelstyle='+/-')
        m.drawmeridians(np.arange(0., 420., 60.),
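# ------------------------------------------------------------------------
# A minimal, standalone sketch of the linear-programming pattern used above
# with Google OR-Tools pywraplp/GLOP: continuous per-(field, slot) exposure
# variables, per-field and per-slot constraints, a value-maximising
# objective, and solution_value() to read the result back. The toy fields,
# values and slot capacities below are invented for illustration; this is
# not the robostrategy allocation model itself.
from ortools.linear_solver import pywraplp


def toy_allocate():
    solver = pywraplp.Solver("toy_allocate",
                             pywraplp.Solver.GLOP_LINEAR_PROGRAMMING)
    fields = {"A": {"needed": 4., "value": 10.},
              "B": {"needed": 6., "value": 7.}}
    slot_capacity = [5., 3.]   # exposures available in each LST slot

    objective = solver.Objective()
    variables = {}
    for name, info in fields.items():
        for islot in range(len(slot_capacity)):
            var = solver.NumVar(0.0, info["needed"],
                                "{f}-{s}".format(f=name, s=islot))
            variables[(name, islot)] = var
            # reward per exposure, scaled so a fully satisfied field
            # contributes its full value to the objective
            objective.SetCoefficient(var, info["value"] / info["needed"])

    # each field receives at most the exposures it needs, summed over slots
    for name, info in fields.items():
        c = solver.Constraint(0.0, info["needed"])
        for islot in range(len(slot_capacity)):
            c.SetCoefficient(variables[(name, islot)], 1.0)

    # each slot cannot hand out more exposures than it has available
    for islot, capacity in enumerate(slot_capacity):
        c = solver.Constraint(0.0, capacity)
        for name in fields:
            c.SetCoefficient(variables[(name, islot)], 1.0)

    objective.SetMaximization()
    status = solver.Solve()
    if status == pywraplp.Solver.OPTIMAL:
        for key, var in sorted(variables.items()):
            print(key, var.solution_value())
    return status


if __name__ == "__main__":
    toy_allocate()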
import json import os,sys,platform,time from base64 import b64encode from datetime import datetime import re import lxml import lxml.html import lxml.etree import html import urllib.parse from collections import OrderedDict from collections import Mapping from xmljson import BadgerFish import io def getResourceFolder(): if getattr(sys, 'frozen', False) and (platform.system() != 'Darwin'): folder = os.path.dirname(sys.executable) elif getattr(sys, 'frozen', False) and (platform.system() == 'Darwin'): folder = sys._MEIPASS else: folder = os.getcwd() return folder def hasDictValue(data,multikey, piped=False): try: multikey = multikey.split('|').pop(0) if piped else multikey keys=multikey.split('.',1) if isinstance(data, Mapping) and keys[0] != '': if len(keys) > 1: value = data.get(keys[0],"") value = hasDictValue(value,keys[1]) else: value = keys[0] in data if (not value) and (keys[0] == '*'): if len(keys) == 1: value = bool(data) else: listkey = keys[1] for elem in data: value = hasDictValue(data[elem], listkey) if value: break elif type(data) is list and keys[0] == '*': if len(keys) > 1 and len(data) > 0: value = data[0] value = hasDictValue(value,keys[1]) else: value = len(data) > 0 elif type(data) is list and keys[0].isnumeric(): no = int(keys[0]) if len(keys) > 1 and len(data) > no: value = data[no] value = hasDictValue(value,keys[1]) else: value = len(data) > no else: value = False return value except Exception as e: return False def extractNames(customcolumns = []): """Extract name contained in keys """ names = [] for column in customcolumns: name = tokenize_with_escape(str(column)).pop(0).split('=', 1) name = column if len(name) < 2 else name[0] names.append(name) return names def parseKey(key): pipeline = tokenize_with_escape(key) key = pipeline.pop(0).split('=', 1) name = key.pop(0) if len(key) > 1 else None key = key[0] return (name, key, pipeline) def hasValue(data,key): name, value = extractValue(data, key, False) if (value is None): return False elif (value == False): return False elif (type(value) is list) and (len(value) == 0): return False else: return True def extractValue(data, key, dump=True, folder="", default=''): """Extract value from dict and pipe through modifiers :param data: :param multikey: :param dump: :return: """ try: # Parse key name, key, pipeline = parseKey(key) # Input: dict. Output: string, number, list or dict value = getDictValue(data, key, dump, default) for idx, modifier in enumerate(pipeline): value = value if type(value) is list else [value] if modifier.startswith('json:'): # Input: list of strings. # Output if dump==True: list of strings # Output if dump==False: list of dict, list, string or number selector = modifier[5:] # Flatten list if not dumped if dump: value = [getDictValue(json.loads(x), selector, dump=dump) for x in value] else: items = [getDictValue(json.loads(x), selector, dump=dump) for x in value] value = [] for item in items: if type(item) is list: value += item else: value.append(item) elif modifier.startswith('not:'): selector = modifier[4:] check = [x == selector for x in value] value = not any(check) elif modifier.startswith('is:'): selector = modifier[3:] check = [x == selector for x in value] value = any(check) elif modifier.startswith('re:'): # Input: list of strings. 
# Output: list of strings selector = modifier[3:] items = [re.findall(selector,x) for x in value] # Flatten (first group in match if re.findall returns multiple groups) value = [] for matches in items: for match in matches: if (type(match) is tuple): value.append(match[0]) else: value.append(match) elif modifier.startswith('css:'): # Input: list of strings. # Output: list of strings selector = modifier[4:] value = [extractHtml(x, selector, type='css') for x in value] value = [y for x in value for y in x] elif modifier.startswith('xpath:'): # Input: list of strings. # Output: list of strings selector = modifier[6:] value = [extractHtml(x, selector, type='xpath') for x in value] value = [y for x in value for y in x] # Load file contents (using modifiers after a pipe symbol) elif modifier == 'file': value = value[0] with open(os.path.join(folder, value), 'rb') as file: value = file.read() elif modifier == 'base64': value = value[0] value = b64encode(value.encode('utf-8')).decode('utf-8') elif modifier == 'length': value = len(value) elif modifier == "timestamp": value = [datetime.fromtimestamp(int(x)).isoformat() for x in value] elif modifier == "shortdate": value = [str(datetime.strptime(x, '%a %b %d %H:%M:%S %z %Y')) for x in value] # If modified in pipeline (otherwise already handled by getDictValue)... if dump and (type(value) is dict): value = json.dumps(value) if dump and (type(value) is list): value = ";".join(value) elif dump and (isinstance(value, int)): value = str(value) return (name, value) except Exception as e: return (None, default) def getDictValue(data, multikey, dump=True, default = ''): """Extract value from dict :param data: :param multikey: :param dump: :param default: :return: """ try: keys=multikey.split('.',1) if isinstance(data, Mapping) and keys[0] != '': try: value=data[keys[0]] if len(keys) > 1: value = getDictValue(value,keys[1],dump, default) except: if keys[0] == '*': listkey = keys[1] if len(keys) > 1 else '' value=[] for elem in data: value.append(getDictValue(data[elem],listkey,dump, default)) else: value = default elif type(data) is list and keys[0] != '': try: value=data[int(keys[0])] if len(keys) > 1: value = getDictValue(value,keys[1],dump, default) except: if keys[0] == '*': listkey = keys[1] if len(keys) > 1 else '' else: listkey = keys[0] value=[] for elem in data: value.append(getDictValue(elem, listkey, dump, default)) elif keys[0] == '': value = data else: value = default if dump and (type(value) is dict): value = json.dumps(value) elif dump and (type(value) is list): value = ";".join(value) elif dump and (isinstance(value, int)): value = str(value) elif dump and (isinstance(value, float)): value = str(value) return value except Exception as e: return default def getDictValueOrNone(data, key, dump = True): if (key is None) or (key == ''): return None elif not (isinstance(data, dict)): return None elif not hasDictValue(data, key, piped=True): return None else: name, value = extractValue(data, key, dump=dump, default=None) value = None if (value == "") else value return value def filterDictValue(data, multikey, dump=True, piped=False): try: multikey = multikey.split('|').pop(0) if piped else multikey keys=multikey.split('.',1) if isinstance(data, Mapping) and keys[0] != '': value = { key: data[key] for key in list(data.keys()) if key != keys[0]} if len(keys) > 1: value[keys[0]] = filterDictValue(data[keys[0]],keys[1],False) if not len(value): value = None elif type(data) is list and keys[0] != '': try: value=data if len(keys) > 1: value[int(keys[0])] 
= getDictValue(value[int(keys[0])],keys[1],False) else: value[int(keys[0])] = '' except: if keys[0] == '*' and len(keys) > 1: listkey = keys[1] elif keys[0] == '*': listkey = '' else: listkey = keys[0] valuelist=[] for elem in data: valuelist.append(filterDictValue(elem,listkey,False)) value = valuelist else: value = '' if dump and (type(value) is dict or type(value) is list): return json.dumps(value) else: return value except Exception as e: return "" def recursiveIterKeys(value,prefix=None): for key in value.keys(): if type(value[key]) is dict: for subkey in recursiveIterKeys(value[key],key): fullkey = subkey if prefix is None else ".".join([prefix,subkey]) yield fullkey else: fullkey = key if prefix is None else ".".join([prefix,key]) yield fullkey def htmlToJson(data,csskey=None,type='lxml'): #type='html5' soup = lxml.html.fromstring(data) def parseSoup(element,context = True): out = {} if context: #out['name'] = element.tag if element.text is not None: out['text'] = str(element.text).strip("\n\t ") attributes= {} if context: for name, value in sorted(element.items()): attributes['@'+name] = value out.update(attributes) children = [] for child in element: if isinstance(child.tag, str): id = str(child.get('id','')) key = child.tag+'#'+id if id != '' else child.tag children.append({key:parseSoup(child)}) else: value = str(child.text).strip("\n\t ") if value != '': children.append({'text':value}) if len(children) > 0: out['items'] = children #simplify: if len(children) == 0 and len(attributes) ==0: out = out.get('text',None) elif len(children) > 0 and len(attributes) ==0 and out.get('text',None) is None: del out['items'] out = children return out output = [] if csskey is not None: for part in soup.cssselect(csskey): output.append(parseSoup(part,True)) else: output = {soup.tag : parseSoup(soup,True)} return output def elementToJson(element, context=True): out = {} if context: out['tag'] = element.tag out['text'] = element.text_content().strip("\r\n\t ") #if element.text is not None: # out['text'] = str(element.text).strip("\n\t ") attributes = {} if context: for name, value in sorted(element.items()): attributes['@' + name] = value out.update(attributes) # children = [] # for child in element: # if isinstance(child.tag, str): # id = str(child.get('id', '')) # key = child.tag + '#' + id if id != '' else child.tag # children.append({key: parseSoup(child)}) # else: # value = str(child.text).strip("\n\t ") # if value != '': # children.append({'text': value}) # # if len(children) > 0: # out['items'] = children
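# ------------------------------------------------------------------------
# Usage sketch for the dotted-path helpers defined above. It assumes this
# module is importable and that tokenize_with_escape() (defined elsewhere in
# the module) splits keys on unescaped '|' characters. getDictValue() walks
# nested dicts/lists with dot-separated keys, where '*' fans out over all
# children and numeric segments index lists; extractValue() additionally
# accepts an optional "name=" prefix and the pipe-separated modifiers
# (json:, re:, css:, xpath:, ...). Expected outputs are noted in comments.
if __name__ == "__main__":
    data = {
        "user": {"name": "alice"},
        "posts": [
            {"id": 1, "tags": ["a", "b"]},
            {"id": 2, "tags": ["c"]},
        ],
    }
    # plain dotted lookup
    print(getDictValue(data, "user.name"))         # "alice"
    # '*' iterates over the list; with dump=True the results are ';'-joined
    print(getDictValue(data, "posts.*.id"))        # "1;2"
    # numeric index into a list, then a nested key
    print(getDictValue(data, "posts.0.tags.1"))    # "b"
    # extractValue() returns a (name, value) pair for "name=key" selectors
    print(extractValue(data, "first_tag=posts.0.tags.0"))   # ("first_tag", "a")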