Dataset column summary (name, type, observed range):

  repo_name      string  (lengths 5 to 92)
  path           string  (lengths 4 to 232)
  copies         string  (19 classes)
  size           string  (lengths 4 to 7)
  content        string  (lengths 721 to 1.04M)
  license        string  (15 classes)
  hash           int64   (-9,223,277,421,539,062,000 to 9,223,102,107B)
  line_mean      float64 (6.51 to 99.9)
  line_max       int64   (15 to 997)
  alpha_frac     float64 (0.25 to 0.97)
  autogenerated  bool    (1 class)
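This column summary reads like a dataset-viewer header for a code corpus. A minimal sketch of loading and filtering such a dump follows, assuming a Hugging Face-style `datasets` layout; the dataset identifier is a placeholder, not the real name.

# Minimal sketch, assuming the rows below come from a Hugging Face-style dataset.
# "user/code-dump" is a hypothetical identifier.
from datasets import load_dataset

ds = load_dataset("user/code-dump", split="train")

# Keep only permissively licensed, non-autogenerated files.
keep = ds.filter(
    lambda row: row["license"] in {"apache-2.0", "bsd-3-clause"}
    and not row["autogenerated"]
)
print(len(keep), "rows retained")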
maybelinot/findltr
findltr/utils.py
1
4673
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: maybelinot
# @Email: [email protected]
# @Date:   2015-09-12 16:06:18
# @Last Modified by:   maybelinot
# @Last Modified time: 2015-09-12 20:14:58

from __future__ import unicode_literals, absolute_import

import logging
import os
import subprocess
import sys
import time

# EXTERNALLY INSTALLED
from BCBio import GFF
from Bio import SeqIO, Seq, SeqRecord, SeqFeature
from Bio.Blast import NCBIXML
from Bio.Blast.Applications import NcbiblastnCommandline
from io import StringIO
import yaml

# Load logging before anything else
logging.basicConfig(format='>> %(message)s')
logr = logging.getLogger('findltr')


def export_gff(seq, young_lcp, outputfile):
    gff_output = outputfile or 'rec_%s.gff3' % time.time()
    logr.info('Found LTRs are saved in ' + gff_output)
    records = []
    # fix name to chrN based on input seq
    gff = SeqRecord.SeqRecord(Seq.Seq(seq), "seq0")
    top_feature = []

    for idx, item in enumerate(young_lcp):
        seq1 = SeqRecord.SeqRecord(
            Seq.Seq(seq[item[0][0]:item[0][1]]), id="seq1")
        seq2 = SeqRecord.SeqRecord(
            Seq.Seq(seq[item[1][0]:item[1][1]]), id="seq2")
        with open("/tmp/seq1.fasta", "w") as query:
            SeqIO.write(seq1, query, "fasta")
        with open("/tmp/seq2.fasta", "w") as subject:
            SeqIO.write(seq2, subject, "fasta")

        blast_output = NcbiblastnCommandline(
            query="/tmp/seq1.fasta", subject="/tmp/seq2.fasta", outfmt=5)()[0]
        blast_result_record = NCBIXML.read(
            StringIO(unicode(blast_output, "utf-8")))

        identity = 0
        for alignment in blast_result_record.alignments:
            for hsp in alignment.hsps:
                identity = max(
                    hsp.identities / float(hsp.align_length) * 100.0, identity)
        identity = "%0.2f" % identity
        # cut zeros tail
        # identity = identity.rstrip("0")
        # identity = identity.rstrip(".")

        # improve seqfeatures appending
        sub_qualifiers_region = {"source": "ltrfind",
                                 "ID": "repeat_region" + str(idx + 1)}
        top_feature.append(SeqFeature.SeqFeature(
            SeqFeature.FeatureLocation(item[0][0] - 4, item[1][1] + 4),
            type="repeat_region", strand=0, qualifiers=sub_qualifiers_region))

        sub_qualifiers_target_site = {"source": "ltrfind",
                                      "Parent": "repeat_region" + str(idx + 1)}
        top_feature.append(SeqFeature.SeqFeature(
            SeqFeature.FeatureLocation(item[0][0] - 4, item[0][0]),
            type="target_site_duplication", strand=0,
            qualifiers=sub_qualifiers_target_site))

        sub_qualifiers = {"source": "ltrfind",
                          "ID": "LTR_retrotransposon" + str(idx + 1),
                          "Parent": "repeat_region" + str(idx + 1),
                          "ltr_similarity": identity,
                          "seq_number": "0"}
        top_feature.append(SeqFeature.SeqFeature(
            SeqFeature.FeatureLocation(item[0][0], item[1][1]),
            type="LTR_retrotransposon", strand=0, qualifiers=sub_qualifiers))

        sub_qualifiers_ltrs = {"source": "ltrfind",
                               "Parent": "LTR_retrotransposon" + str(idx + 1)}
        top_feature.append(SeqFeature.SeqFeature(
            SeqFeature.FeatureLocation(item[0][0], item[0][1]),
            type="long_terminal_repeat", strand=0,
            qualifiers=sub_qualifiers_ltrs))
        top_feature.append(SeqFeature.SeqFeature(
            SeqFeature.FeatureLocation(item[1][0], item[1][1]),
            type="long_terminal_repeat", strand=0,
            qualifiers=sub_qualifiers_ltrs))
        top_feature.append(SeqFeature.SeqFeature(
            SeqFeature.FeatureLocation(item[1][1], item[1][1] + 4),
            type="target_site_duplication", strand=0,
            qualifiers=sub_qualifiers_target_site))

    gff.features = top_feature
    # track name='findltr' description='findltr Supplied Track'
    with open(gff_output, "w") as out_handle:
        GFF.write([gff], out_handle)


def run(cmd):
    cmd = cmd if isinstance(cmd, list) else cmd.split()
    try:
        process = subprocess.Popen(
            cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except Exception as error:
        logr.error("'{0}' failed: {1}".format(cmd, error))
        raise
    output, errors = process.communicate()
    if process.returncode != 0 or errors:
        if output:
            logr.error(output)
        if errors:
            logr.error(errors)
        sys.exit(process.returncode)
    return output, errors
gpl-3.0
-1,912,421,539,951,222,800
39.991228
114
0.598545
false
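The `export_gff` helper above expects `young_lcp` to be a list of candidate LTR pairs, each pair holding two `(start, end)` coordinate tuples into `seq`. A minimal usage sketch, assuming `findltr.utils` is importable and that Biopython, bcbio-gff, and a `blastn` binary are installed; the sequence and coordinates are invented for illustration.

# Hypothetical toy input: one candidate element whose two LTR copies sit at
# positions 10-60 and 500-550 of the input sequence (coordinates invented).
from findltr.utils import export_gff

seq = "ACGT" * 200                    # placeholder genomic sequence
young_lcp = [((10, 60), (500, 550))]  # [((ltr1_start, ltr1_end), (ltr2_start, ltr2_end)), ...]

# Writes a GFF3 file with repeat_region / LTR_retrotransposon features;
# needs blastn on PATH because identity is computed via NcbiblastnCommandline.
export_gff(seq, young_lcp, "example.gff3")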
sailthru/stolos
stolos/examples/tasks/pyspark_example.py
1
2301
"""This example demonstrates how to incorporate your pyspark application with stolos's pyspark plugin. It's probably simpler to use the bash plugin instead. """ def main(elem, ns, **job_id_identifiers): """Stolos's pyspark plugin will call this function to begin the application The function parameters may be one of the below. The plugin will intelligently figure out what type of object you want to receive based on the function definition. def main(sc, ns, **job_id_identifiers): def main(textFile, ns, **job_id_identifiers): def main(elem, ns, **job_id_identifiers): `sc` - an instance of a spark context `textFile` - a pyspark RDD from a textFile, where the data loaded into the textFile RDD is determined by ns.read_fp `elem` - assume this application is a simple map operation that receives individual elements of an RDD. `ns` - an argparse.Namespace containing whatever argparse options you specified + the default ones provided by the pyspark plugin `job_id_identifiers` - a dictionary of extra keyword args that make up the job_id. (The job_id identifies what variation of work this application performs). * Note that `sc`, `textFile` and `elem` are mutually exclusive. They identify which specific api your pyspark application will use. """ # ... your code here. result = elem return result # And don't forget you would need to add this to the tasks graph: # "test_stolos/test_pyspark": { # "job_type": "pyspark", # "pymodule": "stolos.examples.tasks.pyspark_example" # } # # Then, to run it, there are two methods: # # 1. Queue a job in the task queue and then run the job # 2. Manually run a job (not recommended except for testing) # Option 1 looks like this: # ./bin/stolos-submit -a test_stolos/test_pyspark # --job_id 20140501_1_test # # stolos # -a test_stolos/test_pyspark --write_fp /tmp/alex --read_fp ./README.md # Option 2 bypasses scheduling and just runs a task + plugin directly. This # option is useful if you wish to verify that your code works with the plugin: # # python # -a test_stolos/test_pyspark --write_fp /tmp/alex --read_fp ./README.md # --bypass_scheduler --job_id 20140501_1_test
apache-2.0
-5,081,009,720,995,576,000
34.4
78
0.689266
false
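As the docstring in the file above notes, the plugin can alternatively hand the entry point a live SparkContext. A minimal sketch of that variant follows, assuming `ns.read_fp` and `ns.write_fp` are the default file-path options the plugin provides; the body is illustrative, not part of the Stolos example.

# Sketch of the `sc` variant; treats ns.read_fp / ns.write_fp as plain text paths.
def main(sc, ns, **job_id_identifiers):
    lines = sc.textFile(ns.read_fp)        # load the input selected by --read_fp
    lengths = lines.map(lambda l: len(l))  # trivial per-line transformation
    lengths.saveAsTextFile(ns.write_fp)    # write results where --write_fp points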
hzlf/openbroadcast
website/djangorestframework/resources.py
1
15477
from django import forms from django.core.urlresolvers import reverse, get_urlconf, get_resolver, NoReverseMatch from django.db import models from django.db.models.query import QuerySet from django.db.models.fields.related import RelatedField from django.utils.encoding import smart_unicode from djangorestframework.response import ErrorResponse from djangorestframework.serializer import Serializer, _SkipField from djangorestframework.utils import as_tuple import decimal import inspect import re class BaseResource(Serializer): """ Base class for all Resource classes, which simply defines the interface they provide. """ fields = None include = None exclude = None def __init__(self, view=None, depth=None, stack=[], **kwargs): super(BaseResource, self).__init__(depth, stack, **kwargs) self.view = view def validate_request(self, data, files=None): """ Given the request content return the cleaned, validated content. Typically raises a :exc:`response.ErrorResponse` with status code 400 (Bad Request) on failure. """ return data def filter_response(self, obj): """ Given the response content, filter it into a serializable object. """ return self.serialize(obj) class Resource(BaseResource): """ A Resource determines how a python object maps to some serializable data. Objects that a resource can act on include plain Python object instances, Django Models, and Django QuerySets. """ # The model attribute refers to the Django Model which this Resource maps to. # (The Model's class, rather than an instance of the Model) model = None # By default the set of returned fields will be the set of: # # 0. All the fields on the model, excluding 'id'. # 1. All the properties on the model. # 2. The absolute_url of the model, if a get_absolute_url method exists for the model. # # If you wish to override this behaviour, # you should explicitly set the fields attribute on your class. fields = None class FormResource(Resource): """ Resource class that uses forms for validation. Also provides a :meth:`get_bound_form` method which may be used by some renderers. On calling :meth:`validate_request` this validator may set a :attr:`bound_form_instance` attribute on the view, which may be used by some renderers. """ form = None """ The :class:`Form` class that should be used for request validation. This can be overridden by a :attr:`form` attribute on the :class:`views.View`. """ def validate_request(self, data, files=None): """ Given some content as input return some cleaned, validated content. Raises a :exc:`response.ErrorResponse` with status code 400 (Bad Request) on failure. Validation is standard form validation, with an additional constraint that *no extra unknown fields* may be supplied. On failure the :exc:`response.ErrorResponse` content is a dict which may contain :obj:`'errors'` and :obj:`'field-errors'` keys. If the :obj:`'errors'` key exists it is a list of strings of non-field errors. If the :obj:`'field-errors'` key exists it is a dict of ``{'field name as string': ['errors as strings', ...]}``. """ return self._validate(data, files) def _validate(self, data, files, allowed_extra_fields=(), fake_data=None): """ Wrapped by validate to hide the extra flags that are used in the implementation. allowed_extra_fields is a list of fields which are not defined by the form, but which we still expect to see on the input. 
fake_data is a string that should be used as an extra key, as a kludge to force .errors to be populated when an empty dict is supplied in `data` """ # We'd like nice error messages even if no content is supplied. # Typically if an empty dict is given to a form Django will # return .is_valid() == False, but .errors == {} # # To get around this case we revalidate with some fake data. if fake_data: data[fake_data] = '_fake_data' allowed_extra_fields = tuple(allowed_extra_fields) + ('_fake_data',) bound_form = self.get_bound_form(data, files) if bound_form is None: return data self.view.bound_form_instance = bound_form data = data and data or {} files = files and files or {} seen_fields_set = set(data.keys()) form_fields_set = set(bound_form.fields.keys()) allowed_extra_fields_set = set(allowed_extra_fields) # In addition to regular validation we also ensure no additional fields are being passed in... unknown_fields = seen_fields_set - (form_fields_set | allowed_extra_fields_set) unknown_fields = unknown_fields - set(('csrfmiddlewaretoken', '_accept', '_method')) # TODO: Ugh. # Check using both regular validation, and our stricter no additional fields rule if bound_form.is_valid() and not unknown_fields: # Validation succeeded... cleaned_data = bound_form.cleaned_data # Add in any extra fields to the cleaned content... for key in (allowed_extra_fields_set & seen_fields_set) - set(cleaned_data.keys()): cleaned_data[key] = data[key] return cleaned_data # Validation failed... detail = {} if not bound_form.errors and not unknown_fields: # is_valid() was False, but errors was empty. # If we havn't already done so attempt revalidation with some fake data # to force django to give us an errors dict. if fake_data is None: return self._validate(data, files, allowed_extra_fields, '_fake_data') # If we've already set fake_dict and we're still here, fallback gracefully. detail = {u'errors': [u'No content was supplied.']} else: # Add any non-field errors if bound_form.non_field_errors(): detail[u'errors'] = bound_form.non_field_errors() # Add standard field errors field_errors = dict( (key, map(unicode, val)) for (key, val) in bound_form.errors.iteritems() if not key.startswith('__') ) # Add any unknown field errors for key in unknown_fields: field_errors[key] = [u'This field does not exist.'] if field_errors: detail[u'field-errors'] = field_errors # Return HTTP 400 response (BAD REQUEST) raise ErrorResponse(400, detail) def get_form_class(self, method=None): """ Returns the form class used to validate this resource. """ # A form on the view overrides a form on the resource. form = getattr(self.view, 'form', None) or self.form # Use the requested method or determine the request method if method is None and hasattr(self.view, 'request') and hasattr(self.view, 'method'): method = self.view.method elif method is None and hasattr(self.view, 'request'): method = self.view.request.method # A method form on the view or resource overrides the general case. # Method forms are attributes like `get_form` `post_form` `put_form`. if method: form = getattr(self, '%s_form' % method.lower(), form) form = getattr(self.view, '%s_form' % method.lower(), form) return form def get_bound_form(self, data=None, files=None, method=None): """ Given some content return a Django form bound to that content. If form validation is turned off (:attr:`form` class attribute is :const:`None`) then returns :const:`None`. 
""" form = self.get_form_class(method) if not form: return None if data is not None or files is not None: return form(data, files) return form() #class _RegisterModelResource(type): # """ # Auto register new ModelResource classes into ``_model_to_resource`` # """ # def __new__(cls, name, bases, dct): # resource_cls = type.__new__(cls, name, bases, dct) # model_cls = dct.get('model', None) # if model_cls: # _model_to_resource[model_cls] = resource_cls # return resource_cls class ModelResource(FormResource): """ Resource class that uses forms for validation and otherwise falls back to a model form if no form is set. Also provides a :meth:`get_bound_form` method which may be used by some renderers. """ # Auto-register new ModelResource classes into _model_to_resource #__metaclass__ = _RegisterModelResource form = None """ The form class that should be used for request validation. If set to :const:`None` then the default model form validation will be used. This can be overridden by a :attr:`form` attribute on the :class:`views.View`. """ model = None """ The model class which this resource maps to. This can be overridden by a :attr:`model` attribute on the :class:`views.View`. """ fields = None """ The list of fields to use on the output. May be any of: The name of a model field. To view nested resources, give the field as a tuple of ("fieldName", resource) where `resource` may be any of ModelResource reference, the name of a ModelResourc reference as a string or a tuple of strings representing fields on the nested model. The name of an attribute on the model. The name of an attribute on the resource. The name of a method on the model, with a signature like ``func(self)``. The name of a method on the resource, with a signature like ``func(self, instance)``. """ exclude = ('id', 'pk') """ The list of fields to exclude. This is only used if :attr:`fields` is not set. """ include = ('url',) """ The list of extra fields to include. This is only used if :attr:`fields` is not set. """ def __init__(self, view=None, depth=None, stack=[], **kwargs): """ Allow :attr:`form` and :attr:`model` attributes set on the :class:`View` to override the :attr:`form` and :attr:`model` attributes set on the :class:`Resource`. """ super(ModelResource, self).__init__(view, depth, stack, **kwargs) self.model = getattr(view, 'model', None) or self.model def validate_request(self, data, files=None): """ Given some content as input return some cleaned, validated content. Raises a :exc:`response.ErrorResponse` with status code 400 (Bad Request) on failure. Validation is standard form or model form validation, with an additional constraint that no extra unknown fields may be supplied, and that all fields specified by the fields class attribute must be supplied, even if they are not validated by the form/model form. On failure the ErrorResponse content is a dict which may contain :obj:`'errors'` and :obj:`'field-errors'` keys. If the :obj:`'errors'` key exists it is a list of strings of non-field errors. If the ''field-errors'` key exists it is a dict of {field name as string: list of errors as strings}. """ return self._validate(data, files, allowed_extra_fields=self._property_fields_set) def get_bound_form(self, data=None, files=None, method=None): """ Given some content return a ``Form`` instance bound to that content. If the :attr:`form` class attribute has been explicitly set then that class will be used to create the Form, otherwise the model will be used to create a ModelForm. 
""" form = self.get_form_class(method) if not form and self.model: # Fall back to ModelForm which we create on the fly class OnTheFlyModelForm(forms.ModelForm): class Meta: model = self.model #fields = tuple(self._model_fields_set) form = OnTheFlyModelForm # Both form and model not set? Okay bruv, whatevs... if not form: return None # Instantiate the ModelForm as appropriate if data is not None or files is not None: if issubclass(form, forms.ModelForm) and hasattr(self.view, 'model_instance'): # Bound to an existing model instance return form(data, files, instance=self.view.model_instance) else: return form(data, files) return form() def url(self, instance): """ Attempts to reverse resolve the url of the given model *instance* for this resource. Requires a ``View`` with :class:`mixins.InstanceMixin` to have been created for this resource. This method can be overridden if you need to set the resource url reversing explicitly. """ if not hasattr(self, 'view_callable'): raise _SkipField # dis does teh magicks... urlconf = get_urlconf() resolver = get_resolver(urlconf) possibilities = resolver.reverse_dict.getlist(self.view_callable[0]) for tuple_item in possibilities: possibility = tuple_item[0] # pattern = tuple_item[1] # Note: defaults = tuple_item[2] for django >= 1.3 for result, params in possibility: #instance_attrs = dict([ (param, getattr(instance, param)) for param in params if hasattr(instance, param) ]) instance_attrs = {} for param in params: if not hasattr(instance, param): continue attr = getattr(instance, param) if isinstance(attr, models.Model): instance_attrs[param] = attr.pk else: instance_attrs[param] = attr try: return reverse(self.view_callable[0], kwargs=instance_attrs) except NoReverseMatch: pass raise _SkipField @property def _model_fields_set(self): """ Return a set containing the names of validated fields on the model. """ model_fields = set(field.name for field in self.model._meta.fields) if fields: return model_fields & set(as_tuple(self.fields)) return model_fields - set(as_tuple(self.exclude)) @property def _property_fields_set(self): """ Returns a set containing the names of validated properties on the model. """ property_fields = set(attr for attr in dir(self.model) if isinstance(getattr(self.model, attr, None), property) and not attr.startswith('_')) if self.fields: return property_fields & set(as_tuple(self.fields)) return property_fields.union(set(as_tuple(self.include))) - set(as_tuple(self.exclude))
gpl-3.0
-5,236,262,275,512,934,000
36.841076
277
0.616528
false
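A minimal sketch of wiring a `ModelResource` to a model via the `model` and `fields` attributes described in the file above; the `Comment` model, its import path, and the field names are hypothetical.

# Hypothetical model and resource; field names are illustrative only.
from djangorestframework.resources import ModelResource
from myapp.models import Comment          # assumed to exist elsewhere


class CommentResource(ModelResource):
    model = Comment                        # Django model this resource maps to
    fields = ('id', 'text', 'created')     # explicit output fields; overrides exclude/include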
asttra/pysces
pysces/PyscesLink.py
1
52634
""" PySCeS - Python Simulator for Cellular Systems (http://pysces.sourceforge.net) Copyright (C) 2004-2015 B.G. Olivier, J.M. Rohwer, J.-H.S Hofmeyr all rights reserved, Brett G. Olivier ([email protected]) Triple-J Group for Molecular Cell Physiology Stellenbosch University, South Africa. Permission to use, modify, and distribute this software is given under the terms of the PySceS (BSD style) license. See LICENSE.txt that came with this distribution for specifics. NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. Brett G. Olivier """ from pysces.version import __version__ __doc__ = ''' PyscesLink ---------- Interfaces to external software and API's, has replaced the PySCeS contrib classes. ''' # for METATOOLlink import os, re, cStringIO # for SBWWebLink import urllib, urllib2, getpass class SBWlink(object): """Generic access for local SBW services using SBWPython """ sbw = None psbw = None sbwModuleProxy = None moduleDict = None modules = None def __init__(self): try: import SBW as SBW import SBW.psbw as psbw ## reload(SBW) ## reload(psbw) self.sbw = SBW self.psbw = SBW.psbw self.sbwModuleProxy = SBW.sbwModuleProxy self.moduleDict = SBW.sbwModuleProxy.moduleDict self.modules = [] for m in self.moduleDict: if self.moduleDict[m].pythonName not in ['python']: self.SBW_exposeAll(self.moduleDict[m]) self.modules.append(self.moduleDict[m].pythonName) setattr(self, self.moduleDict[m].pythonName, self.moduleDict[m]) print '\nSBWlink established.' except Exception, ex: print ex print '\nSBWlink not established.' def SBW_exposeAll(self, module): for s in module.services: s = getattr(module, s) for m in s.methods: getattr(s, m) def SBW_getActiveModules(self): idlist = [] namelst = [] for id in self.psbw.getModuleIdList(): idlist.append(id) namelst.append(self.psbw.getModuleName(id)) for id in self.moduleDict.keys(): if id not in idlist: self.moduleDict.pop(id) for name in range(len(self.modules)-1,-1,-1): if self.modules[name] not in namelst: delattr(self, self.modules[name]) self.modules.pop(name) for name in namelst: if name not in self.modules: self.SBW_loadModule(name) return namelst def SBW_loadModule(self, module_name): ans = 'Y' if module_name[-3:] == 'GUI': ans = raw_input('Warning! This may hang the console\n\yPress \'Y\' to continue: ') if ans == 'Y': module_id = self.psbw.SBWGetModuleInstance(module_name) assert module_id != None, '\nUnknow module, %s' % module_name module = self.sbwModuleProxy.ModuleProxy(module_id) self.SBW_exposeAll(module) if not self.moduleDict.has_key(module_id): print '<PySCeS_SBW> Adding ' + module.pythonName + ' to ModuleProxy (id=' + str(module_id) + ')' self.moduleDict.update({module_id : module}) if module.pythonName not in self.modules: print '<PySCeS_SBW> Adding ' + module.pythonName + ' to SBWlink' self.modules.append(module.pythonName) setattr(self, module.pythonName, module) else: print '\nModule %s not loaded' % module_name class SBWLayoutWebLink(object): """Enables access to DrawNetwork and SBMLLayout web services at www.sys-bio.org""" sbwhost = '128.208.17.26' sbml = None sbmllayout = None svg = None DEBUGMODE = False DEBUGLEVEL = 1 DRAWNETWORKLOADED = False LAYOUTMODULELOADED = False def setProxy(self, **kwargs): """Set as many proxy settings as you need. You may supply a user name without a password in which case you will be prompted to enter one (once) when required (NO guarantees, implied or otherwise, on password security AT ALL). 
Arguments can be: user = 'daUser', pwd = 'daPassword', host = 'proxy.paranoid.net', port = 3128 """ proxy_info = {} for k in kwargs.keys(): proxy_info.update({k : kwargs[k]}) if proxy_info.has_key('user') and not proxy_info.has_key('pwd'): proxy_info.update({'pwd' : getpass.getpass()}) proxy_support = urllib2.ProxyHandler({"http" : "http://%(user)s:%(pwd)s@%(host)s:%(port)d" % proxy_info}) opener = urllib2.build_opener(proxy_support, urllib2.HTTPHandler) urllib2.install_opener(opener) del proxy_info, proxy_support def loadSBMLFileFromDisk(self, File, Dir=None): if Dir != None: path = os.path.join(Dir, File) else: path = File if os.path.exists(path): self.sbmllayout = None self.svg = None self.DRAWNETWORKLOADED = False self.LAYOUTMODULELOADED = False sbmlF = file(path, 'r') self.sbml = sbmlF.read() sbmlF.close() return True else: print "%s is an invalid path" % path return False def loadSBMLFromString(self, str): self.sbmllayout = None self.svg = None self.DRAWNETWORKLOADED = False self.LAYOUTMODULELOADED = False self.sbml = str return True def urlGET(self, host, urlpath): url = 'http://%s%s' % (host,urlpath) con = urllib2.urlopen(url) resp = con.read() if self.DEBUGMODE: print con.headers if self.DEBUGMODE and self.DEBUGLEVEL == 2: print resp con.close() return resp def urlPOST(self, host, urlpath, data): assert type(data) == dict, '\nData must be a dictionary' url = 'http://%s%s' % (host, urlpath) con = urllib2.urlopen(url, urllib.urlencode(data)) resp = con.read() if self.DEBUGMODE: print con.headers if self.DEBUGMODE and self.DEBUGLEVEL == 2: print resp con.close() return resp def getVersion(self): print 'Inspector.getVersion()' ver = self.urlGET(self.sbwhost, '/generate/Inspector.asmx/getVersion') ver = ver.replace('<?xml version="1.0" encoding="utf-8"?>','') ver = ver.replace('<string xmlns="http://www.sys-bio.org/">','') ver = ver.replace('</string>','') return ver def drawNetworkLoadSBML(self): print 'DrawNetwork.loadSBML()' assert self.sbml != None, '\nNo SBML file loaded' data = {'var0' : self.sbml} self.DRAWNETWORKLOADED = True return self.urlPOST(self.sbwhost, '/generate/DrawNetwork.asmx/loadSBML', data) def drawNetworkGetSBMLwithLayout(self): print 'DrawNetwork.getSBML()' assert self.DRAWNETWORKLOADED, '\nSBML not loaded into DrawNetwork module' sbml = self.urlGET(self.sbwhost, '/generate/DrawNetwork.asmx/getSBML') sbml = sbml.replace('&gt;','>') sbml = sbml.replace('&lt;','<') sbml = sbml.replace('''<string xmlns="http://www.sys-bio.org/"><?xml version="1.0" encoding="utf-8"?>''','') sbml = sbml.replace('</string>','') self.sbmllayout = sbml return True def layoutModuleLoadSBML(self): print 'SBMLLayoutModule.loadSBML()' assert self.sbmllayout != None, '\nNo SBML Layout loaded' data = {'var0' : self.sbmllayout} self.LAYOUTMODULELOADED = True return self.urlPOST(self.sbwhost, '/generate/SBMLLayoutModule.asmx/loadSBML', data) def layoutModuleGetSVG(self): assert self.LAYOUTMODULELOADED, '\nSBML not loaded into SBMLLayout module' svg = self.urlGET(self.sbwhost, '/generate/SBMLLayoutModule.asmx/getSVG') svg = svg.replace('&gt;','>') svg = svg.replace('&lt;','<') svg = svg.replace('''<string xmlns="http://www.sys-bio.org/">''','') svg = svg.replace('''<?xml version="1.0" encoding="utf-8"?>''','') svg = svg.replace('</string>','') self.svg = svg return True def getSBML(self): return self.sbml def getSBMLlayout(self): return self.sbmllayout def getSVG(self): return self.svg class METATOOLlink(object): """New interface to METATOOL binaries""" __metatool_path__ = None __mod__ = None 
__emode_exe_int__ = None __emode_exe_dbl__ = None __emode_intmode__ = 0 __emode_userout__ = 0 __emode_file__ = None __metatool_file__ = None #EModes = '' def __init__(self, mod, __metatool_path__=None): # Initialise elementary modes self.__mod__ = mod if __metatool_path__ == None: self.__metatool_path__ = os.path.join(mod.__pysces_directory__, 'metatool') else: self.__metatool_path__ = os.path.join(__metatool_path__, 'metatool') assert self.__metatool_path__ != None, '\nPySCeS not found' self.__emode_file__ = self.__mod__.ModelFile[:-4] + '_emodes' self.__metatool_file__ = self.__mod__.ModelFile[:-4] + '_metatool' if os.sys.platform == 'win32': self.__emode_exe_int__ = os.path.join(self.__metatool_path__,'meta43_int.exe') self.__emode_exe_dbl__ = os.path.join(self.__metatool_path__,'meta43_double.exe') else: self.__emode_exe_int__ = os.path.join(self.__metatool_path__,'meta43_int') self.__emode_exe_dbl__ = os.path.join(self.__metatool_path__,'meta43_double') if os.path.exists(self.__emode_exe_int__): print 'Using METATOOL int', self.__emode_intmode__ = True else: self.__emode_exe_int__ = None if os.path.exists(self.__emode_exe_dbl__): print '\b\b\b\bdbl' self.__emode_intmode__ = False else: self.__emode_exe_dbl__ = None assert self.__emode_exe_dbl__ != None or self.__emode_exe_int__ != None, "\nMETATOOL binaries not available" def doEModes(self): """ doEModes() Calculate the elementary modes by way of an interface to MetaTool. METATOOL is a C program developed from 1998 to 2000 by Thomas Pfeiffer (Berlin) in cooperation with Stefan Schuster and Ferdinand Moldenhauer (Berlin) and Juan Carlos Nuno (Madrid). http://www.biologie.hu-berlin.de/biophysics/Theory/tpfeiffer/metatool.html Arguments: None """ print 'METATOOL is a C program developed from 1998 to 2000 by Thomas Pfeiffer (Berlin)' print 'in cooperation with Stefan Schuster and Ferdinand Moldenhauer (Berlin) and Juan Carlos Nuno (Madrid).' 
print 'http://www.biologie.hu-berlin.de/biophysics/Theory/tpfeiffer/metatool.html' goMode = 0 fileIn = 'pysces_metatool.dat' fileOut = 'pysces_metatool.out' goMode = 1 if goMode == 1: # Build MetaTool input file File = open(os.path.join(self.__mod__.ModelOutput,fileIn),'w') # Determine type of reaction out1 = [] for key in self.__mod__.__nDict__: #print key #print self.__mod__.__nDict__[key]['Type'] out1.append((key,self.__mod__.__nDict__[key]['Type'])) #print '\nExtracting metatool information from network dictionary ...\n' File.write('-ENZREV\n') for x in out1: if x[1] == 'Rever': File.write(x[0] + ' ') File.write('\n\n') File.write('-ENZIRREV\n') for x in out1: if x[1] == 'Irrev': File.write(x[0] + ' ') File.write('\n\n') File.write('-METINT\n') for x in self.__mod__.__species__: File.write(x + ' ') File.write('\n\n') File.write('-METEXT\n') for x in self.__mod__.__fixed_species__: File.write(x + ' ') File.write('\n\n') output = [] allInt = 1 for x in self.__mod__.__nDict__: reList = self.__mod__.__nDict__[x]['Reagents'] subs = '' prods = '' #print 'Reaction: ' + x for y in reList: if self.__emode_intmode__ == 1: # use int elementary modes if abs(int(reList[y]))/abs(float(reList[y])) != 1.0: print 'INFO: Coefficient not integer = ' + `reList[y]` allInt = 0 if reList[y] < 0: #print y.replace('self.','') + ' : substrate' if abs(int(reList[y])) != 1: subs += `abs(int(reList[y]))` + ' ' subs += y.replace('self.','') subs += ' + ' else: #print y.replace('self.','') + ' : product ' if abs(int(reList[y])) != 1: prods += `abs(int(reList[y]))` + ' ' prods += y.replace('self.','') prods += ' + ' #output.append(x + ' : ' + subs[:-3] + ' = ' + prods[:-3] + ' .') else: # use float/double elementary mode if reList[y] < 0.0: #print y.replace('self.','') + ' : substrate' if abs(float(reList[y])) != 1.0: subs += `abs(float(reList[y]))` + ' ' subs += y.replace('self.','') subs += ' + ' else: #print y.replace('self.','') + ' : product ' if abs(float(reList[y])) != 1.0: prods += `abs(float(reList[y]))` + ' ' prods += y.replace('self.','') prods += ' + ' output.append(x + ' : ' + subs[:-3] + ' = ' + prods[:-3] + ' .') File.write('-CAT\n') for x in output: File.write(x + '\n') File.write('\n') File.flush() File.close() if allInt == 1: if self.__emode_intmode__ == 1: eModeExe = self.__emode_exe_int__ else: eModeExe = self.__emode_exe_dbl__ print '\nMetatool running ...\n' ######### UPDATE: # Actually works fine on windows and posix - johann 20081128 print 'Generic run' os.spawnl(os.P_WAIT, eModeExe, eModeExe, os.path.join(self.__mod__.ModelOutput,fileIn), os.path.join(self.__mod__.ModelOutput,fileOut)) print '\nMetatool analysis complete\n' # Parse MetaTool output file and store the result in a string go = 0 go2 = 0 result = '' end = '' try: file2 = open(os.path.join(self.__mod__.ModelOutput,fileOut), 'r') for line in file2: c = re.match('ELEMENTARY MODES',line) d = re.match(' enzymes',line) e = re.match('The elementary mode',line) f = re.match('\n',line) g = re.match('The elementary',line) if c != None: go = 1 go2 = 0 if d != None: go2 = 1 if e != None: go2 = 0 if go == 1 and go2 == 1 and f == None: line = line.replace('reversible','\n reversible\n') line = line.replace('ir\n ','\n ir') if self.__emode_intmode__ == 1: line = line.replace('] ',']\n ') else: line = line.replace(') ',')\n ',1) result += line if go == 1 and g != None: end += line result += end result += '\n' file2.close() if self.__emode_userout__ == 1: fileo = open(os.path.join(self.__mod__.ModelOutput,self.__metatool_file__) + '.in','w') filer 
= open(os.path.join(self.__mod__.ModelOutput,fileIn),'r') for line in filer: fileo.write(line) fileo.write('\n\n') filer.close() fileo.close() filer = open(os.path.join(self.__mod__.ModelOutput,fileOut),'r') fileo = open(os.path.join(self.__mod__.ModelOutput,self.__metatool_file__) + '.out','w') for line in filer: fileo.write(line) filer.close() fileo.close() os.remove(os.path.join(self.__mod__.ModelOutput,fileIn)) os.remove(os.path.join(self.__mod__.ModelOutput,fileOut)) except Exception, EX: print 'doEmode:', EX print 'WARNING: Unable to open MetaTool output file\nPlease check the MetaTool executables: ' if os.name == 'posix': print '/MetaTool/meta43_double /MetaTool/meta43_int\nand their permissions' else: print '/MetaTool/meta43_double.exe /MetaTool/meta43_int.exe' else: print '\nINFO: non-integer coefficients\ \nTry using the double eMode function: self.__emode_intmode__=0' result = 'Elementary modes not calculated\n' else: print '\nNo elementary mode calculation possible - no meta43_xxx.exe' result = 'Elementary modes not calculated\n' self.EModes = result def getEModes(self): """ getEModes() Returns the elementary modes as a linked list of fluxes """ try: a = self.EModes FF = cStringIO.StringIO() FF.write(self.EModes) FF.reset() output = [] for line in FF: if re.match(' ',line) and not re.match(' reversible',line) and not re.match(' irreversible',line): tmp = [el for el in line.replace('\n','').split(' ') if el != ''] tmpOut = [] skip = False for el in range(len(tmp)): if skip: skip = False elif tmp[el][0] != '(': tmpOut.append(tmp[el]) elif tmp[el][0] == '(': tmpOut.append(tmp[el]+')'+tmp[el+1][:-1]) skip = True output.append(tmpOut) return output except AttributeError, atx: print atx print '\nINFO: Please run doEModes() first\n' def showEModes(self,File=None): """ showEModes(File=None) Print the results of an elementary mode analysis, generated with doEModes(), to screen or file. 
Arguments: File [default=None]: Boolean, if True write parsed elementary modes to file """ try: if File != None: #assert type(File) == file, 'showEmodes() needs an open file object' print '\nElementary modes written to file\n' f = open(os.path.join(self.__mod__.ModelOutput,self.__emode_file__ + '.out'),'w') f.write('\n## Elementary modes\n') f.write(self.EModes) f.close() else: print '\nElementary modes\n' print self.EModes except AttributeError, atx: print atx print '\nINFO: Please run doEModes() first\n' #stochsim link ''' _HAVE_STOMPY = False _STOMPY_LOAD_ERROR = '' try: ## import stompy import stochpy as stompy _HAVE_STOMPY = True except Exception, ex: _STOMPY_LOAD_ERROR = '%s' % ex _HAVE_STOMPY = False class StomPyInterface(object): """ StomPy interface to PySCeS this may move to pysces.link in the future """ SSA = None SSA_REACTIONS = None SSA_SPECIES = None stompy = None _MOD2PSC = None TMP_FNAME = None TMP_PATH = None MODEL_PATH = None OUTPUT_PATH = None STP_IS_TIME_SIM = False STP_METHOD = 'Direct' STP_TIMEEND = 1 STP_TRAJ = 1 STP_INTERACTIVE = True STP_TRACK_PROPENSITIES = True STP_WAITING_TIMES = True STP_STEPS = 10 STP_INITIAL_SPECIES = True STP_KEEP_PSC_FILES = False def __init__(self, model_path, output_path): """ An interface class to the StomPy stochastic simulator - *model_path* the default PySCeS model directory - *output_path* the default PySCeS output directory """ self.stompy = stompy self.OUTPUT_PATH = output_path self.MODEL_PATH = model_path self.TMP_PATH = os.path.join(model_path, 'orca') self._MOD2PSC = interface.writeMod2PSC def setProperty(self, **kwargs): """ Sets a StomPy simulation parameter - *method* [default='Direct'] select simulation algorithm - *trajectories* [default=1] - *interactive* [default=True] - *track_propensities* [default=True] - *steps* [default=10] """ ## print kwargs if kwargs.has_key('method'): self.STP_METHOD = kwargs['method'] ## print '%s = %s' % ('method', kwargs['method']) if kwargs.has_key('trajectories'): self.STP_TRAJ = kwargs['trajectories'] self.STP_TRAJ = 1 # TODO I need to look into this ## print 'Currently only single trajectories are supported via the PySCeS interface' ## print '%s = %s' % ('trajectories', self.STP_TRAJ) if kwargs.has_key('interactive'): self.STP_INTERACTIVE = kwargs['interactive'] ## print '%s = %s' % ('interactive', kwargs['interactive']) if kwargs.has_key('track_propensities'): self.STP_TRACK_PROPENSITIES = kwargs['track_propensities'] ## print '%s = %s' % ('track_propensities', kwargs['track_propensities']) if kwargs.has_key('steps'): self.STP_STEPS = kwargs['steps'] ## print '%s = %s' % ('steps', kwargs['steps']) if kwargs.has_key('species_initial'): self.STP_INITIAL_SPECIES = kwargs['initial_species'] ## print '%s = %s' % ('initial_species', kwargs['initial_species']) if kwargs.has_key('keep_psc_files'): self.STP_KEEP_PSC_FILES = kwargs['keep_psc_files'] ## print '%s = %s' % ('keep_psc_files', kwargs['keep_psc_files']) def initModelFromMod(self, pscmod, iValues=False): """ Initialise a StomPy SSA instance from a PySCeS model. 
- *pscmod* an initialised PySCeS model - *iValues* [default=False] use initial values (not current) """ self.TMP_FNAME = str(time.time()).replace('.','')+'.psc' if self.STP_INITIAL_SPECIES: for s in pscmod.species: setattr(pscmod, s, pscmod.__sDict__[s]['initial']) self._MOD2PSC(pscmod, self.TMP_FNAME, self.TMP_PATH, iValues=iValues) self.SSA = self.stompy.SSA(Method=self.STP_METHOD, File=self.TMP_FNAME, dir=self.TMP_PATH, IsInteractive=self.STP_INTERACTIVE) self.SSA.Trajectories(self.STP_TRAJ) self.SSA_REACTIONS = self.SSA.SSA.rate_names self.SSA_SPECIES = self.SSA.SSA.species if self.STP_TRACK_PROPENSITIES: self.SSA.TrackPropensities() try: print os.path.join(self.TMP_PATH, self.TMP_FNAME) if not self.STP_KEEP_PSC_FILES and self.TMP_PATH != None and self.TMP_FNAME != None: os.remove(os.path.join(self.TMP_PATH, self.TMP_FNAME)) except: print 'Could not delete intermediatery StomPy PSC file: %s' % os.path.join(self.TMP_PATH, self.TMP_FNAME) self.TMP_FNAME = None print 'StomPy model ... initialised.' def runTimeSimulation(self, pscmod, endtime=None, method='Direct', iValues=False): """ Run a stochastic simulation - *pscmod* and instanitiated PySCeS model - *endtime* [default=1] the end time **Note: this could take a long time i.e. generate ahuge amount of steps** - *method* [default='Direct'] select the simulation method, one of: - *Direct* - *FirstReactionMethod* - *NextReactionMethod* - *TauLeaping* - *iValues* [default=False] use initial values (not current) """ if method not in ['Direct','FirstReactionMethod','NextReactionMethod','TauLeaping']: print 'Method: %s does not exist using - Direct' % method self.STP_METHOD = 'Direct' else: self.STP_METHOD = method if endtime != None: self.STP_TIMEEND = endtime self.initModelFromMod(pscmod, iValues=iValues) ## self.SSA.Timesteps(self.STP_STEPS) self.SSA.Endtime(self.STP_TIMEEND) self.SSA.Run() self.STP_IS_TIME_SIM = True ## self.SSA.PlotTimeSim() print 'StomPy time simulation ... done.' # TODO STOCHPY ## if self.SSA.SSA.output[0][-1] == '': ## self.SSA.SSA.output[0][-1] = 0.0 ## sim_dat = numpy.array(self.SSA.SSA.output, 'd') ## pscmod.data_stochsim = IntegrationStochasticDataObj() ## pscmod.data_stochsim.setTime(sim_dat[:,0]) ## pscmod.data_stochsim.setSpecies(sim_dat[:,1:-1], self.SSA_SPECIES) pscmod.data_stochsim = self.SSA.data_stochsim if self.STP_WAITING_TIMES: wtimes, wt_lbls = self.getWaitingtimesData(reactions=None,lbls=True) pscmod.data_stochsim.setWaitingtimes(wtimes, wt_lbls) if self.STP_TRACK_PROPENSITIES: pscmod.data_stochsim.setPropensities(self.SSA.SSA.propensities_output) pscmod.data_stochsim.TYPE_INFO = 'Stochastic' def runStepSimulation(self, pscmod, steps=None, method='Direct', iValues=False): """ Run a stochastic simulation - *pscmod* and instanitiated PySCeS model - *steps* [default=10] the number of steps to simulate - *method* [default='Direct'] select the simulation method, one of: - *Direct* - *FirstReactionMethod* - *NextReactionMethod* - *TauLeaping* - *iValues* [default=False] use initial values (not current) """ if method not in ['Direct','FirstReactionMethod','NextReactionMethod','TauLeaping']: print 'Method: %s does not exist using - Direct' % method self.STP_METHOD = 'Direct' else: self.STP_METHOD = method if steps != None: self.STP_STEPS = steps self.initModelFromMod(pscmod, iValues=iValues) self.SSA.Timesteps(self.STP_STEPS) ## self.SSA.Endtime(self.STP_TIMEEND) self.SSA.Run() self.STP_IS_TIME_SIM = False print 'StomPy step simulation ... done.' 
## print self.SSA.SSA.output[0] ## print self.SSA.SSA.output[1] ## print self.SSA.SSA.output[-1] ## header_line = self.SSA.SSA.output.pop(0) ## if self.SSA.SSA.output[0][-1] == '': ## self.SSA.SSA.output[0][-1] = 0.0 ## sim_dat = numpy.array(self.SSA.SSA.output, 'd') ## pscmod.data_stochsim = IntegrationStochasticDataObj() ## pscmod.data_stochsim.setTime(sim_dat[:,0]) ## pscmod.data_stochsim.setSpecies(sim_dat[:,1:-1], self.SSA_SPECIES) pscmod.data_stochsim = self.SSA.data_stochsim if self.STP_WAITING_TIMES: wtimes, wt_lbls = self.getWaitingtimesData(reactions=None,lbls=True) pscmod.data_stochsim.setWaitingtimes(wtimes, wt_lbls) if self.STP_TRACK_PROPENSITIES: pscmod.data_stochsim.setPropensities(self.SSA.SSA.propensities_output) pscmod.data_stochsim.TYPE_INFO = 'Stochastic' def getWaitingtimesData(self,reactions=None,lbls=False): """ Plots the waiting times for each reaction in the model. Makes use of ObtainWaitingtimes to derive the waiting times out of the SSA output. Input: - *reactions* [default=0] a list of reactions to plot defualts to all reactions - *traj* [default=0] trajectory to plot (defaults to first one) - *lbls* [default=False] if True return (data_array, column_labels) otherwise just data_array This method is derived from StomPy 0.9 (http://stompy.sf.net) Analysis.py """ if self.SSA.IsTauLeaping: print 'INFO: waiting times not available when method is Tau Leaping' if not lbls: return None else: return None, None self.SSA.GetWaitingtimes() if reactions == None: reactions = self.SSA_REACTIONS vect = [] vect_lbls = [] for r in reactions: if r in self.SSA_REACTIONS: vect.append(self.SSA_REACTIONS.index(r)+1) vect_lbls.append('wt'+str(r)) else: print "INFO: '%s' is not a valid reaction name" % r OUTPUT = [] ## for t in range(len(self.SSA.data_stochsim.waiting_times)): T_OUTPUT = [] for i in vect: if self.SSA.data_stochsim.waiting_times.has_key(i): waiting_time = self.SSA.data_stochsim.waiting_times[i] if len(waiting_time) > 1: # At least 2 waiting times are necessary per reaction T_OUTPUT.append(self.stompy.modules.Analysis.LogBin(waiting_time, 1.5)) # Create logarithmic bins else: T_OUTPUT.append(None) else: T_OUTPUT.append(None) OUTPUT.append(T_OUTPUT) if not lbls: return OUTPUT else: return OUTPUT, vect_lbls class IntegrationStochasticDataObj(object): """ This class is specifically designed to store the results of a stochastic time simulation It has methods for setting the Time, Labels, Species and Propensity data and getting Time, Species and Rate (including time) arrays. However, of more use: - getOutput(\*args) feed this method species/rate labels and it will return an array of [time, sp1, r1, ....] - getDataAtTime(time) the data generated at time point "time". 
- getDataInTimeInterval(time, bounds=None) more intelligent version of the above returns an array of all data points where: time-bounds <= time <= time+bounds """ time = None waiting_times = None species = None propensities = None xdata = None time_label = 'Time' waiting_time_labels = None species_labels = None propensities_labels = None xdata_labels = None HAS_SPECIES = False HAS_WAITING_TIMES = False HAS_PROPENSITIES = False HAS_TIME = False HAS_XDATA = False IS_VALID = True TYPE_INFO = 'Stochastic' def setLabels(self, species): """ Set the species - *species* a list of species labels """ self.species_labels = species def setTime(self, time, lbl=None): """ Set the time vector - *time* a 1d array of time points - *lbl* [default=None] is "Time" set as required """ self.time = time.reshape(len(time), 1) self.HAS_TIME = True if lbl != None: self.time_label = lbl def setSpecies(self, species, lbls=None): """ Set the species array - *species* an array of species vs time data - *lbls* [default=None] a list of species labels """ self.species = species self.HAS_SPECIES = True if lbls != None: self.species_labels = lbls def setWaitingtimes(self, waiting_times, lbls=None): """ Set the `waiting_times` this data structure is not an array but a nested list of: waiting time log bins per reaction per trajectory:: waiting_times = [traj_1, ..., traj_n] traj_1 = [wt_J1, ..., wt_Jn] # in order of SSA_REACTIONS wt_J1 = (xval, yval, nbin) xval =[x_1, ..., x_n] yval =[y_1, ..., y_n] nbin = n - *waiting_times* a list of waiting times - *lbls* [default=None] a list of matching reaction names """ self.waiting_times = waiting_times self.HAS_WAITING_TIMES = True if lbls != None: self.waiting_time_labels = lbls def setPropensities(self, propensities, lbls=None): """ Sets an array of propensities. 
- *propensities* a list of propensities - *lbls* [default=None] a list of matching reaction names """ if lbls == None: LB = copy.copy(propensities[0]) lbls = LB[1:] lbls = ['p'+str(r) for r in lbls] P_ARR = numpy.zeros((len(propensities), len(propensities[0])-1), 'd') P_ARR[-1,:] = numpy.NaN for r in range(1, P_ARR.shape[0]): P_ARR[r, :] = propensities[r][1:] self.propensities = P_ARR self.HAS_PROPENSITIES = True if lbls != None: self.propensities_labels = lbls ## print self.propensities_labels ## print self.propensities def setXData(self, xdata, lbls=None): """ Sets an array of extra simulation data - *xdata* an array of xdata vs time - *lbls* [default=None] a list of xdata labels """ self.xdata = xdata self.HAS_XDATA = True if lbls != None: self.xdata_labels = lbls def getTime(self, lbls=False): """ Return the time vector - *lbls* [default=False] return only the time array or optionally both the time array and time label """ output = None if self.HAS_TIME: output = self.time.reshape(len(self.time),) if not lbls: return output else: return output, [self.time_label] def getSpecies(self, lbls=False): """ Return an array fo time+species - *lbls* [default=False] return only the time+species array or optionally both the data array and a list of column label """ output = None if self.HAS_SPECIES: output = numpy.hstack((self.time, self.species)) labels = [self.time_label]+self.species_labels else: output = self.time labels = [self.time_label] if not lbls: return output else: return output, labels def getWaitingtimes(self, lbls=False, traj=[]): """ Return waiting times, time+waiting_time array - *lbls* [default=False] return only the time+waiting_time array or optionally both the data array and a list of column label - *traj* [default=[0]] return the firs or trajectories defined in this list """ output = None labels = None if self.HAS_WAITING_TIMES: output = [] if len(traj) == 0: traj = range(len(self.waiting_times)) ## if len(traj) == 1: ## output = self.waiting_times[0] ## else: for t in traj: output.append(self.waiting_times[t]) labels = self.waiting_time_labels else: output = [] labels = [] if not lbls: return output else: return output, labels def getPropensities(self, lbls=False): """ Return time+propensity array - *lbls* [default=False] return only the time+propensity array or optionally both the data array and a list of column label """ #assert self.propensities != None, "\nNo propensities" output = None if self.HAS_PROPENSITIES: print self.time.shape print self.propensities.shape output = numpy.hstack((self.time, self.propensities)) labels = [self.time_label]+self.propensities_labels else: output = self.time labels = [self.time_label] if not lbls: return output else: return output, labels def getXData(self, lbls=False): """ Return time+xdata array - *lbls* [default=False] return only the time+xdata array or optionally both the data array and a list of column label """ output = None if self.HAS_XDATA: output = numpy.hstack((self.time, self.xdata)) labels = [self.time_label]+self.xdata_labels else: output = self.time labels = [self.time_label] if not lbls: return output else: return output, labels def getDataAtTime(self, time): """ Return all data generated at "time" - *time* the required exact time point """ #TODO add rate rule data t = None sp = None ra = None ru = None xd = None temp_t = self.time.reshape(len(self.time),) for tt in range(len(temp_t)): if temp_t[tt] == time: t = tt if self.HAS_SPECIES: sp = self.species.take([tt], axis=0) if self.HAS_PROPENSITIES: ru = 
self.propensities.take([tt], axis=0) if self.HAS_XDATA: xd = self.xdata.take([tt], axis=0) break output = None if t is not None: output = numpy.array([[temp_t[t]]]) if sp is not None: output = numpy.hstack((output,sp)) if ra is not None: output = numpy.hstack((output,ra)) if ru is not None: output = numpy.hstack((output,ru)) if xd is not None: output = numpy.hstack((output,xd)) return output def getDataInTimeInterval(self, time, bounds=None): """ Returns an array of all data in interval: time-bounds <= time <= time+bounds where bound defaults to stepsize - *time* the interval midpoint - *bounds* [default=None] interval halfspan defaults to stepsize """ temp_t = self.time.reshape(len(self.time),) if bounds == None: bounds = temp_t[1] - temp_t[0] c1 = (temp_t >= time-bounds) c2 = (temp_t <= time+bounds) print 'Searching (%s:%s:%s)' % (time-bounds, time, time+bounds) t = [] sp = None ra = None for tt in range(len(c1)): if c1[tt] and c2[tt]: t.append(tt) output = None if len(t) > 0: output = self.time.take(t) output = output.reshape(len(output),1) if self.HAS_SPECIES and self.HAS_TIME: output = numpy.hstack((output, self.species.take(t, axis=0))) if self.HAS_PROPENSITIES: output = numpy.hstack((output, self.propensities.take(t, axis=0))) if self.HAS_XDATA: output = numpy.hstack((output, self.xdata.take(t, axis=0))) return output def getAllSimData(self,lbls=False): """ Return an array of time + all available simulation data - *lbls* [default=False] return only the data array or (data array, list of labels) """ labels = [self.time_label] if self.HAS_SPECIES and self.HAS_TIME: output = numpy.hstack((self.time, self.species)) labels += self.species_labels if self.HAS_PROPENSITIES: output = numpy.hstack((output, self.propensities)) labels += self.propensities_labels if self.HAS_XDATA: output = numpy.hstack((output, self.xdata)) labels += self.xdata_labels if not lbls: return output else: return output, labels def getSimData(self, *args, **kwargs): """ Feed this method species/xdata labels and it will return an array of [time, sp1, ....] - 'speces_l', 'xdatal' ... 
- *lbls* [default=False] return only the data array or (data array, list of labels) """ output = self.time if kwargs.has_key('lbls'): lbls = kwargs['lbls'] else: lbls = False lout = [self.time_label] for roc in args: if self.HAS_SPECIES and roc in self.species_labels: lout.append(roc) output = numpy.hstack((output, self.species.take([self.species_labels.index(roc)], axis=-1))) if self.HAS_PROPENSITIES and roc in self.propensities_labels: lout.append(roc) output = numpy.hstack((output, self.propensities.take([self.propensities_labels.index(roc)], axis=-1))) if self.HAS_XDATA and roc in self.xdata_labels: lout.append(roc) output = numpy.hstack((output, self.xdata.take([self.xdata_labels.index(roc)], axis=-1))) if not lbls: return output else: return output, lout class PysMod: #STOMPY INSERT START def StochSimPlot(self, plot='species', filename=None, title=None, log=None, format='points'): """ Plot the Stochastic simulation results, uses the new UPI pysces.plt interface: - *plot* [default='species'] output to plot, can be one of: - 'all' species and propensities - 'species' species - 'waiting_times' waiting_times - 'propensities' propensities - `['S1', 'R1', ]` a list of model attributes ('species') - *filename* [default=None] if defined file is exported to filename - *title* [default=None] the plot title - *log* [default=None] use log axis for axis 'x', 'y', 'xy' - *format* [default='lines'] use UPI or backend specific keys """ data = None labels = None allowedplots = ['all', 'species', 'propensities','waiting_times'] ## allowedplots = ['all', 'species', 'waiting_times'] if type(plot) != list and plot not in allowedplots: raise RuntimeError, '\nPlot must be one of %s not \"%s\"' % (str(allowedplots), plot) if plot == 'all': ## data, labels = self.data_stochsim.getSpecies(lbls=True) data, labels = self.data_stochsim.getAllSimData(lbls=True) elif plot == 'species': data, labels = self.data_stochsim.getSpecies(lbls=True) elif plot == 'propensities': data, labels = self.data_stochsim.getPropensities(lbls=True) ## data, labels = self.data_stochsim.getRates(lbls=True) elif plot == 'waiting_times': dataLst, labels = self.data_stochsim.getWaitingtimes(lbls=True) format='points' ## data, labels = self.data_stochsim.getRates(lbls=True) else: plot = [at for at in plot if at in self.__species__+[self.data_stochsim.time_label]+self.data_stochsim.propensities_labels] kwargs = {'lbls' : True} print plot if len(plot) > 0: data, labels = self.data_stochsim.getSimData(*plot, **kwargs) del allowedplots xu = 'Time (%(multiplier)s x %(kind)s x 10**%(scale)s)**%(exponent)s' % self.__uDict__['time'] if plot == 'waiting_times': xu = 'Inter-arrival time (%s)' % xu xrng_start = 0.1 xrng_end = 0.1 yrng_start = 0.1 yrng_end = 0.1 for wt in range(len(dataLst)): for d in range(len(dataLst[wt])): D = dataLst[wt][d] if plt.__USE_MATPLOTLIB__ and d > 0: plt.m.hold(True) if D != None and len(D[0]) > 0 and len(D[1]) > 0: data = numpy.vstack([D[0], D[1]]).transpose() if min(D[0]) < xrng_start and min(D[0]) > 0.0: xrng_start = min(D[0]) if max(D[0]) > xrng_end: xrng_end = max(D[0]) if min(D[1]) < yrng_start and min(D[1]) > 0.0: yrng_start = min(D[1]) if max(D[1]) > yrng_end: yrng_end = max(D[1]) plt.plotLines(data, 0, [1], titles=['Time']+[labels[d]], formats=[format]) plt.setRange('x', xrng_start*0.8, xrng_end*1.2) plt.setRange('y', yrng_start*0.8, yrng_end*1.2) if plt.__USE_MATPLOTLIB__: plt.m.hold(False) else: plt.plotLines(data, 0, range(1, data.shape[1]), titles=labels, formats=[format]) # set the x-axis range so that it 
is original range + 0.2*sim_end # this is a sceintifcally dtermned amount of space that is needed for the title at the # end of the line :-) - brett 20040209 RngTime = self.data_stochsim.getTime() end = RngTime[-1] + 0.2*RngTime[-1] plt.setRange('x', RngTime[0], end) del RngTime # For now StochPy results are plotted as Amounts directly from StochPy M = 'Amount' ## if self.__KeyWords__['Output_In_Conc']: ## M = 'Concentration' ## else: ## M = 'Amount (%(multiplier)s x %(kind)s x 10**%(scale)s)**%(exponent)s' % self.__uDict__['substance'] if plot == 'all': yl = 'Amount, propensities' elif plot == 'propensities': yl = 'Propensities' elif plot == 'waiting_times': yl = 'Frequency' if log == None: log = 'xy' elif plot == 'species': yl = '%s' % M else: yl = 'User defined' plt.setAxisLabel('x', xu) plt.setAxisLabel('y', yl) if log != None: plt.setLogScale(log) if title == None: plt.setGraphTitle('PySCeS/StochPy simulation (' + self.ModelFile + ') ' + time.strftime("%a, %d %b %Y %H:%M:%S")) else: plt.setGraphTitle(title) plt.replot() if filename != None: plt.export(filename, directory=self.ModelOutput, type='png') def doStochSim(self,end=10,mode='steps',method='Direct',trajectories=1): """ doStochSim(end=10, mode='steps', method='Direct') Run a stochastic simulation for until `end` is reached. This can be either steps or end time (which could be a *HUGE* number of steps). Arguments: - *end* [default=10] simulation end (steps or time) - *mode* [default='steps'] simulation mode, can be one of: - *steps* total number of steps to simulate - *time* simulate until time is reached - *method* [default='Direct'] stochastic algorithm, can be one of: - Direct - FirstReactionMethod - NextReactionMethod - TauLeaping """ if method not in ['Direct', 'FirstReactionMethod','NextReactionMethod','TauLeaping']: print 'Method "%s" not recognised using: "Direct"' % method method = 'Direct' if mode not in ['steps','time']: print 'Mode "%s" not recognised using: "steps"' % mode mode = 'steps' stompy_track_propensities = True stompy_keep_psc_files = False self.__STOMPY__.setProperty(method=method, trajectories=trajectories, interactive=True, track_propensities=stompy_track_propensities, keep_psc_files=stompy_keep_psc_files) if mode == 'time': self.__STOMPY__.runTimeSimulation(self, endtime=end, method=method) else: self.__STOMPY__.runStepSimulation(self, steps=end, method=method) def doStochSimPlot(self, end=10.0, mode='steps', method='Direct', plot='species', fmt='points', log=None, filename=None): """ doStochSimPlot(end=10.0, mode='steps', method='Direct', plot='species', fmt='points', log=None, filename=None) Run a stochastic simulation for until `end` is reached and plot the results. This can be either steps or end time (which could be a *HUGE* number of steps). 
Arguments: - *end* [default=10] simulation end (steps or time) - *mode* [default='steps'] simulation mode, can be one of: - *steps* total number of 'steps' to simulate - *time* simulate until 'time' is reached - *method* [default='Direct'] stochastic algorithm, can be one of: - Direct - FirstReactionMethod - NextReactionMethod - TauLeaping - *plot* [default='species'] output to plot, can be one of: - 'all' species and propensities - 'species' species - 'waiting_times' waiting_times - 'propensities' propensities - `['S1', 'R1', ]` a list of model attributes ('species') - *filename* [default=None] if defined file is exported to filename - *title* [default=None] the plot title - *log* [default=None] use log axis for axis 'x', 'y', 'xy' - *fmt* [default='lines'] use UPI or backend specific keys """ self.doStochSim(end=end, mode=mode, method=method,trajectories=1) self.StochSimPlot(plot='species', filename=filename, log=log, format=fmt) #STOMPY INSERT START if not _HAVE_STOMPY: def nofunc(self, *args, **kwargs): print '\nStochastic simulation not available, please download/install *StomPy* from: http://stompy.sf.net\n' PysMod.doStochSim = nofunc PysMod.doStochSimPlot = nofunc PysMod.StochSimPlot = nofunc '''
bsd-3-clause
621,127,197,163,312,800
36.223675
179
0.522609
false
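The record above patches StochPy-backed stochastic simulation onto PySCeS model objects (doStochSim, StochSimPlot, doStochSimPlot). The following is a minimal usage sketch, not part of the record: it assumes PySCeS is installed with the StomPy integration shown above available, and 'my_model.psc' is a placeholder model file, not a real one.

import pysces

# Assumes StomPy is importable; otherwise the record replaces these methods with 'nofunc'.
mod = pysces.model('my_model.psc')          # placeholder model file
mod.doStochSim(end=1000, mode='steps', method='Direct', trajectories=1)
# plot accepts 'species' (default), 'propensities', 'waiting_times' or 'all'.
mod.StochSimPlot(plot='species', format='points', filename='stochsim.png')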
6/jcrawler
mbga_parser.py
1
5359
#-*- encoding:utf-8 -*- """ Parse MBGA data to generate statistics. """ import glob import os import re import csv import numpy from PIL import Image from datetime import datetime DATA_PATH = "data/mbga/{0}/" PERMISSIONS = { "メンバー全員": 1 # all members ,"主催者+副管理": 2 # sponsors and moderators ,"主催者のみ": 3 # sponsors } EMOTIONS = { "normal": 1 ,"shy": 2 ,"smile": 3 ,"angry": 4 ,"cry": 5 } def analyze_groups(): group_files = files('group', '*.data') groups = [] min_dist, max_dist = None, 0 for i in range(1, len(group_files), 2): n_members, permissions = parse(group_files[i-1], meta_parser) dist = parse(group_files[i], time_dist_parser) if dist and dist > max_dist: max_dist = dist if dist and (dist < min_dist or min_dist is None): min_dist = dist if not dist: dist = 0 groups.append([n_members, permissions, dist]) min_members_dist, max_members_dist = None, 0 for i,g in enumerate(groups): if g[2] is 0: groups[i].append(0) continue n_members, dist = g[0], float(g[2]) # scale from 0.01 (least activity) to 1.0 (most activity) scaled_dist = 1 - ((dist - min_dist) / (max_dist - min_dist) * 0.99) groups[i][2] = scaled_dist members_dist = scaled_dist / n_members groups[i].append(members_dist) if members_dist < min_members_dist or min_members_dist is None: min_members_dist = members_dist if members_dist > max_members_dist: max_members_dist = members_dist for i,g in enumerate(groups): if g[3] is 0: continue members_dist = g[3] scaled_members_dist = (members_dist - min_members_dist) / (max_members_dist - min_members_dist) * 0.99 + 0.01 groups[i][3] = scaled_members_dist print "n groups: {0}".format(len(groups)) headers = ('n_members','permissions','distance','member_distance') write_csv('mbga_groups.csv', headers, groups) def meta_parser(path, data): meta = re.findall("<li>([^<]+)</li>", data) meta = map(lambda x: x.split(":")[1], meta) # return [number of members, permissions] return int(meta[0].split("人")[0]), PERMISSIONS[meta[2]] def analyze_people(): ids = people_ids() mins = {'diary':None, 'greet':None, 'disc':None, 'test':None} maxs = {'diary':0, 'greet':0, 'disc':0, 'test':0} people = [] for i,id in enumerate(ids): # gather all data files associated with a specific person ID p_files = files('person', '*_{0}_*.data'.format(id)) data = {} for f in p_files: ftype = f.split("_")[-1].split(".")[0] if ftype == "demo": data['age'] = parse(f, demographics_parser) elif ftype in ["diary","greet","disc","test"]: dist = parse(f, time_dist_parser) data[ftype] = dist if dist and (mins[ftype] is None or dist < mins[ftype]): mins[ftype] = dist if dist and dist > maxs[ftype]: maxs[ftype] = dist people.append(data) people_csv = [] for i,person in enumerate(people): person_csv = [] for dtype,value in person.items(): if dtype == "age" or not value: if not value: value = 0 person_csv.append((dtype, value)) continue dist = float(value) scaled_dist = 1 - ((dist - mins[dtype])/(maxs[dtype] - mins[dtype])*0.99) person_csv.append((dtype, scaled_dist)) person_csv.sort() people_csv.append(map(lambda x: x[-1], person_csv)) headers = ('age', 'diary', 'disc', 'greet', 'intro') write_csv('mbga_people.csv', headers, people_csv) def people_ids(): people_files = files('person', '*.data') n_people = len(people_files)/7 people_ids = [] id_regex = re.compile("[0-9]+_([0-9]+)_[0-9]+") for f in people_files: m = id_regex.search(f) people_ids.append(m.group(1)) return set(people_ids) def demographics_parser(path, data): data = data.split("<dt>") age = -1 for d in data: if d.startswith ("誕生日(年齢)"): # birthdate (age) age = re.findall("[0-9]+", 
re.findall("<dd>([^<]+)</dd>", d)[0])[-1] return age def time_dist_parser(path, data): dist = False extracted = path.split("/")[-1].split("_")[0] time_extracted = datetime.strptime(extracted, "%Y%m%d%H%M%S") dates = re.findall("[0-9]{4}/[0-9]+/[0-9]+ [0-9]+:[0-9]+", data) if dates: oldest = datetime.strptime(dates[-1], "%Y/%m/%d %H:%M") dist = time_extracted - oldest dist = (dist.days * 86400) + dist.seconds return dist def analyze_avatars(): avatars = files('avatar', '*.png') data = [] for i,a in enumerate(avatars): pic = numpy.array(Image.open(a)) num_black_pixels = len(numpy.where(pic[0:1][0:1] == 0)[0]) bg_mod = 0 if num_black_pixels == 150 else 1 emotion = a.split("/")[-1].split("_")[-1].split(".")[0] data.append([EMOTIONS[emotion], bg_mod]) headers = ("emotion", "bg_mod") write_csv('mbga_avatars.csv', headers, data) def parse(data_path, parser): f = open(data_path, 'r').read() return parser(data_path, f) def files(folder, pattern): return glob.glob(os.path.join(DATA_PATH.format(folder), pattern)) def write_csv(fname, headers, list_of_lists): f = open(fname, 'wb') writer = csv.writer(f) writer.writerow(headers) for l in list_of_lists: writer.writerow(l) f.close() if __name__=="__main__": #analyze_groups() #analyze_people() analyze_avatars()
mit
6,223,290,673,111,375,000
30.426036
113
0.604029
false
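analyze_groups() in the record above maps each group's most recent activity onto a (0.01, 1.0] range with a min-max formula before normalising per member. Below is a small self-contained sketch of that scaling step; the input distances are hypothetical and not taken from the MBGA data.

def scale_activity(dists):
    # Same formula as analyze_groups(): 1.0 = most recent activity, 0.01 = least recent.
    lo, hi = min(dists), max(dists)
    return [1 - ((d - lo) / float(hi - lo) * 0.99) for d in dists]

print(scale_activity([3600, 86400, 604800]))   # -> [1.0, ~0.864, 0.01]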
carthach/essentia
test/src/unittests/standard/test_panning.py
1
5359
#!/usr/bin/env python # Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra # # This file is part of Essentia # # Essentia is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the Free # Software Foundation (FSF), either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the Affero GNU General Public License # version 3 along with this program. If not, see http://www.gnu.org/licenses/ from essentia_test import * from math import log def cutFrames(params, input): framegen = FrameGenerator(input, frameSize = params['frameSize'], hopSize = params['hopSize'], startFromZero = params['startFromZero']) return [ frame for frame in framegen ] class TestPanning(TestCase): def testRegression(self): # After comparing the results of panning with jordi janner's matlab # code, we have concluded that although they are not the same exact # numbersi, the algorithm seems to show correct output. Differences may # be due to essentia not being compiled for doubles, or may com from # slight differences in fft outputs. Window types and/or sizes or # normalization seem not to be critical for the final result. # On this respect, files computed with essentia at the time of this # writing (11/11/2008) have been included in order to have a regression # test that passes the test. # 10/07/2013: We switched to different testing audio files and recomputed # the files with essentia assuming that Panning algo works correctly since # it seems no fixes were ever done to it since the original test (11/11/2008) # 2015-03-02: Recomputed the values again, as they are a bit different, probably # due to changes introduced after switching to newer LibAV testdir = join(filedir(), 'panning') expected = readMatrix(join(testdir, 'essentia', 'musicbox_essentia_panning.txt')) framesize = 8192 hopsize = 2048 zeropadding = 1 sampleRate = 44100 filename = join(testdata.audio_dir, 'recorded', 'musicbox.wav') left = MonoLoader(filename = filename, downmix = 'left', sampleRate = sampleRate)() right = MonoLoader(filename = filename, downmix = 'right', sampleRate = sampleRate)() frames_left = cutFrames({ 'frameSize': framesize, 'hopSize': hopsize, 'startFromZero': False },left) frames_right = cutFrames({ 'frameSize': framesize, 'hopSize': hopsize, 'startFromZero': False },right) spec = Spectrum() window = Windowing(size=framesize, zeroPadding=framesize*zeropadding, type = 'hann') panning = Panning(averageFrames=21) # matlab tests were generated with 21 (1second at 44100Hz) output = [] for i in range(len(frames_left)): output = panning(spec(window(frames_left[i])), spec(window(frames_right[i]))) # readVector messes up with the last digits, so for small numbers # we get errors above 1e-7: Is there a way to set precision in # python? 
self.assertAlmostEqualVectorFixedPrecision(expected[i], output[0], 2) def testMono(self): # checks that it works for mono signals loaded with audioloader, thus # right channel = 0 inputSize = 512 numCoeffs = 20 specLeft = ones(inputSize) specRight = zeros(inputSize) panning = Panning(numCoeffs = numCoeffs) n = 0 while n < 10: result = panning(specLeft, specRight) self.assertValidNumber(result.all()) n += 1 def testZero(self): inputSize = 512 numCoeffs = 20 expected = [-2.29359070e+02, -1.38243276e-03, -4.49713528e-01, 4.14732238e-03, 4.49690998e-01, -6.91174902e-03, -4.49645758e-01, 9.67658963e-03, 4.49589103e-01, -1.24400388e-02, -4.49509650e-01, 1.52042406e-02, 4.49419141e-01, -1.79666542e-02, -4.49305773e-01, 2.07295977e-02, 4.49181348e-01, -2.34902110e-02, -4.49033886e-01, 2.62518562e-02] spec = zeros(inputSize) panning = Panning(numCoeffs = numCoeffs)(spec, spec) self.assertAlmostEqualVector(panning[0], expected, 5e-4) def testEmpty(self): self.assertComputeFails(Panning(), [], []) def testInvalidParam(self): self.assertConfigureFails(Panning(), {'averageFrames': -1}) self.assertConfigureFails(Panning(), {'panningBins': 0}) self.assertConfigureFails(Panning(), {'numBands': 0}) self.assertConfigureFails(Panning(), {'numCoeffs': 0}) self.assertConfigureFails(Panning(), {'sampleRate': 0}) suite = allTests(TestPanning) if __name__ == '__main__': TextTestRunner(verbosity=2).run(suite)
agpl-3.0
-1,239,211,323,385,803,000
41.872
110
0.640418
false
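The test above drives essentia's Panning algorithm frame by frame and compares it against stored reference output. The following is a hedged sketch of the same analysis pipeline outside the test harness; it assumes essentia's standard Python bindings are installed, 'stereo.wav' is a placeholder path, and the parameters mirror the test.

from essentia.standard import MonoLoader, FrameGenerator, Windowing, Spectrum, Panning

frame_size, hop_size = 8192, 2048
left = MonoLoader(filename='stereo.wav', downmix='left', sampleRate=44100)()
right = MonoLoader(filename='stereo.wav', downmix='right', sampleRate=44100)()

window = Windowing(size=frame_size, zeroPadding=frame_size, type='hann')
spectrum = Spectrum()
panning = Panning(averageFrames=21)   # ~1 second of frames at 44100 Hz, as in the test

for fl, fr in zip(FrameGenerator(left, frameSize=frame_size, hopSize=hop_size, startFromZero=False),
                  FrameGenerator(right, frameSize=frame_size, hopSize=hop_size, startFromZero=False)):
    coeffs = panning(spectrum(window(fl)), spectrum(window(fr)))   # panning coefficients per frame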
Magnetic/luigi
test/range_test.py
1
62977
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import datetime import fnmatch from helpers import unittest, LuigiTestCase import luigi import mock from luigi.mock import MockTarget, MockFileSystem from luigi.tools.range import (RangeDaily, RangeDailyBase, RangeEvent, RangeHourly, RangeHourlyBase, RangeByMinutes, RangeByMinutesBase, _constrain_glob, _get_filesystems_and_globs, RangeMonthly) class CommonDateMinuteTask(luigi.Task): dh = luigi.DateMinuteParameter() def output(self): return MockTarget(self.dh.strftime('/n2000y01a05n/%Y_%m-_-%daww/21mm%H%Mdara21/ooo')) class CommonDateHourTask(luigi.Task): dh = luigi.DateHourParameter() def output(self): return MockTarget(self.dh.strftime('/n2000y01a05n/%Y_%m-_-%daww/21mm%Hdara21/ooo')) class CommonDateTask(luigi.Task): d = luigi.DateParameter() def output(self): return MockTarget(self.d.strftime('/n2000y01a05n/%Y_%m-_-%daww/21mm01dara21/ooo')) class CommonMonthTask(luigi.Task): m = luigi.MonthParameter() def output(self): return MockTarget(self.m.strftime('/n2000y01a05n/%Y_%maww/21mm01dara21/ooo')) task_a_paths = [ 'TaskA/2014-03-20/18', 'TaskA/2014-03-20/21', 'TaskA/2014-03-20/23', 'TaskA/2014-03-21/00', 'TaskA/2014-03-21/00.attempt.1', 'TaskA/2014-03-21/00.attempt.2', 'TaskA/2014-03-21/01', 'TaskA/2014-03-21/02', 'TaskA/2014-03-21/03.attempt-temp-2014-03-21T13-22-58.165969', 'TaskA/2014-03-21/03.attempt.1', 'TaskA/2014-03-21/03.attempt.2', 'TaskA/2014-03-21/03.attempt.3', 'TaskA/2014-03-21/03.attempt.latest', 'TaskA/2014-03-21/04.attempt-temp-2014-03-21T13-23-09.078249', 'TaskA/2014-03-21/12', 'TaskA/2014-03-23/12', ] task_b_paths = [ 'TaskB/no/worries2014-03-20/23', 'TaskB/no/worries2014-03-21/01', 'TaskB/no/worries2014-03-21/03', 'TaskB/no/worries2014-03-21/04.attempt-yadayada', 'TaskB/no/worries2014-03-21/05', ] mock_contents = task_a_paths + task_b_paths expected_a = [ 'TaskA(dh=2014-03-20T17)', 'TaskA(dh=2014-03-20T19)', 'TaskA(dh=2014-03-20T20)', ] # expected_reverse = [ # ] expected_wrapper = [ 'CommonWrapperTask(dh=2014-03-21T00)', 'CommonWrapperTask(dh=2014-03-21T02)', 'CommonWrapperTask(dh=2014-03-21T03)', 'CommonWrapperTask(dh=2014-03-21T04)', 'CommonWrapperTask(dh=2014-03-21T05)', ] class TaskA(luigi.Task): dh = luigi.DateHourParameter() def output(self): return MockTarget(self.dh.strftime('TaskA/%Y-%m-%d/%H')) class TaskB(luigi.Task): dh = luigi.DateHourParameter() complicator = luigi.Parameter() def output(self): return MockTarget(self.dh.strftime('TaskB/%%s%Y-%m-%d/%H') % self.complicator) class TaskC(luigi.Task): dh = luigi.DateHourParameter() def output(self): return MockTarget(self.dh.strftime('not/a/real/path/%Y-%m-%d/%H')) class CommonWrapperTask(luigi.WrapperTask): dh = luigi.DateHourParameter() def requires(self): yield TaskA(dh=self.dh) yield TaskB(dh=self.dh, complicator='no/worries') # str(self.dh) would complicate beyond working class TaskMinutesA(luigi.Task): dm = luigi.DateMinuteParameter() def output(self): return MockTarget(self.dm.strftime('TaskA/%Y-%m-%d/%H%M')) 
class TaskMinutesB(luigi.Task): dm = luigi.DateMinuteParameter() complicator = luigi.Parameter() def output(self): return MockTarget(self.dm.strftime('TaskB/%%s%Y-%m-%d/%H%M') % self.complicator) class TaskMinutesC(luigi.Task): dm = luigi.DateMinuteParameter() def output(self): return MockTarget(self.dm.strftime('not/a/real/path/%Y-%m-%d/%H%M')) class CommonWrapperTaskMinutes(luigi.WrapperTask): dm = luigi.DateMinuteParameter() def requires(self): yield TaskMinutesA(dm=self.dm) yield TaskMinutesB(dm=self.dm, complicator='no/worries') # str(self.dh) would complicate beyond working def mock_listdir(contents): def contents_listdir(_, glob): for path in fnmatch.filter(contents, glob + '*'): yield path return contents_listdir def mock_exists_always_true(_, _2): yield True def mock_exists_always_false(_, _2): yield False class ConstrainGlobTest(unittest.TestCase): def test_limit(self): glob = '/[0-9][0-9][0-9][0-9]/[0-9][0-9]/[0-9][0-9]/[0-9][0-9]' paths = [(datetime.datetime(2013, 12, 31, 5) + datetime.timedelta(hours=h)).strftime('/%Y/%m/%d/%H') for h in range(40)] self.assertEqual(sorted(_constrain_glob(glob, paths)), [ '/2013/12/31/[0-2][0-9]', '/2014/01/01/[0-2][0-9]', ]) paths.pop(26) self.assertEqual(sorted(_constrain_glob(glob, paths, 6)), [ '/2013/12/31/0[5-9]', '/2013/12/31/1[0-9]', '/2013/12/31/2[0-3]', '/2014/01/01/0[012345689]', '/2014/01/01/1[0-9]', '/2014/01/01/2[0]', ]) self.assertEqual(sorted(_constrain_glob(glob, paths[:7], 10)), [ '/2013/12/31/05', '/2013/12/31/06', '/2013/12/31/07', '/2013/12/31/08', '/2013/12/31/09', '/2013/12/31/10', '/2013/12/31/11', ]) def test_no_wildcards(self): glob = '/2014/01' paths = '/2014/01' self.assertEqual(_constrain_glob(glob, paths), [ '/2014/01', ]) def datetime_to_epoch(dt): td = dt - datetime.datetime(1970, 1, 1) return td.days * 86400 + td.seconds + td.microseconds / 1E6 class RangeDailyBaseTest(unittest.TestCase): maxDiff = None def setUp(self): # yucky to create separate callbacks; would be nicer if the callback # received an instance of a subclass of Event, so one callback could # accumulate all types @RangeDailyBase.event_handler(RangeEvent.DELAY) def callback_delay(*args): self.events.setdefault(RangeEvent.DELAY, []).append(args) @RangeDailyBase.event_handler(RangeEvent.COMPLETE_COUNT) def callback_complete_count(*args): self.events.setdefault(RangeEvent.COMPLETE_COUNT, []).append(args) @RangeDailyBase.event_handler(RangeEvent.COMPLETE_FRACTION) def callback_complete_fraction(*args): self.events.setdefault(RangeEvent.COMPLETE_FRACTION, []).append(args) self.events = {} def test_consistent_formatting(self): task = RangeDailyBase(of=CommonDateTask, start=datetime.date(2016, 1, 1)) self.assertEqual(task._format_range([datetime.datetime(2016, 1, 2, 13), datetime.datetime(2016, 2, 29, 23)]), '[2016-01-02, 2016-02-29]') def _empty_subcase(self, kwargs, expected_events): calls = [] class RangeDailyDerived(RangeDailyBase): def missing_datetimes(self, task_cls, finite_datetimes): args = [self, task_cls, finite_datetimes] calls.append(args) return args[-1][:5] task = RangeDailyDerived(of=CommonDateTask, **kwargs) self.assertEqual(task.requires(), []) self.assertEqual(calls, []) self.assertEqual(task.requires(), []) self.assertEqual(calls, []) # subsequent requires() should return the cached result, never call missing_datetimes self.assertEqual(self.events, expected_events) self.assertTrue(task.complete()) def test_stop_before_days_back(self): # nothing to do because stop is earlier self._empty_subcase( { 'now': 
datetime_to_epoch(datetime.datetime(2015, 1, 1, 4)), 'stop': datetime.date(2014, 3, 20), 'days_back': 4, 'days_forward': 20, 'reverse': True, }, { 'event.tools.range.delay': [ ('CommonDateTask', 0), ], 'event.tools.range.complete.count': [ ('CommonDateTask', 0), ], 'event.tools.range.complete.fraction': [ ('CommonDateTask', 1.), ], } ) def _nonempty_subcase(self, kwargs, expected_finite_datetimes_range, expected_requires, expected_events): calls = [] class RangeDailyDerived(RangeDailyBase): def missing_datetimes(self, finite_datetimes): # I only changed tests for number of arguments at this one # place to test both old and new behavior calls.append((self, finite_datetimes)) return finite_datetimes[:7] task = RangeDailyDerived(of=CommonDateTask, **kwargs) self.assertEqual(list(map(str, task.requires())), expected_requires) self.assertEqual((min(calls[0][1]), max(calls[0][1])), expected_finite_datetimes_range) self.assertEqual(list(map(str, task.requires())), expected_requires) self.assertEqual(len(calls), 1) # subsequent requires() should return the cached result, not call missing_datetimes again self.assertEqual(self.events, expected_events) self.assertFalse(task.complete()) def test_start_long_before_long_days_back_and_with_long_days_forward(self): self._nonempty_subcase( { 'now': datetime_to_epoch(datetime.datetime(2017, 10, 22, 12, 4, 29)), 'start': datetime.date(2011, 3, 20), 'stop': datetime.date(2025, 1, 29), 'task_limit': 4, 'days_back': 3 * 365, 'days_forward': 3 * 365, }, (datetime.datetime(2014, 10, 24), datetime.datetime(2020, 10, 21)), [ 'CommonDateTask(d=2014-10-24)', 'CommonDateTask(d=2014-10-25)', 'CommonDateTask(d=2014-10-26)', 'CommonDateTask(d=2014-10-27)', ], { 'event.tools.range.delay': [ ('CommonDateTask', 3750), ], 'event.tools.range.complete.count': [ ('CommonDateTask', 5057), ], 'event.tools.range.complete.fraction': [ ('CommonDateTask', 5057. 
/ (5057 + 7)), ], } ) class RangeHourlyBaseTest(unittest.TestCase): maxDiff = None def setUp(self): # yucky to create separate callbacks; would be nicer if the callback # received an instance of a subclass of Event, so one callback could # accumulate all types @RangeHourlyBase.event_handler(RangeEvent.DELAY) def callback_delay(*args): self.events.setdefault(RangeEvent.DELAY, []).append(args) @RangeHourlyBase.event_handler(RangeEvent.COMPLETE_COUNT) def callback_complete_count(*args): self.events.setdefault(RangeEvent.COMPLETE_COUNT, []).append(args) @RangeHourlyBase.event_handler(RangeEvent.COMPLETE_FRACTION) def callback_complete_fraction(*args): self.events.setdefault(RangeEvent.COMPLETE_FRACTION, []).append(args) self.events = {} def test_consistent_formatting(self): task = RangeHourlyBase(of=CommonDateHourTask, start=datetime.datetime(2016, 1, 1)) self.assertEqual(task._format_range([datetime.datetime(2016, 1, 2, 13), datetime.datetime(2016, 2, 29, 23)]), '[2016-01-02T13, 2016-02-29T23]') def _empty_subcase(self, kwargs, expected_events): calls = [] class RangeHourlyDerived(RangeHourlyBase): def missing_datetimes(a, b, c): args = [a, b, c] calls.append(args) return args[-1][:5] task = RangeHourlyDerived(of=CommonDateHourTask, **kwargs) self.assertEqual(task.requires(), []) self.assertEqual(calls, []) self.assertEqual(task.requires(), []) self.assertEqual(calls, []) # subsequent requires() should return the cached result, never call missing_datetimes self.assertEqual(self.events, expected_events) self.assertTrue(task.complete()) def test_start_after_hours_forward(self): # nothing to do because start is later self._empty_subcase( { 'now': datetime_to_epoch(datetime.datetime(2000, 1, 1, 4)), 'start': datetime.datetime(2014, 3, 20, 17), 'hours_back': 4, 'hours_forward': 20, }, { 'event.tools.range.delay': [ ('CommonDateHourTask', 0), ], 'event.tools.range.complete.count': [ ('CommonDateHourTask', 0), ], 'event.tools.range.complete.fraction': [ ('CommonDateHourTask', 1.), ], } ) def _nonempty_subcase(self, kwargs, expected_finite_datetimes_range, expected_requires, expected_events): calls = [] class RangeHourlyDerived(RangeHourlyBase): def missing_datetimes(a, b, c): args = [a, b, c] calls.append(args) return args[-1][:7] task = RangeHourlyDerived(of=CommonDateHourTask, **kwargs) self.assertEqual(list(map(str, task.requires())), expected_requires) self.assertEqual(calls[0][1], CommonDateHourTask) self.assertEqual((min(calls[0][2]), max(calls[0][2])), expected_finite_datetimes_range) self.assertEqual(list(map(str, task.requires())), expected_requires) self.assertEqual(len(calls), 1) # subsequent requires() should return the cached result, not call missing_datetimes again self.assertEqual(self.events, expected_events) self.assertFalse(task.complete()) def test_start_long_before_hours_back(self): self._nonempty_subcase( { 'now': datetime_to_epoch(datetime.datetime(2000, 1, 1, 4)), 'start': datetime.datetime(1960, 3, 2, 1), 'hours_back': 5, 'hours_forward': 20, }, (datetime.datetime(1999, 12, 31, 23), datetime.datetime(2000, 1, 1, 23)), [ 'CommonDateHourTask(dh=1999-12-31T23)', 'CommonDateHourTask(dh=2000-01-01T00)', 'CommonDateHourTask(dh=2000-01-01T01)', 'CommonDateHourTask(dh=2000-01-01T02)', 'CommonDateHourTask(dh=2000-01-01T03)', 'CommonDateHourTask(dh=2000-01-01T04)', 'CommonDateHourTask(dh=2000-01-01T05)', ], { 'event.tools.range.delay': [ ('CommonDateHourTask', 25), # because of short hours_back we're oblivious to those 40 preceding years ], 'event.tools.range.complete.count': [ 
('CommonDateHourTask', 349192), ], 'event.tools.range.complete.fraction': [ ('CommonDateHourTask', 349192. / (349192 + 7)), ], } ) def test_start_after_long_hours_back(self): self._nonempty_subcase( { 'now': datetime_to_epoch(datetime.datetime(2014, 10, 22, 12, 4, 29)), 'start': datetime.datetime(2014, 3, 20, 17), 'task_limit': 4, 'hours_back': 365 * 24, }, (datetime.datetime(2014, 3, 20, 17), datetime.datetime(2014, 10, 22, 12)), [ 'CommonDateHourTask(dh=2014-03-20T17)', 'CommonDateHourTask(dh=2014-03-20T18)', 'CommonDateHourTask(dh=2014-03-20T19)', 'CommonDateHourTask(dh=2014-03-20T20)', ], { 'event.tools.range.delay': [ ('CommonDateHourTask', 5180), ], 'event.tools.range.complete.count': [ ('CommonDateHourTask', 5173), ], 'event.tools.range.complete.fraction': [ ('CommonDateHourTask', 5173. / (5173 + 7)), ], } ) def test_start_long_before_long_hours_back_and_with_long_hours_forward(self): self._nonempty_subcase( { 'now': datetime_to_epoch(datetime.datetime(2017, 10, 22, 12, 4, 29)), 'start': datetime.datetime(2011, 3, 20, 17), 'task_limit': 4, 'hours_back': 3 * 365 * 24, 'hours_forward': 3 * 365 * 24, }, (datetime.datetime(2014, 10, 23, 13), datetime.datetime(2020, 10, 21, 12)), [ 'CommonDateHourTask(dh=2014-10-23T13)', 'CommonDateHourTask(dh=2014-10-23T14)', 'CommonDateHourTask(dh=2014-10-23T15)', 'CommonDateHourTask(dh=2014-10-23T16)', ], { 'event.tools.range.delay': [ ('CommonDateHourTask', 52560), ], 'event.tools.range.complete.count': [ ('CommonDateHourTask', 84061), ], 'event.tools.range.complete.fraction': [ ('CommonDateHourTask', 84061. / (84061 + 7)), ], } ) class RangeByMinutesBaseTest(unittest.TestCase): maxDiff = None def setUp(self): # yucky to create separate callbacks; would be nicer if the callback # received an instance of a subclass of Event, so one callback could # accumulate all types @RangeByMinutesBase.event_handler(RangeEvent.DELAY) def callback_delay(*args): self.events.setdefault(RangeEvent.DELAY, []).append(args) @RangeByMinutesBase.event_handler(RangeEvent.COMPLETE_COUNT) def callback_complete_count(*args): self.events.setdefault(RangeEvent.COMPLETE_COUNT, []).append(args) @RangeByMinutesBase.event_handler(RangeEvent.COMPLETE_FRACTION) def callback_complete_fraction(*args): self.events.setdefault(RangeEvent.COMPLETE_FRACTION, []).append(args) self.events = {} def test_consistent_formatting(self): task = RangeByMinutesBase(of=CommonDateMinuteTask, start=datetime.datetime(2016, 1, 1, 13), minutes_interval=5) self.assertEqual(task._format_range( [datetime.datetime(2016, 1, 2, 13, 10), datetime.datetime(2016, 2, 29, 23, 20)]), '[2016-01-02T1310, 2016-02-29T2320]') def _empty_subcase(self, kwargs, expected_events): calls = [] class RangeByMinutesDerived(RangeByMinutesBase): def missing_datetimes(a, b, c): args = [a, b, c] calls.append(args) return args[-1][:5] task = RangeByMinutesDerived(of=CommonDateMinuteTask, **kwargs) self.assertEqual(task.requires(), []) self.assertEqual(calls, []) self.assertEqual(task.requires(), []) self.assertEqual(calls, []) # subsequent requires() should return the cached result, never call missing_datetimes self.assertEqual(self.events, expected_events) self.assertTrue(task.complete()) def test_start_after_minutes_forward(self): # nothing to do because start is later self._empty_subcase( { 'now': datetime_to_epoch(datetime.datetime(2000, 1, 1, 4)), 'start': datetime.datetime(2014, 3, 20, 17, 10), 'minutes_back': 4, 'minutes_forward': 20, 'minutes_interval': 5, }, { 'event.tools.range.delay': [ ('CommonDateMinuteTask', 0), ], 
'event.tools.range.complete.count': [ ('CommonDateMinuteTask', 0), ], 'event.tools.range.complete.fraction': [ ('CommonDateMinuteTask', 1.), ], } ) def _nonempty_subcase(self, kwargs, expected_finite_datetimes_range, expected_requires, expected_events): calls = [] class RangeByMinutesDerived(RangeByMinutesBase): def missing_datetimes(a, b, c): args = [a, b, c] calls.append(args) return args[-1][:7] task = RangeByMinutesDerived(of=CommonDateMinuteTask, **kwargs) self.assertEqual(list(map(str, task.requires())), expected_requires) self.assertEqual(calls[0][1], CommonDateMinuteTask) self.assertEqual((min(calls[0][2]), max(calls[0][2])), expected_finite_datetimes_range) self.assertEqual(list(map(str, task.requires())), expected_requires) self.assertEqual(len(calls), 1) # subsequent requires() should return the cached result, not call missing_datetimes again self.assertEqual(self.events, expected_events) self.assertFalse(task.complete()) def test_negative_interval(self): class SomeByMinutesTask(luigi.Task): d = luigi.DateMinuteParameter() def output(self): return MockTarget(self.d.strftime('/data/2014/p/v/z/%Y_/_%m-_-%doctor/20/%HZ%MOOO')) task = RangeByMinutes(now=datetime_to_epoch(datetime.datetime(2016, 4, 1)), of=SomeByMinutesTask, start=datetime.datetime(2014, 3, 20, 17), minutes_interval=-1) self.assertRaises(luigi.parameter.ParameterException, task.requires) def test_non_dividing_interval(self): class SomeByMinutesTask(luigi.Task): d = luigi.DateMinuteParameter() def output(self): return MockTarget(self.d.strftime('/data/2014/p/v/z/%Y_/_%m-_-%doctor/20/%HZ%MOOO')) task = RangeByMinutes(now=datetime_to_epoch(datetime.datetime(2016, 4, 1)), of=SomeByMinutesTask, start=datetime.datetime(2014, 3, 20, 17), minutes_interval=8) self.assertRaises(luigi.parameter.ParameterException, task.requires) def test_start_and_minutes_period(self): self._nonempty_subcase( { 'now': datetime_to_epoch(datetime.datetime(2016, 9, 1, 12, 0, 0)), 'start': datetime.datetime(2016, 9, 1, 11, 0, 0), 'minutes_back': 24 * 60, 'minutes_forward': 0, 'minutes_interval': 3, }, (datetime.datetime(2016, 9, 1, 11, 0), datetime.datetime(2016, 9, 1, 11, 57, 0)), [ 'CommonDateMinuteTask(dh=2016-09-01T1100)', 'CommonDateMinuteTask(dh=2016-09-01T1103)', 'CommonDateMinuteTask(dh=2016-09-01T1106)', 'CommonDateMinuteTask(dh=2016-09-01T1109)', 'CommonDateMinuteTask(dh=2016-09-01T1112)', 'CommonDateMinuteTask(dh=2016-09-01T1115)', 'CommonDateMinuteTask(dh=2016-09-01T1118)', ], { 'event.tools.range.delay': [ ('CommonDateMinuteTask', 20), # First missing is the 20th ], 'event.tools.range.complete.count': [ ('CommonDateMinuteTask', 13), # 20 intervals - 7 missing ], 'event.tools.range.complete.fraction': [ ('CommonDateMinuteTask', 13. 
/ (13 + 7)), # (exptected - missing) / expected ], } ) def test_start_long_before_minutes_back(self): self._nonempty_subcase( { 'now': datetime_to_epoch(datetime.datetime(2000, 1, 1, 0, 3, 0)), 'start': datetime.datetime(1960, 1, 1, 0, 0, 0), 'minutes_back': 5, 'minutes_forward': 20, 'minutes_interval': 5, }, (datetime.datetime(2000, 1, 1, 0, 0), datetime.datetime(2000, 1, 1, 0, 20, 0)), [ 'CommonDateMinuteTask(dh=2000-01-01T0000)', 'CommonDateMinuteTask(dh=2000-01-01T0005)', 'CommonDateMinuteTask(dh=2000-01-01T0010)', 'CommonDateMinuteTask(dh=2000-01-01T0015)', 'CommonDateMinuteTask(dh=2000-01-01T0020)', ], { 'event.tools.range.delay': [ ('CommonDateMinuteTask', 5), # because of short minutes_back we're oblivious to those 40 preceding years ], 'event.tools.range.complete.count': [ ('CommonDateMinuteTask', 4207680), # expected intervals - missing. ], 'event.tools.range.complete.fraction': [ ('CommonDateMinuteTask', 4207680. / 4207685), # (expected - missing) / expected ], } ) def test_start_after_long_minutes_back(self): self._nonempty_subcase( { 'now': datetime_to_epoch(datetime.datetime(2014, 3, 20, 18, 4, 29)), 'start': datetime.datetime(2014, 3, 20, 17, 10), 'task_limit': 4, 'minutes_back': 365 * 24 * 60, 'minutes_interval': 5, }, (datetime.datetime(2014, 3, 20, 17, 10, 0), datetime.datetime(2014, 3, 20, 18, 0, 0)), [ 'CommonDateMinuteTask(dh=2014-03-20T1710)', 'CommonDateMinuteTask(dh=2014-03-20T1715)', 'CommonDateMinuteTask(dh=2014-03-20T1720)', 'CommonDateMinuteTask(dh=2014-03-20T1725)', ], { 'event.tools.range.delay': [ ('CommonDateMinuteTask', 11), ], 'event.tools.range.complete.count': [ ('CommonDateMinuteTask', 4), ], 'event.tools.range.complete.fraction': [ ('CommonDateMinuteTask', 4. / 11), ], } ) def test_start_long_before_long_minutes_back_and_with_long_minutes_forward(self): self._nonempty_subcase( { 'now': datetime_to_epoch(datetime.datetime(2017, 3, 22, 20, 4, 29)), 'start': datetime.datetime(2011, 3, 20, 17, 10, 0), 'task_limit': 4, 'minutes_back': 365 * 24 * 60, 'minutes_forward': 365 * 24 * 60, 'minutes_interval': 5, }, (datetime.datetime(2016, 3, 22, 20, 5), datetime.datetime(2018, 3, 22, 20, 0)), [ 'CommonDateMinuteTask(dh=2016-03-22T2005)', 'CommonDateMinuteTask(dh=2016-03-22T2010)', 'CommonDateMinuteTask(dh=2016-03-22T2015)', 'CommonDateMinuteTask(dh=2016-03-22T2020)', ], { 'event.tools.range.delay': [ ('CommonDateMinuteTask', 210240), ], 'event.tools.range.complete.count': [ ('CommonDateMinuteTask', 737020), ], 'event.tools.range.complete.fraction': [ ('CommonDateMinuteTask', 737020. 
/ (737020 + 7)), ], } ) class FilesystemInferenceTest(unittest.TestCase): def _test_filesystems_and_globs(self, datetime_to_task, datetime_to_re, expected): actual = list(_get_filesystems_and_globs(datetime_to_task, datetime_to_re)) self.assertEqual(len(actual), len(expected)) for (actual_filesystem, actual_glob), (expected_filesystem, expected_glob) in zip(actual, expected): self.assertTrue(isinstance(actual_filesystem, expected_filesystem)) self.assertEqual(actual_glob, expected_glob) def test_date_glob_successfully_inferred(self): self._test_filesystems_and_globs( lambda d: CommonDateTask(d), lambda d: d.strftime('(%Y).*(%m).*(%d)'), [ (MockFileSystem, '/n2000y01a05n/[0-9][0-9][0-9][0-9]_[0-9][0-9]-_-[0-9][0-9]aww/21mm01dara21'), ] ) def test_datehour_glob_successfully_inferred(self): self._test_filesystems_and_globs( lambda d: CommonDateHourTask(d), lambda d: d.strftime('(%Y).*(%m).*(%d).*(%H)'), [ (MockFileSystem, '/n2000y01a05n/[0-9][0-9][0-9][0-9]_[0-9][0-9]-_-[0-9][0-9]aww/21mm[0-9][0-9]dara21'), ] ) def test_dateminute_glob_successfully_inferred(self): self._test_filesystems_and_globs( lambda d: CommonDateMinuteTask(d), lambda d: d.strftime('(%Y).*(%m).*(%d).*(%H).*(%M)'), [ (MockFileSystem, '/n2000y01a05n/[0-9][0-9][0-9][0-9]_[0-9][0-9]-_-[0-9][0-9]aww/21mm[0-9][0-9][0-9][0-9]dara21'), ] ) def test_wrapped_datehour_globs_successfully_inferred(self): self._test_filesystems_and_globs( lambda d: CommonWrapperTask(d), lambda d: d.strftime('(%Y).*(%m).*(%d).*(%H)'), [ (MockFileSystem, 'TaskA/[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]'), (MockFileSystem, 'TaskB/no/worries[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]'), ] ) def test_inconsistent_output_datehour_glob_not_inferred(self): class InconsistentlyOutputtingDateHourTask(luigi.Task): dh = luigi.DateHourParameter() def output(self): base = self.dh.strftime('/even/%Y%m%d%H') if self.dh.hour % 2 == 0: return MockTarget(base) else: return { 'spi': MockTarget(base + '/something.spi'), 'spl': MockTarget(base + '/something.spl'), } def test_raise_not_implemented(): list(_get_filesystems_and_globs( lambda d: InconsistentlyOutputtingDateHourTask(d), lambda d: d.strftime('(%Y).*(%m).*(%d).*(%H)'))) self.assertRaises(NotImplementedError, test_raise_not_implemented) def test_wrapped_inconsistent_datehour_globs_not_inferred(self): class InconsistentlyParameterizedWrapperTask(luigi.WrapperTask): dh = luigi.DateHourParameter() def requires(self): yield TaskA(dh=self.dh - datetime.timedelta(days=1)) yield TaskB(dh=self.dh, complicator='no/worries') def test_raise_not_implemented(): list(_get_filesystems_and_globs( lambda d: InconsistentlyParameterizedWrapperTask(d), lambda d: d.strftime('(%Y).*(%m).*(%d).*(%H)'))) self.assertRaises(NotImplementedError, test_raise_not_implemented) class RangeMonthlyTest(unittest.TestCase): def setUp(self): # yucky to create separate callbacks; would be nicer if the callback # received an instance of a subclass of Event, so one callback could # accumulate all types @RangeMonthly.event_handler(RangeEvent.DELAY) def callback_delay(*args): self.events.setdefault(RangeEvent.DELAY, []).append(args) @RangeMonthly.event_handler(RangeEvent.COMPLETE_COUNT) def callback_complete_count(*args): self.events.setdefault(RangeEvent.COMPLETE_COUNT, []).append(args) @RangeMonthly.event_handler(RangeEvent.COMPLETE_FRACTION) def callback_complete_fraction(*args): self.events.setdefault(RangeEvent.COMPLETE_FRACTION, []).append(args) self.events = {} def _empty_subcase(self, kwargs, expected_events): calls = [] class 
RangeMonthlyDerived(RangeMonthly): def missing_datetimes(self, task_cls, finite_datetimes): args = [self, task_cls, finite_datetimes] calls.append(args) return args[-1][:5] task = RangeMonthlyDerived(of=CommonMonthTask, **kwargs) self.assertEqual(task.requires(), []) self.assertEqual(calls, []) self.assertEqual(task.requires(), []) self.assertEqual(calls, []) # subsequent requires() should return the cached result, never call missing_datetimes self.assertEqual(self.events, expected_events) self.assertTrue(task.complete()) def test_stop_before_months_back(self): # nothing to do because stop is earlier self._empty_subcase( { 'now': datetime_to_epoch(datetime.datetime(2017, 1, 3)), 'stop': datetime.date(2016, 3, 20), 'months_back': 4, 'months_forward': 20, 'reverse': True, }, { 'event.tools.range.delay': [ ('CommonMonthTask', 0), ], 'event.tools.range.complete.count': [ ('CommonMonthTask', 0), ], 'event.tools.range.complete.fraction': [ ('CommonMonthTask', 1.), ], } ) def test_start_after_months_forward(self): # nothing to do because start is later self._empty_subcase( { 'now': datetime_to_epoch(datetime.datetime(2000, 1, 1)), 'start': datetime.datetime(2014, 3, 20), 'months_back': 4, 'months_forward': 20, }, { 'event.tools.range.delay': [ ('CommonMonthTask', 0), ], 'event.tools.range.complete.count': [ ('CommonMonthTask', 0), ], 'event.tools.range.complete.fraction': [ ('CommonMonthTask', 1.), ], } ) def _nonempty_subcase(self, kwargs, expected_finite_datetimes_range, expected_requires, expected_events): calls = [] class RangeDailyDerived(RangeMonthly): def missing_datetimes(self, finite_datetimes): calls.append((self, finite_datetimes)) return finite_datetimes[:7] task = RangeDailyDerived(of=CommonMonthTask, **kwargs) self.assertEqual(list(map(str, task.requires())), expected_requires) self.assertEqual((min(calls[0][1]), max(calls[0][1])), expected_finite_datetimes_range) self.assertEqual(list(map(str, task.requires())), expected_requires) self.assertEqual(len(calls), 1) # subsequent requires() should return the cached result, not call missing_datetimes again self.assertEqual(self.events, expected_events) self.assertFalse(task.complete()) def test_start_long_before_months_back(self): total = (2000 - 1960) * 12 + 20 - 2 self._nonempty_subcase( { 'now': datetime_to_epoch(datetime.datetime(2000, 1, 1)), 'start': datetime.datetime(1960, 3, 2, 1), 'months_back': 5, 'months_forward': 20, }, (datetime.datetime(1999, 8, 1), datetime.datetime(2001, 8, 1)), [ 'CommonMonthTask(m=1999-08)', 'CommonMonthTask(m=1999-09)', 'CommonMonthTask(m=1999-10)', 'CommonMonthTask(m=1999-11)', 'CommonMonthTask(m=1999-12)', 'CommonMonthTask(m=2000-01)', 'CommonMonthTask(m=2000-02)', ], { 'event.tools.range.delay': [ ('CommonMonthTask', 25), ], 'event.tools.range.complete.count': [ ('CommonMonthTask', total - 7), ], 'event.tools.range.complete.fraction': [ ('CommonMonthTask', (total - 7.0) / total), ], } ) def test_start_after_long_months_back(self): total = 12 - 3 self._nonempty_subcase( { 'now': datetime_to_epoch(datetime.datetime(2014, 11, 22)), 'start': datetime.datetime(2014, 3, 1), 'task_limit': 4, 'months_back': 12 * 24, }, (datetime.datetime(2014, 3, 1), datetime.datetime(2014, 11, 1)), [ 'CommonMonthTask(m=2014-03)', 'CommonMonthTask(m=2014-04)', 'CommonMonthTask(m=2014-05)', 'CommonMonthTask(m=2014-06)', ], { 'event.tools.range.delay': [ ('CommonMonthTask', total), ], 'event.tools.range.complete.count': [ ('CommonMonthTask', total - 7), ], 'event.tools.range.complete.fraction': [ ('CommonMonthTask', (total - 
7.0) / total), ], } ) def test_start_long_before_long_months_back_and_with_long_months_forward(self): total = (2025 - 2011) * 12 - 2 self._nonempty_subcase( { 'now': datetime_to_epoch(datetime.datetime(2017, 10, 22, 12, 4, 29)), 'start': datetime.date(2011, 3, 20), 'stop': datetime.date(2025, 1, 29), 'task_limit': 4, 'months_back': 3 * 12, 'months_forward': 3 * 12, }, (datetime.datetime(2014, 11, 1), datetime.datetime(2020, 10, 1)), [ 'CommonMonthTask(m=2014-11)', 'CommonMonthTask(m=2014-12)', 'CommonMonthTask(m=2015-01)', 'CommonMonthTask(m=2015-02)', ], { 'event.tools.range.delay': [ ('CommonMonthTask', (2025 - (2017 - 3)) * 12 - 10), ], 'event.tools.range.complete.count': [ ('CommonMonthTask', total - 7), ], 'event.tools.range.complete.fraction': [ ('CommonMonthTask', (total - 7.0) / total), ], } ) def test_consistent_formatting(self): task = RangeMonthly(of=CommonMonthTask, start=datetime.date(2018, 1, 4)) self.assertEqual(task._format_range([datetime.datetime(2018, 2, 3, 14), datetime.datetime(2018, 4, 5, 21)]), '[2018-02, 2018-04]') class MonthInstantiationTest(LuigiTestCase): def test_old_month_instantiation(self): """ Verify that you can still programmatically set of param as string """ class MyTask(luigi.Task): month_param = luigi.MonthParameter() def complete(self): return False range_task = RangeMonthly(now=datetime_to_epoch(datetime.datetime(2015, 12, 2)), of=MyTask, start=datetime.date(2015, 12, 1), stop=datetime.date(2016, 1, 1)) expected_task = MyTask(month_param=datetime.date(2015, 12, 1)) self.assertEqual(expected_task, list(range_task._requires())[0]) def test_month_cli_instantiation(self): """ Verify that you can still use Range through CLI """ class MyTask(luigi.Task): task_namespace = "wohoo" month_param = luigi.MonthParameter() secret = 'some-value-to-sooth-python-linters' comp = False def complete(self): return self.comp def run(self): self.comp = True MyTask.secret = 'yay' now = str(int(datetime_to_epoch(datetime.datetime(2015, 12, 2)))) self.run_locally_split('RangeMonthly --of wohoo.MyTask --now {now} --start 2015-12 --stop 2016-01'.format(now=now)) self.assertEqual(MyTask(month_param=datetime.date(1934, 12, 1)).secret, 'yay') def test_param_name(self): class MyTask(luigi.Task): some_non_range_param = luigi.Parameter(default='woo') month_param = luigi.MonthParameter() def complete(self): return False range_task = RangeMonthly(now=datetime_to_epoch(datetime.datetime(2015, 12, 2)), of=MyTask, start=datetime.date(2015, 12, 1), stop=datetime.date(2016, 1, 1), param_name='month_param') expected_task = MyTask('woo', datetime.date(2015, 12, 1)) self.assertEqual(expected_task, list(range_task._requires())[0]) def test_param_name_with_inferred_fs(self): class MyTask(luigi.Task): some_non_range_param = luigi.Parameter(default='woo') month_param = luigi.MonthParameter() def output(self): return MockTarget(self.month_param.strftime('/n2000y01a05n/%Y_%m-aww/21mm%Hdara21/ooo')) range_task = RangeMonthly(now=datetime_to_epoch(datetime.datetime(2015, 12, 2)), of=MyTask, start=datetime.date(2015, 12, 1), stop=datetime.date(2016, 1, 1), param_name='month_param') expected_task = MyTask('woo', datetime.date(2015, 12, 1)) self.assertEqual(expected_task, list(range_task._requires())[0]) def test_of_param_distinction(self): class MyTask(luigi.Task): arbitrary_param = luigi.Parameter(default='foo') arbitrary_integer_param = luigi.IntParameter(default=10) month_param = luigi.MonthParameter() def complete(self): return False range_task_1 = 
RangeMonthly(now=datetime_to_epoch(datetime.datetime(2015, 12, 2)), of=MyTask, start=datetime.date(2015, 12, 1), stop=datetime.date(2016, 1, 1)) range_task_2 = RangeMonthly(now=datetime_to_epoch(datetime.datetime(2015, 12, 2)), of=MyTask, of_params=dict(arbitrary_param="bar", abitrary_integer_param=2), start=datetime.date(2015, 12, 1), stop=datetime.date(2016, 1, 1)) self.assertNotEqual(range_task_1.task_id, range_task_2.task_id) def test_of_param_commandline(self): class MyTask(luigi.Task): task_namespace = "wohoo" month_param = luigi.MonthParameter() arbitrary_param = luigi.Parameter(default='foo') arbitrary_integer_param = luigi.IntParameter(default=10) state = (None, None) comp = False def complete(self): return self.comp def run(self): self.comp = True MyTask.state = (self.arbitrary_param, self.arbitrary_integer_param) now = str(int(datetime_to_epoch(datetime.datetime(2015, 12, 2)))) self.run_locally(['RangeMonthly', '--of', 'wohoo.MyTask', '--of-params', '{"arbitrary_param":"bar","arbitrary_integer_param":5}', '--now', '{0}'.format(now), '--start', '2015-12', '--stop', '2016-01']) self.assertEqual(MyTask.state, ('bar', 5)) class RangeDailyTest(unittest.TestCase): def test_bulk_complete_correctly_interfaced(self): class BulkCompleteDailyTask(luigi.Task): d = luigi.DateParameter() @classmethod def bulk_complete(self, parameter_tuples): return list(parameter_tuples)[:-2] def output(self): raise RuntimeError("Shouldn't get called while resolving deps via bulk_complete") task = RangeDaily(now=datetime_to_epoch(datetime.datetime(2015, 12, 1)), of=BulkCompleteDailyTask, start=datetime.date(2015, 11, 1), stop=datetime.date(2015, 12, 1)) expected = [ 'BulkCompleteDailyTask(d=2015-11-29)', 'BulkCompleteDailyTask(d=2015-11-30)', ] actual = [str(t) for t in task.requires()] self.assertEqual(actual, expected) def test_bulk_complete_of_params(self): class BulkCompleteDailyTask(luigi.Task): non_positional_arbitrary_argument = luigi.Parameter(default="whatever", positional=False, significant=False) d = luigi.DateParameter() arbitrary_argument = luigi.BoolParameter() @classmethod def bulk_complete(cls, parameter_tuples): ptuples = list(parameter_tuples) for t in map(cls, ptuples): assert t.arbitrary_argument return ptuples[:-2] def output(self): raise RuntimeError("Shouldn't get called while resolving deps via bulk_complete") task = RangeDaily(now=datetime_to_epoch(datetime.datetime(2015, 12, 1)), of=BulkCompleteDailyTask, of_params=dict(arbitrary_argument=True), start=datetime.date(2015, 11, 1), stop=datetime.date(2015, 12, 1)) expected = [ 'BulkCompleteDailyTask(d=2015-11-29, arbitrary_argument=True)', 'BulkCompleteDailyTask(d=2015-11-30, arbitrary_argument=True)', ] actual = [str(t) for t in task.requires()] self.assertEqual(actual, expected) @mock.patch('luigi.mock.MockFileSystem.listdir', new=mock_listdir([ '/data/2014/p/v/z/2014_/_03-_-21octor/20/ZOOO', '/data/2014/p/v/z/2014_/_03-_-23octor/20/ZOOO', '/data/2014/p/v/z/2014_/_03-_-24octor/20/ZOOO', ])) @mock.patch('luigi.mock.MockFileSystem.exists', new=mock_exists_always_true) def test_missing_tasks_correctly_required(self): class SomeDailyTask(luigi.Task): d = luigi.DateParameter() def output(self): return MockTarget(self.d.strftime('/data/2014/p/v/z/%Y_/_%m-_-%doctor/20/ZOOO')) task = RangeDaily(now=datetime_to_epoch(datetime.datetime(2016, 4, 1)), of=SomeDailyTask, start=datetime.date(2014, 3, 20), task_limit=3, days_back=3 * 365) expected = [ 'SomeDailyTask(d=2014-03-20)', 'SomeDailyTask(d=2014-03-22)', 'SomeDailyTask(d=2014-03-25)', ] 
actual = [str(t) for t in task.requires()] self.assertEqual(actual, expected) class RangeHourlyTest(unittest.TestCase): # fishy to mock the mock, but MockFileSystem doesn't support globs yet @mock.patch('luigi.mock.MockFileSystem.listdir', new=mock_listdir(mock_contents)) @mock.patch('luigi.mock.MockFileSystem.exists', new=mock_exists_always_true) def test_missing_tasks_correctly_required(self): for task_path in task_a_paths: MockTarget(task_path) # this test takes a few seconds. Since stop is not defined, # finite_datetimes constitute many years to consider task = RangeHourly(now=datetime_to_epoch(datetime.datetime(2016, 4, 1)), of=TaskA, start=datetime.datetime(2014, 3, 20, 17), task_limit=3, hours_back=3 * 365 * 24) actual = [str(t) for t in task.requires()] self.assertEqual(actual, expected_a) @mock.patch('luigi.mock.MockFileSystem.listdir', new=mock_listdir(mock_contents)) @mock.patch('luigi.mock.MockFileSystem.exists', new=mock_exists_always_true) def test_missing_wrapper_tasks_correctly_required(self): task = RangeHourly( now=datetime_to_epoch(datetime.datetime(2040, 4, 1)), of=CommonWrapperTask, start=datetime.datetime(2014, 3, 20, 23), stop=datetime.datetime(2014, 3, 21, 6), hours_back=30 * 365 * 24) actual = [str(t) for t in task.requires()] self.assertEqual(actual, expected_wrapper) def test_bulk_complete_correctly_interfaced(self): class BulkCompleteHourlyTask(luigi.Task): dh = luigi.DateHourParameter() @classmethod def bulk_complete(cls, parameter_tuples): return parameter_tuples[:-2] def output(self): raise RuntimeError("Shouldn't get called while resolving deps via bulk_complete") task = RangeHourly(now=datetime_to_epoch(datetime.datetime(2015, 12, 1)), of=BulkCompleteHourlyTask, start=datetime.datetime(2015, 11, 1), stop=datetime.datetime(2015, 12, 1)) expected = [ 'BulkCompleteHourlyTask(dh=2015-11-30T22)', 'BulkCompleteHourlyTask(dh=2015-11-30T23)', ] actual = [str(t) for t in task.requires()] self.assertEqual(actual, expected) def test_bulk_complete_of_params(self): class BulkCompleteHourlyTask(luigi.Task): non_positional_arbitrary_argument = luigi.Parameter(default="whatever", positional=False, significant=False) dh = luigi.DateHourParameter() arbitrary_argument = luigi.BoolParameter() @classmethod def bulk_complete(cls, parameter_tuples): for t in map(cls, parameter_tuples): assert t.arbitrary_argument return parameter_tuples[:-2] def output(self): raise RuntimeError("Shouldn't get called while resolving deps via bulk_complete") task = RangeHourly(now=datetime_to_epoch(datetime.datetime(2015, 12, 1)), of=BulkCompleteHourlyTask, of_params=dict(arbitrary_argument=True), start=datetime.datetime(2015, 11, 1), stop=datetime.datetime(2015, 12, 1)) expected = [ 'BulkCompleteHourlyTask(dh=2015-11-30T22, arbitrary_argument=True)', 'BulkCompleteHourlyTask(dh=2015-11-30T23, arbitrary_argument=True)', ] actual = [str(t) for t in task.requires()] self.assertEqual(actual, expected) @mock.patch('luigi.mock.MockFileSystem.exists', new=mock_exists_always_false) def test_missing_directory(self): task = RangeHourly(now=datetime_to_epoch( datetime.datetime(2014, 4, 1)), of=TaskC, start=datetime.datetime(2014, 3, 20, 23), stop=datetime.datetime(2014, 3, 21, 1)) self.assertFalse(task.complete()) expected = [ 'TaskC(dh=2014-03-20T23)', 'TaskC(dh=2014-03-21T00)'] self.assertEqual([str(t) for t in task.requires()], expected) class RangeByMinutesTest(unittest.TestCase): # fishy to mock the mock, but MockFileSystem doesn't support globs yet @mock.patch('luigi.mock.MockFileSystem.listdir', 
new=mock_listdir(mock_contents)) @mock.patch('luigi.mock.MockFileSystem.exists', new=mock_exists_always_true) def test_missing_tasks_correctly_required(self): expected_tasks = [ 'SomeByMinutesTask(d=2016-03-31T0000)', 'SomeByMinutesTask(d=2016-03-31T0005)', 'SomeByMinutesTask(d=2016-03-31T0010)'] class SomeByMinutesTask(luigi.Task): d = luigi.DateMinuteParameter() def output(self): return MockTarget(self.d.strftime('/data/2014/p/v/z/%Y_/_%m-_-%doctor/20/%HZ%MOOO')) for task_path in task_a_paths: MockTarget(task_path) # this test takes a few seconds. Since stop is not defined, # finite_datetimes constitute many years to consider task = RangeByMinutes(now=datetime_to_epoch(datetime.datetime(2016, 4, 1)), of=SomeByMinutesTask, start=datetime.datetime(2014, 3, 20, 17), task_limit=3, minutes_back=24 * 60, minutes_interval=5) actual = [str(t) for t in task.requires()] self.assertEqual(actual, expected_tasks) @mock.patch('luigi.mock.MockFileSystem.listdir', new=mock_listdir(mock_contents)) @mock.patch('luigi.mock.MockFileSystem.exists', new=mock_exists_always_true) def test_missing_wrapper_tasks_correctly_required(self): expected_wrapper = [ 'CommonWrapperTaskMinutes(dm=2014-03-20T2300)', 'CommonWrapperTaskMinutes(dm=2014-03-20T2305)', 'CommonWrapperTaskMinutes(dm=2014-03-20T2310)', 'CommonWrapperTaskMinutes(dm=2014-03-20T2315)'] task = RangeByMinutes( now=datetime_to_epoch(datetime.datetime(2040, 4, 1, 0, 0, 0)), of=CommonWrapperTaskMinutes, start=datetime.datetime(2014, 3, 20, 23, 0, 0), stop=datetime.datetime(2014, 3, 20, 23, 20, 0), minutes_back=30 * 365 * 24 * 60, minutes_interval=5) actual = [str(t) for t in task.requires()] self.assertEqual(actual, expected_wrapper) def test_bulk_complete_correctly_interfaced(self): class BulkCompleteByMinutesTask(luigi.Task): dh = luigi.DateMinuteParameter() @classmethod def bulk_complete(cls, parameter_tuples): return parameter_tuples[:-2] def output(self): raise RuntimeError("Shouldn't get called while resolving deps via bulk_complete") task = RangeByMinutes(now=datetime_to_epoch(datetime.datetime(2015, 12, 1)), of=BulkCompleteByMinutesTask, start=datetime.datetime(2015, 11, 1), stop=datetime.datetime(2015, 12, 1), minutes_interval=5) expected = [ 'BulkCompleteByMinutesTask(dh=2015-11-30T2350)', 'BulkCompleteByMinutesTask(dh=2015-11-30T2355)', ] actual = [str(t) for t in task.requires()] self.assertEqual(actual, expected) def test_bulk_complete_of_params(self): class BulkCompleteByMinutesTask(luigi.Task): non_positional_arbitrary_argument = luigi.Parameter(default="whatever", positional=False, significant=False) dh = luigi.DateMinuteParameter() arbitrary_argument = luigi.BoolParameter() @classmethod def bulk_complete(cls, parameter_tuples): for t in map(cls, parameter_tuples): assert t.arbitrary_argument return parameter_tuples[:-2] def output(self): raise RuntimeError("Shouldn't get called while resolving deps via bulk_complete") task = RangeByMinutes(now=datetime_to_epoch(datetime.datetime(2015, 12, 1)), of=BulkCompleteByMinutesTask, of_params=dict(arbitrary_argument=True), start=datetime.datetime(2015, 11, 1), stop=datetime.datetime(2015, 12, 1), minutes_interval=5) expected = [ 'BulkCompleteByMinutesTask(dh=2015-11-30T2350, arbitrary_argument=True)', 'BulkCompleteByMinutesTask(dh=2015-11-30T2355, arbitrary_argument=True)', ] actual = [str(t) for t in task.requires()] self.assertEqual(actual, expected) @mock.patch('luigi.mock.MockFileSystem.exists', new=mock_exists_always_false) def test_missing_directory(self): task = 
RangeByMinutes(now=datetime_to_epoch( datetime.datetime(2014, 3, 21, 0, 0)), of=TaskMinutesC, start=datetime.datetime(2014, 3, 20, 23, 11), stop=datetime.datetime(2014, 3, 20, 23, 21), minutes_interval=5) self.assertFalse(task.complete()) expected = [ 'TaskMinutesC(dm=2014-03-20T2315)', 'TaskMinutesC(dm=2014-03-20T2320)'] self.assertEqual([str(t) for t in task.requires()], expected) class RangeInstantiationTest(LuigiTestCase): def test_old_instantiation(self): """ Verify that you can still programatically set of param as string """ class MyTask(luigi.Task): date_param = luigi.DateParameter() def complete(self): return False range_task = RangeDailyBase(now=datetime_to_epoch(datetime.datetime(2015, 12, 2)), of=MyTask, start=datetime.date(2015, 12, 1), stop=datetime.date(2015, 12, 2)) expected_task = MyTask(date_param=datetime.date(2015, 12, 1)) self.assertEqual(expected_task, list(range_task._requires())[0]) def test_cli_instantiation(self): """ Verify that you can still use Range through CLI """ class MyTask(luigi.Task): task_namespace = "wohoo" date_param = luigi.DateParameter() secret = 'some-value-to-sooth-python-linters' comp = False def complete(self): return self.comp def run(self): self.comp = True MyTask.secret = 'yay' now = str(int(datetime_to_epoch(datetime.datetime(2015, 12, 2)))) self.run_locally_split('RangeDailyBase --of wohoo.MyTask --now {now} --start 2015-12-01 --stop 2015-12-02'.format(now=now)) self.assertEqual(MyTask(date_param=datetime.date(1934, 12, 1)).secret, 'yay') def test_param_name(self): class MyTask(luigi.Task): some_non_range_param = luigi.Parameter(default='woo') date_param = luigi.DateParameter() def complete(self): return False range_task = RangeDailyBase(now=datetime_to_epoch(datetime.datetime(2015, 12, 2)), of=MyTask, start=datetime.date(2015, 12, 1), stop=datetime.date(2015, 12, 2), param_name='date_param') expected_task = MyTask('woo', datetime.date(2015, 12, 1)) self.assertEqual(expected_task, list(range_task._requires())[0]) def test_param_name_with_inferred_fs(self): class MyTask(luigi.Task): some_non_range_param = luigi.Parameter(default='woo') date_param = luigi.DateParameter() def output(self): return MockTarget(self.date_param.strftime('/n2000y01a05n/%Y_%m-_-%daww/21mm%Hdara21/ooo')) range_task = RangeDaily(now=datetime_to_epoch(datetime.datetime(2015, 12, 2)), of=MyTask, start=datetime.date(2015, 12, 1), stop=datetime.date(2015, 12, 2), param_name='date_param') expected_task = MyTask('woo', datetime.date(2015, 12, 1)) self.assertEqual(expected_task, list(range_task._requires())[0]) def test_of_param_distinction(self): class MyTask(luigi.Task): arbitrary_param = luigi.Parameter(default='foo') arbitrary_integer_param = luigi.IntParameter(default=10) date_param = luigi.DateParameter() def complete(self): return False range_task_1 = RangeDaily(now=datetime_to_epoch(datetime.datetime(2015, 12, 2)), of=MyTask, start=datetime.date(2015, 12, 1), stop=datetime.date(2015, 12, 2)) range_task_2 = RangeDaily(now=datetime_to_epoch(datetime.datetime(2015, 12, 2)), of=MyTask, of_params=dict(arbitrary_param="bar", abitrary_integer_param=2), start=datetime.date(2015, 12, 1), stop=datetime.date(2015, 12, 2)) self.assertNotEqual(range_task_1.task_id, range_task_2.task_id) def test_of_param_commandline(self): class MyTask(luigi.Task): task_namespace = "wohoo" date_param = luigi.DateParameter() arbitrary_param = luigi.Parameter(default='foo') arbitrary_integer_param = luigi.IntParameter(default=10) state = (None, None) comp = False def complete(self): return 
self.comp def run(self): self.comp = True MyTask.state = (self.arbitrary_param, self.arbitrary_integer_param) now = str(int(datetime_to_epoch(datetime.datetime(2015, 12, 2)))) self.run_locally(['RangeDailyBase', '--of', 'wohoo.MyTask', '--of-params', '{"arbitrary_param":"bar","arbitrary_integer_param":5}', '--now', '{0}'.format(now), '--start', '2015-12-01', '--stop', '2015-12-02']) self.assertEqual(MyTask.state, ('bar', 5))
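For orientation, a minimal sketch of the kind of task these range tests exercise: a date-parameterised task plus a RangeDaily wrapper that backfills whatever daily outputs are missing. The task name, output path and dates here are illustrative and not taken from the test suite above.
import datetime
import luigi
from luigi.tools.range import RangeDaily

class ReportTask(luigi.Task):
    # hypothetical example task; one output file per day
    date = luigi.DateParameter()

    def output(self):
        return luigi.LocalTarget(self.date.strftime('/tmp/reports/%Y-%m-%d.txt'))

    def run(self):
        with self.output().open('w') as f:
            f.write('report for %s\n' % self.date)

# Roughly equivalent to: luigi RangeDaily --of ReportTask --start 2016-03-01
backfill = RangeDaily(of=ReportTask, start=datetime.date(2016, 3, 1))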
apache-2.0
-2,409,105,169,051,072,000
39.292386
151
0.545548
false
Jajcus/pyxmpp
pyxmpp/expdict.py
1
4727
#
# (C) Copyright 2003-2010 Jacek Konieczny <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License Version
# 2.1 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
"""Dictionary with item expiration."""

__docformat__="restructuredtext en"

import time
import threading

__all__ = ['ExpiringDictionary']

sentinel = object()


class ExpiringDictionary(dict):
    """An extension to standard Python dictionary objects which implements
    item expiration.

    Each item in ExpiringDictionary has its expiration time assigned, after
    which the item is removed from the mapping.

    :Ivariables:
        - `_timeouts`: a dictionary with timeout values and timeout callback
          for stored objects.
        - `_default_timeout`: the default timeout value (in seconds from now).
        - `_lock`: access synchronization lock.
    :Types:
        - `_timeouts`: `dict`
        - `_default_timeout`: `int`
        - `_lock`: `threading.RLock`"""

    __slots__=['_timeouts','_default_timeout','_lock']

    def __init__(self,default_timeout=300):
        """Initialize an `ExpiringDictionary` object.

        :Parameters:
            - `default_timeout`: default timeout value for stored objects.
        :Types:
            - `default_timeout`: `int`"""
        dict.__init__(self)
        self._timeouts={}
        self._default_timeout=default_timeout
        self._lock=threading.RLock()

    def __delitem__(self,key):
        self._lock.acquire()
        try:
            del self._timeouts[key]
            return dict.__delitem__(self,key)
        finally:
            self._lock.release()

    def __getitem__(self,key):
        self._lock.acquire()
        try:
            self._expire_item(key)
            return dict.__getitem__(self,key)
        finally:
            self._lock.release()

    def pop(self,key,default=sentinel):
        self._lock.acquire()
        try:
            self._expire_item(key)
            del self._timeouts[key]
            if default is not sentinel:
                return dict.pop(self,key,default)
            else:
                return dict.pop(self,key)
        finally:
            self._lock.release()

    def __setitem__(self,key,value):
        return self.set_item(key,value)

    def set_item(self,key,value,timeout=None,timeout_callback=None):
        """Set item of the dictionary.

        :Parameters:
            - `key`: the key.
            - `value`: the object to store.
            - `timeout`: timeout value for the object (in seconds from now).
            - `timeout_callback`: function to be called when the item expires.
              The callback should accept none, one (the key) or two (the key
              and the value) arguments.
        :Types:
            - `key`: any hashable value
            - `value`: any python object
            - `timeout`: `int`
            - `timeout_callback`: callable"""
        self._lock.acquire()
        try:
            if not timeout:
                timeout=self._default_timeout
            self._timeouts[key]=(time.time()+timeout,timeout_callback)
            return dict.__setitem__(self,key,value)
        finally:
            self._lock.release()

    def expire(self):
        """Do the expiration of dictionary items.

        Remove items that expired by now from the dictionary."""
        self._lock.acquire()
        try:
            for k in self._timeouts.keys():
                self._expire_item(k)
        finally:
            self._lock.release()

    def _expire_item(self,key):
        """Do the expiration of a dictionary item.

        Remove the item if it has expired by now.

        :Parameters:
            - `key`: key to the object.
        :Types:
            - `key`: any hashable value"""
        (timeout,callback)=self._timeouts[key]
        if timeout<=time.time():
            item = dict.pop(self, key)
            del self._timeouts[key]
            if callback:
                try:
                    callback(key,item)
                except TypeError:
                    try:
                        callback(key)
                    except TypeError:
                        callback()

# vi: sts=4 et sw=4
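For orientation, a brief usage sketch of the class defined above; the keys and timeouts are arbitrary.
# Illustrative use of ExpiringDictionary (values chosen arbitrarily):
d = ExpiringDictionary(default_timeout=300)
d.set_item("session-1", "some state", timeout=60,
           timeout_callback=lambda key, value: None)   # invoked when the item expires
d["session-2"] = "stored with the 300 s default timeout"
d.expire()    # drops every item whose deadline has already passed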
lgpl-2.1
7,930,685,050,958,011,000
30.939189
80
0.576476
false
ucsd-ccbb/Oncolist
src/restLayer/app/TermIdentifier.py
1
36204
__author__ = 'aarongary' import sys import pymongo import requests import MyGeneInfo from itertools import islice from app.util import set_status, create_edges_index from app.status import Status from bson.json_util import dumps from models.TermResolver import TermAnalyzer import ElasticSearch import os from sklearn.linear_model import LinearRegression import numpy as np import app import ESearch def bulk_identify_terms(terms): tr = TermAnalyzer() termsClassified = tr.process_terms_bulk(terms) return_value = { 'termClassification': termsClassified } return return_value def search_term_description(term): tr = TermAnalyzer() termsClassified = tr.process_terms_bulk(term) entrez_summary = ESearch.get_gene_summary_from_entrez(term) return_value = { 'termClassification': termsClassified, 'entrez_summary': entrez_summary } return return_value def bulk_identify_terms2(terms): term_with_id = [] #======================== # Process GENOME terms #======================== analyzed_terms = process_genome_terms(terms) for genome_term in analyzed_terms['special_terms']: a = { 'probabilitiesMap': { 'gene': '0.0', 'icd10': '0.0', 'drug': '0.0', 'disease': '0.0', 'genome': '1.0' }, 'status': 'success', 'termId': genome_term['familiar_term'], 'desc': 'Genome', 'geneSymbol': genome_term['familiar_term'], 'termTitle': genome_term['familiar_term'] + ' (' + genome_term['latin'] + ')' } term_with_id.append(a) terms = analyzed_terms['terms'] #======================== # Process DISEASE terms #======================== analyzed_terms = process_disease_terms(terms) for disease_term in analyzed_terms['special_terms']: a = { 'probabilitiesMap': { 'gene': '0.0', 'icd10': '0.0', 'drug': '0.0', 'disease': '1.0', 'genome': '0.0' }, 'status': 'success', 'termId': disease_term['familiar_term'], 'desc': 'Disease', 'geneSymbol': disease_term['familiar_term'], 'termTitle': disease_term['familiar_term'] + ' (' + disease_term['latin'] + ')' } term_with_id.append(a) terms = analyzed_terms['terms'] if(len(terms) > 0): queryTermArray = terms.split(',') types = ['gene','icd10','drug','disease','genome'] for queryTerm in queryTermArray: termTitle = queryTerm print queryTerm a = { 'probabilitiesMap': {}, 'status': 'success', 'termId': queryTerm.upper(), 'desc': '', 'geneSymbol': '', 'termTitle': queryTerm } term_result = identify_term(queryTerm) #tt = dumps(term_result) if(term_result is None or term_result.count() < 1): term_alt_result = identify_alt_term(queryTerm) #MyGeneInfo.get_gene_info_by_id(queryTerm) cc = dumps(term_alt_result) if(term_alt_result['term'] == 'UNKNOWN'): a['probabilitiesMap'] = { 'gene': '0.0', 'icd10': '0.0', 'drug': '0.0', 'disease': '0.0', 'genome': '0.0' } a['status'] = 'unknown' term_with_id.append(a) else: termDesc = '' termGeneSymbol = '' term_result_types_array = [] if(term_alt_result['type'] == 'GENE'): termDesc = term_alt_result['desc'] termGeneSymbol = term_alt_result['geneSymbol'] termTitle = queryTerm.upper() + ' (' + termGeneSymbol.upper() + ')' a['termId'] = termGeneSymbol.upper() if(term_alt_result['type'] not in term_result_types_array): term_result_types_array.append(term_alt_result['type']) total_found_terms = float(len(term_result_types_array)) for k in types: if(k.upper() in term_result_types_array): a['probabilitiesMap'][k] = str(1.0/total_found_terms) else: a['probabilitiesMap'][k] = str(0.0) a['desc'] = termDesc a['geneSymbol'] = termGeneSymbol a['termTitle'] = termTitle term_with_id.append(a) else: termDesc = '' termGeneSymbol = '' term_result_types_array = [] #tr = dumps(term_result) for 
item_type in term_result: if(item_type['type'] == 'GENE'): termDesc = item_type['desc'] termGeneSymbol = item_type['geneSymbol'] if(len(queryTerm) > 12 and queryTerm[:3] == 'ENS'): termTitle = termGeneSymbol.upper() + ' (' + queryTerm.upper() + ')' a['termId'] = termGeneSymbol.upper() if(item_type['type'] not in term_result_types_array): term_result_types_array.append(item_type['type']) total_found_terms = float(len(term_result_types_array)) for k in types: if(k.upper() in term_result_types_array): a['probabilitiesMap'][k] = str(1.0/total_found_terms) else: a['probabilitiesMap'][k] = str(0.0) a['desc'] = termDesc a['geneSymbol'] = termGeneSymbol a['termTitle'] = termTitle term_with_id.append(a) #print dumps(a) #term_with_id.append(term_result) return_value = { 'termClassification': term_with_id } #print dumps(return_value) return dumps(return_value) def identify_term(name): client = pymongo.MongoClient() db = client.identifiers allterms = db.allterms2 results = allterms.find({'term': name.upper(),'genomeType': 'human'}) return None if results is None else results def identify_alt_term(name): client = pymongo.MongoClient() db = client.identifiers allterms = db.allterms2 gene_alt_id = MyGeneInfo.get_gene_info_by_id(name) results = allterms.find_one({'term': gene_alt_id.upper(),'genomeType': 'human'}) if(results is None): results = { 'term': 'UNKNOWN', 'desc': 'UNKNOWN' } return results #def identify_term(name): # client = pymongo.MongoClient() # db = client.identifiers # allterms = db.allterms # result = allterms.find_one({'term': name.upper()}) # return None if result is None else result def add_terms_from_file(): client = pymongo.MongoClient() db = client.identifiers allterms = db.allterms2 #url = 'http://geneli.st:8181/add-terms1.tsv' #url = 'http://geneli.st:8181/mirna-terms.txt' url = 'http://geneli.st:8181/mirna_label.txt' r = requests.get(url) lines = list(r.iter_lines()) count=0 for idx, line in enumerate(lines): term, term_type = line.split('\t') term_to_add = { 'term': term.upper(), 'type': term_type } allterms.save(term_to_add) count = count + 1 print 'Done' print str(count) def load_variant_to_gene_from_file(): client = pymongo.MongoClient() db = client.identifiers variants = db.variants variants.drop() f_path = os.path.abspath('./variant_vs_gene.txt') f = open(f_path, 'r') count = 0 for line in f: count += 1 if(count % 5000 == 0): print str(count) + ' (' + "{0:.2f}%".format(float(count)/89000000 * 100) + ')' #print str(count) + ' (' + str(count/89000000) + ')c' #if(count > 10000): # break variant, gene = line.split('\t') #print variant + ' - ' + gene insertThisRecord = { 'geneSymbol': gene.rstrip().upper(), 'genomeType': 'human', 'term': variant.upper(), 'type': 'GENE' } variants.save(insertThisRecord) variants.create_index([ ("term", pymongo.ASCENDING) ]) def get_mirna_from_cluster_file(): f = open('/Users/aarongary/Development/DataSets/Terms/BRCA.json', 'r') count = 0 for line in f: if('hsa-' in line): print count count += 1 hsa_items = line.split('hsa-') for hsa_item in hsa_items: print hsa_item def add_biomart_terms_from_file(): client = pymongo.MongoClient() db = client.identifiers allterms = db.allterms2 allterms.drop() #filesToParse = [{'genomeType': 'human', 'url': 'http://geneli.st:8181/biomart/human Homo sapiens protein coding genes.txt','termType': 'GENE'}, # {'genomeType': 'human', 'url': 'http://geneli.st:8181/biomart/add-terms-non-GENE.tsv','termType': 'NONGENE'}] terms_host = 'http://ec2-52-40-169-254.us-west-2.compute.amazonaws.com:3000/Biomart' filesToParse = [ 
#{'genomeType': 'dog', 'url': terms_host + '/dog Canis familiaris protein coding genes.txt','termType': 'GENE'}, #{'genomeType': 'fruitfly', 'url': terms_host + '/fruitfly Drosophila melanogaster protein coding genes.txt','termType': 'GENE'}, #{'genomeType': 'monkey', 'url': terms_host + '/monkey Macaca mulatta protein coding genes.txt','termType': 'GENE'}, #{'genomeType': 'mouse', 'url': terms_host + '/mouse Mus musculus protein coding genes.txt','termType': 'GENE'}, #{'genomeType': 'rat', 'url': terms_host + '/rat Rattus norvegicus protein coding genes.txt','termType': 'GENE'}, #{'genomeType': 'worm', 'url': terms_host + '/worm Caenorhabditis elegans protein coding genes.txt','termType': 'GENE'}, #{'genomeType': 'zebrafish', 'url': terms_host + '/zebrafish Danio rerio protein coding genes.txt','termType': 'GENE'}, #{'genomeType': 'dog', 'url': terms_host + '/dog Canis familiaris mirna genes.txt','termType': 'GENE'}, #{'genomeType': 'fruitfly', 'url': terms_host + '/fruitfly Drosophila melanogaster pre-mirna genes.txt','termType': 'GENE'}, #{'genomeType': 'monkey', 'url': terms_host + '/monkey Macaca mulatta mirna genes.txt','termType': 'GENE'}, #{'genomeType': 'mouse', 'url': terms_host + '/mouse Mus musculus mirna genes.txt','termType': 'GENE'}, #{'genomeType': 'rat', 'url': terms_host + '/rat Rattus norvegicus mirna genes.txt','termType': 'GENE'}, #{'genomeType': 'worm', 'url': terms_host + '/worm Caenorhabditis elegans mirna genes.txt','termType': 'GENE'}, #{'genomeType': 'zebrafish', 'url': terms_host + '/zebrafish Danio rerio mirna genes.txt','termType': 'GENE'}, {'genomeType': 'human', 'url': terms_host + '/add-terms-DISEASE.tsv','termType': 'NONGENE'}, {'genomeType': 'human', 'url': terms_host + '/human Homo sapiens protein coding genes.txt','termType': 'GENE'}, {'genomeType': 'human', 'url': terms_host + '/human Homo sapiens miRNA genes.txt','termType': 'GENE'} ] for f in filesToParse: r = requests.get(f['url'], stream=True) lines = r.iter_lines() lines.next() # ignore header count = 0 for line in lines: count += 1 if(count % 1000 == 0): print count try: if(f['termType'] == 'GENE'): ensGID, desc, geneType, geneStatus, geneSymbol = line.split('\t') insertThisRecord = { 'ensGID': ensGID, 'desc': desc, 'geneType': geneType, 'geneStatus': geneStatus, 'geneSymbol': geneSymbol, 'genomeType': f['genomeType'], 'term': ensGID.upper(), 'type': 'GENE' } allterms.save(insertThisRecord) insertThisInvertedRecord = { 'ensGID': ensGID, 'desc': desc, 'geneType': geneType, 'geneStatus': geneStatus, 'geneSymbol': geneSymbol, 'genomeType': f['genomeType'], 'term': geneSymbol.upper(), 'type': 'GENE' } allterms.save(insertThisInvertedRecord) else: fTerm, fType = line.split('\t') allterms.save({'genomeType': 'human','term': fTerm.upper(),'type': fType}) #allterms.save({'genomeType': 'dog','term': fTerm.upper(),'type': fType}) #allterms.save({'genomeType': 'fruitfly','term': fTerm.upper(),'type': fType}) #allterms.save({'genomeType': 'monkey','term': fTerm.upper(),'type': fType}) #allterms.save({'genomeType': 'mouse','term': fTerm.upper(),'type': fType}) #allterms.save({'genomeType': 'rat','term': fTerm.upper(),'type': fType}) #allterms.save({'genomeType': 'worm','term': fTerm.upper(),'type': fType}) #allterms.save({'genomeType': 'zebrafish','term': fTerm.upper(),'type': fType}) except Exception as e: print 'Didnt work' + e.message print 'Done with file' allterms.ensure_index([("ensGID" , pymongo.ASCENDING)]) allterms.ensure_index([("term" , pymongo.ASCENDING)]) allterms.ensure_index([("type" , 
pymongo.ASCENDING)]) allterms.ensure_index([("geneType" , pymongo.ASCENDING)]) # allterms.create_indexes([ # pymongo.IndexModel([('ensGID', pymongo.ASCENDING)]), # pymongo.IndexModel([('term', pymongo.ASCENDING)]), # pymongo.IndexModel([('type', pymongo.ASCENDING)]), # pymongo.IndexModel([('geneType', pymongo.ASCENDING)]) # ]) print 'Done' return "" def add_terms_from_file_autocomplete(): client = pymongo.MongoClient() db = client.identifiers allterms = db.allterms #url = 'http://geneli.st:8181/add-terms3a.tsv' url = 'http://geneli.st:8181/add-terms3.tsv' r = requests.get(url) lines = list(r.iter_lines()) count=0 for idx, line in enumerate(lines): term, term_type = line.split('\t') #print term term_to_add = { 'term': term.upper(), 'type': term_type } allterms.save(term_to_add) count = count + 1 if(count % 200 == 0): print count #dumps(term_to_add) #allterms.create_indexes([pymongo.IndexModel([('term', pymongo.ASCENDING)])]) print 'Done' def add_terms_from_elasticsearch_autocomplete(): client = pymongo.MongoClient() db = client.identifiers allterms = db.allterms3 count=0 phenotypes = ElasticSearch.get_clinvar_phenotypes() for term in phenotypes: term_to_add = { 'term': term.upper(), 'type': 'ICD10' } allterms.save(term_to_add) count = count + 1 if(count % 200 == 0): print count #dumps(term_to_add) #allterms.create_indexes([pymongo.IndexModel([('term', pymongo.ASCENDING)])]) print 'Done' def load_terms_from_file(): client = pymongo.MongoClient() db = client.identifiers allterms = db.allterms allterms.drop() url = 'http://ec2-52-26-19-122.us-west-2.compute.amazonaws.com:8080/all-terms3.tsv' r = requests.get(url) lines = list(r.iter_lines()) count=0 for idx, line in enumerate(lines): term, term_type = line.split('\t') #print term term_to_add = { 'term': term.upper(), 'type': term_type } allterms.save(term_to_add) count = count + 1 if(count % 200 == 0): print count #dumps(term_to_add) allterms.create_indexes([pymongo.IndexModel([('term', pymongo.ASCENDING)])]) print 'Done' def process_genome_terms(terms): terms_uppercase = terms.upper() return_value = [] genome_id_kv = [ {'k': 'CANIS,FAMILIARIS', 'v': 'DOG'}, {'k': 'DROSOPHILA,MELANOGASTER', 'v': 'FRUITFLY'}, {'k': 'HOMO,SAPIEN', 'v': 'HUMAN'}, {'k': 'MACACA,MULATTA', 'v': 'MONKEY'}, {'k': 'MUS,MUSCULUS', 'v': 'MOUSE'}, {'k': 'RATTUS,NORVEGICUS', 'v': 'RAT'}, {'k': 'CAENORHABDITIS,ELEGANS', 'v': 'WORM'}, {'k': 'DANIO,RERIO', 'v': 'ZEBRAFISH'} ] for kv in genome_id_kv: if(kv['k'] in terms_uppercase): terms_uppercase = terms_uppercase.replace(kv['k'], '').replace(',,',',') return_value.append({'latin': kv['k'].replace(',',' '), 'familiar_term': kv['v']}) if(terms_uppercase[0:1] == ','): terms_uppercase = terms_uppercase[1:-1] if(terms_uppercase == ','): terms_uppercase = '' print terms_uppercase return {'terms': terms_uppercase, 'special_terms': return_value} def process_disease_terms(terms): terms_uppercase = terms.upper() return_value = [] genome_id_kv = [ {'k': 'BLADDER,CANCER', 'v': 'BLCA'}, {'k': 'BRAIN,CANCER', 'v': 'LGG'}, {'k': 'BREAST,CANCER', 'v': 'BRCA'}, {'k': 'CERVICAL,CANCER', 'v': 'CESC'}, {'k': 'ENDOCERVICAL,CANCER', 'v': 'CESC'}, {'k': 'CERVICAL,CANCER', 'v': 'CESC'}, {'k': 'CHOLANGIOCARCINOMA', 'v': 'CHOL'}, {'k': 'BILE,DUCT,CANCER', 'v': 'CHOL'}, {'k': 'COLON,CANCER', 'v': 'COAD'}, {'k': 'ESOPHAGEAL,CANCER', 'v': 'ESCA'}, {'k': 'GLIOBLASTOMA,CANCER', 'v': 'GBM'}, #Wikify {'k': 'HEAD,AND,NECK,CANCER', 'v': 'HNSC'}, {'k': 'NECK,CANCER', 'v': 'HNSC'}, {'k': 'HEAD,CANCER', 'v': 'HNSC'}, {'k': 'KIDNEY,CHROMOPHOBE', 'v': 'KICH'}, 
{'k': 'KIDNEY,RENAL,CLEAR,CELL,CARCINOMA', 'v': 'KIRC'}, #Wikify {'k': 'KIDNEY,RENAL,PAPILLARY,CELL,CARCINOMA', 'v': 'KIRP'}, {'k': 'LIVER,CANCER', 'v': 'LIHC'}, {'k': 'LUNG,CANCER', 'v': 'LUAD'}, {'k': 'LUNG,SQUAMOUS,CELL,CARCINOMA', 'v': 'LUSC'}, #Wikify {'k': 'LYMPHOID,CANCER', 'v': 'DLBC'}, {'k': 'LYMPHOMA,CANCER', 'v': 'DLBC'}, {'k': 'MESOTHELIOMA,CANCER', 'v': 'MESO'}, {'k': 'OVARIAN,CANCER', 'v': 'OV'}, {'k': 'PANCREATIC,CANCER', 'v': 'PAAD'}, {'k': 'PHEOCHROMOCYTOMA,CANCER', 'v': 'PCPG'}, {'k': 'PARAGANGLIOMA,CANCER', 'v': 'PCPG'}, {'k': 'PROSTATE,CANCER', 'v': 'PRAD'}, {'k': 'RECTUM,CANCER', 'v': 'READ'}, {'k': 'SARCOMA,CANCER', 'v': 'SARC'}, {'k': 'SKIN,CANCER', 'v': 'SKCM'}, {'k': 'STOMACH,CANCER', 'v': 'STAD'}, {'k': 'TESTICULAR,CANCER', 'v': 'TGCT'}, {'k': 'THYMOMA,CANCER', 'v': 'THYM'}, #Wikify {'k': 'THYROID,CANCER', 'v': 'THCA'}, {'k': 'UTERINE,CANCER', 'v': 'UCS'}, {'k': 'UTERINE,CORPUS,ENDOMETRIAL,CANCER', 'v': 'UCEC'}, #Wikify {'k': 'UVEAL,MELANOMA,CANCER', 'v': 'UVM'}, {'k': 'UVEAL,CANCER', 'v': 'UVM'}, {'k': 'LEUKEMIA', 'v': 'LAML'}, {'k': 'MYELOID,LEUKEMIA', 'v': 'LAML'}, {'k': 'ADRENOCORTICAL,CARCINOMA', 'v': 'ACC'}, {'k': 'BLADDER,UROTHELIAL,CARCINOMA', 'v': 'BLCA'}, {'k': 'BRAIN,LOWER,GRADE,GLIOMA', 'v': 'LGG'}, {'k': 'BREAST,INVASIVE,CARCINOMA', 'v': 'BRCA'}, {'k': 'CERVICAL,SQUAMOUS,CELL,CARCINOMA', 'v': 'CESC'}, {'k': 'ENDOCERVICAL,ADENOCARCINOMA', 'v': 'CESC'}, {'k': 'CHOLANGIOCARCINOMA', 'v': 'CHOL'}, {'k': 'COLON,ADENOCARCINOMA', 'v': 'COAD'}, {'k': 'ESOPHAGEAL,CARCINOMA', 'v': 'ESCA'}, {'k': 'GLIOBLASTOMA,MULTIFORME', 'v': 'GBM'}, {'k': 'HEAD,AND,NECK,SQUAMOUS,CELL,CARCINOMA', 'v': 'HNSC'}, {'k': 'KIDNEY,CHROMOPHOBE', 'v': 'KICH'}, {'k': 'KIDNEY,RENAL,CLEAR,CELL,CARCINOMA', 'v': 'KIRC'}, {'k': 'KIDNEY,RENAL,PAPILLARY,CELL,CARCINOMA', 'v': 'KIRP'}, {'k': 'LIVER,HEPATOCELLULAR,CARCINOMA', 'v': 'LIHC'}, {'k': 'LUNG,ADENOCARCINOMA', 'v': 'LUAD'}, {'k': 'LUNG,SQUAMOUS,CELL,CARCINOMA', 'v': 'LUSC'}, {'k': 'LYMPHOID,NEOPLASM,DIFFUSE,LARGE,B-CELL,LYMPHOMA', 'v': 'DLBC'}, {'k': 'MESOTHELIOMA', 'v': 'MESO'}, {'k': 'OVARIAN,SEROUS,CYSTADENOCARCINOMA', 'v': 'OV'}, {'k': 'PANCREATIC,ADENOCARCINOMA', 'v': 'PAAD'}, {'k': 'PHEOCHROMOCYTOMA', 'v': 'PCPG'}, {'k': 'PARAGANGLIOMA', 'v': 'PCPG'}, {'k': 'PROSTATE,ADENOCARCINOMA', 'v': 'PRAD'}, {'k': 'RECTUM,ADENOCARCINOMA', 'v': 'READ'}, {'k': 'SARCOMA', 'v': 'SARC'}, {'k': 'SKIN,CUTANEOUS,MELANOMA', 'v': 'SKCM'}, {'k': 'STOMACH,ADENOCARCINOMA', 'v': 'STAD'}, {'k': 'TESTICULAR,GERM,CELL,TUMORS', 'v': 'TGCT'}, {'k': 'THYMOMA', 'v': 'THYM'}, {'k': 'THYROID,CARCINOMA', 'v': 'THCA'}, {'k': 'UTERINE,CARCINOSARCOMA', 'v': 'UCS'}, {'k': 'UTERINE,CORPUS,ENDOMETRIAL,CARCINOMA', 'v': 'UCEC'}, {'k': 'UVEAL,MELANOMA', 'v': 'UVM'} ] for kv in genome_id_kv: if(kv['k'] in terms_uppercase): terms_uppercase = terms_uppercase.replace(kv['k'], '').replace(',,',',') return_value.append({'latin': kv['k'].replace(',',' '), 'familiar_term': kv['v']}) if(terms_uppercase[0:1] == ','): terms_uppercase = terms_uppercase[1:-1] if(terms_uppercase == ','): terms_uppercase = '' print terms_uppercase return {'terms': terms_uppercase, 'special_terms': return_value} def auto_complete_search(term): tr = TermAnalyzer() termsClassified = tr.identify_term_partial(term) return_value = { 'termClassification': termsClassified } return return_value def test_linear_classifier(): est = LinearRegression(fit_intercept=False) # random training data X = np.random.rand(10, 2) y = np.random.randint(2, size=10) est.fit(X, y) est.coef_ # access coefficients def 
load_disease_groups(): disease_groups_array = [{ 'genomeType': 'human', 'term': 'Adrenocortical Cancer ', 'group': 'Adrenal', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Adrenocortical Carcinoma ', 'group': 'Adrenal', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Pheochromocytoma and Paraganglioma ', 'group': 'Adrenal', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Cholangiocarcinoma ', 'group': 'Bile', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Cholangiocarcinoma ', 'group': 'Bile', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Bladder Cancer', 'group': 'Bladder', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Bladder Urothelial Carcinoma ', 'group': 'Bladder', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Brain Lower Grade Glioma ', 'group': 'Brain', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Glioblastoma ', 'group': 'Brain', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Glioblastoma Multiforme', 'group': 'Brain', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Glioblastoma Multiforme and Brain Lower Grade Glioma ', 'group': 'Brain', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Glioma High Grade', 'group': 'Brain', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Breast Invasive Carcinoma ', 'group': 'Breast', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Breast Tumors RNA', 'group': 'Breast', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Cervical Cancer ChemoradioResistant', 'group': 'Cervical', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Cervical Squamous Cell Carcinoma and Endocervical Adenocarcinoma ', 'group': 'Cervical', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Colon Adenocarcinoma', 'group': 'Colon', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Colon Adenocarcinoma and Rectum adenocarcinoma ', 'group': 'colon', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Colon Cancer ', 'group': 'colon', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Ulcerative Colitis Colon Inflammation ', 'group': 'colon', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Endometrial Cancer Stage I', 'group': 'Endometrial', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Esophageal Cancer', 'group': 'Esophagus', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Esophageal Carcinoma', 'group': 'Esophagus', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Head and Neck ', 'group': 'HeadAndNeck', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Head and Neck Squamous Cell Carcinoma ', 'group': 'HeadAndNeck', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Kidney Chromophobe ', 'group': 'Kidney', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Kidney Chromophobe and Kidney Renal Clear Cell Carcinoma and Kidney Renal Papillary Cell Carcinoma', 'group': 'Kidney', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Kidney Renal Clear Cell Carcinoma ', 'group': 'Kidney', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Kidney Renal Clear Cell Carcinoma ', 'group': 'Kidney', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Kidney Renal Papillary Cell Carcinoma ', 'group': 'Kidney', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Renal Cell Carcinoma', 'group': 'Kidney', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Acute Myeloid Leukemia ', 'group': 'Leukemia', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Acute Myeloid Leukemia ', 'group': 'Leukemia', 'type': 
'DISEASE' }, { 'genomeType': 'human', 'term': 'Hepatocellular Carcinoma ', 'group': 'Liver', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Liver Hepatocellular Carcinoma ', 'group': 'Liver', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Liver Hepatocellular Carcinoma Early Stage Cirrhosis ', 'group': 'Liver', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Blood Lung Cancer', 'group': 'Lung', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Blood Lung Cancer Stage I ', 'group': 'Lung', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Lung Adenocarcinoma ', 'group': 'Lung', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Lung Squamous Cell Carcinoma ', 'group': 'Lung', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Diffuse Large B-Cell Lymphoma', 'group': 'Lymphoma', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Lymphoid Neoplasm Diffuse Large B-cell Lymphoma', 'group': 'Lymphoma', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Mesothelioma ', 'group': 'Ovarian', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Ovarian Cancer', 'group': 'Ovarian', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Ovarian Serous Cystadenocarcinoma ', 'group': 'Ovarian', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Pancreatic ', 'group': 'Pancreatic', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Pancreatic Adenocarcinoma ', 'group': 'Pancreatic', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Pancreatic Ductal Adenocarcinoma', 'group': 'Pancreatic', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Prostate Adenocarcinoma', 'group': 'Prostate', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Prostate Carcinoma ', 'group': 'Prostate', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Rectal Cancer ', 'group': 'Rectal', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Rectum Adenocarcinoma ', 'group': 'Rectal', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Sarcoma ', 'group': 'Sarcoma', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Sarcoma ', 'group': 'Sarcoma', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Melanoma Malignant ', 'group': 'Skin', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Skin Cutaneous Melanoma', 'group': 'Skin', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Stomach Adenocarcinoma ', 'group': 'Stomach', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Stomach and Esophageal Carcinoma', 'group': 'Stomach', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Stomach Cancer 126 ', 'group': 'Stomach', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Testicular Germ Cell Tumors ', 'group': 'Testicular', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Thymoma ', 'group': 'Thymus', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Thyroid Cancer', 'group': 'Thyroid', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Thyroid Carcinoma', 'group': 'Thyroid', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Uterine Carcinosarcoma ', 'group': 'Uterine', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Uterine Corpus Endometrial Carcinoma ', 'group': 'Uterine', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Uveal Melanoma', 'group': 'Uveal', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Uveal Melanoma', 'group': 'Uveal', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Adrenal ', 'group': 'Adrenal', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Bile ', 
'group': 'Bile', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Bladder ', 'group': 'Bladder', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Brain', 'group': 'Brain', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Breast ', 'group': 'Breast', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Cervical', 'group': 'Cervical', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'colon', 'group': 'colon', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Endometrial', 'group': 'Endometrial', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Esophagus ', 'group': 'Esophagus', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'HeadAndNeck', 'group': 'HeadAndNeck', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Kidney ', 'group': 'Kidney', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Leukemia', 'group': 'Leukemia', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Liver', 'group': 'Liver', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Lung ', 'group': 'Lung', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Lymphoma', 'group': 'Lymphoma', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Ovarian ', 'group': 'Ovarian', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Pancreatic ', 'group': 'Pancreatic', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Prostate', 'group': 'Prostate', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Rectal ', 'group': 'Rectal', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Sarcoma ', 'group': 'Sarcoma', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Skin ', 'group': 'Skin', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Stomach ', 'group': 'Stomach', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Testicular ', 'group': 'Testicular', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Thymus ', 'group': 'Thymus', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Thyroid ', 'group': 'Thyroid', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Uterine ', 'group': 'Uterine', 'type': 'DISEASE' }, { 'genomeType': 'human', 'term': 'Uveal', 'group': 'Uveal', 'type': 'DISEASE' }] client = pymongo.MongoClient() db = client.identifiers allterms = db.allterms2 #allterms.drop() for disease in disease_groups_array: allterms.save({'genomeType': disease['genomeType'],'term': disease['term'].upper(),'type': disease['type'], 'group': disease['group']})
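As a rough illustration of the pre-processing above (no database access involved), process_disease_terms rewrites recognised disease phrases inside a comma-separated query into TCGA-style cohort codes:
# Illustrative call; the output shape follows the function definition above.
result = process_disease_terms('BRCA1,breast,cancer,TP53')
# result['special_terms'] -> [{'latin': 'BREAST CANCER', 'familiar_term': 'BRCA'}]
# result['terms']         -> 'BRCA1,TP53'  (remaining terms, upper-cased)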
mit
2,962,690,134,142,557,700
28.196774
149
0.487626
false
robinson96/GRAPE
vine/grapeMenu.py
1
6832
import traceback

import addSubproject
import bundle
import branches
import checkout
import clone
import commit
import config
import deleteBranch
import foreach
import grapeConfig
import grapeGit as git
import hooks
import merge
import mergeDevelop
import mergeRemote
import newFlowBranch
import newWorkingTree
import publish
import pull
import push
import quit
import resolveConflicts
import resumable
import review
import stash
import status
import grapeTest as test
import updateLocal
import updateSubproject
import updateView
import utility
import version
import walkthrough

#######################################################################
#The Menu class - encapsulates menu options and sections.
# Menu Options are the objects that perform git-related or bitbucket-related tasks.
# sections are groupings of menu options that are displayed together.
######################################################################
__menuInstance = None


def menu():
    global __menuInstance
    if __menuInstance is None:
        __menuInstance = _Menu()
        grapeConfig.readDefaults()
        grapeConfig.read()
        __menuInstance.postInit()
    return __menuInstance


def _resetMenu():
    """
    Resets the Singleton Instance. Meant for testing purposes only.
    """
    global __menuInstance
    __menuInstance = None
    grapeConfig.resetGrapeConfig()


class _Menu(object):
    def __init__(self):
        self._options = {}
        #Add menu classes
        self._optionLookup = {}
        #Add/order your menu option here
        self._options = [addSubproject.AddSubproject(),
                         bundle.Bundle(),
                         bundle.Unbundle(),
                         branches.Branches(),
                         status.Status(),
                         stash.Stash(),
                         checkout.Checkout(),
                         push.Push(),
                         pull.Pull(),
                         commit.Commit(),
                         publish.Publish(),
                         clone.Clone(),
                         config.Config(),
                         grapeConfig.WriteConfig(),
                         foreach.ForEach(),
                         merge.Merge(),
                         mergeDevelop.MergeDevelop(),
                         mergeRemote.MergeRemote(),
                         deleteBranch.DeleteBranch(),
                         newWorkingTree.NewWorkingTree(),
                         resolveConflicts.ResolveConflicts(),
                         review.Review(),
                         test.Test(),
                         updateLocal.UpdateLocal(),
                         updateSubproject.UpdateSubproject(),
                         hooks.InstallHooks(),
                         hooks.RunHook(),
                         updateView.UpdateView(),
                         version.Version(),
                         walkthrough.Walkthrough(),
                         quit.Quit()]
        #Add/order the menu sections here
        self._sections = ['Getting Started', 'Code Reviews', 'Workspace', 'Merge',
                          'Gitflow Tasks', 'Hooks', 'Patches', 'Project Management', 'Other']

    def postInit(self):
        # add dynamically generated (dependent on grapeConfig) options here
        self._options = self._options + \
            newFlowBranch.NewBranchOptionFactory().createNewBranchOptions(grapeConfig.grapeConfig())
        for currOption in self._options:
            self._optionLookup[currOption.key] = currOption

    ####### MENU STUFF #########################################################################
    def getOption(self, choice):
        try:
            return self._optionLookup[choice]
        except KeyError:
            print("Unknown option '%s'" % choice)
            return None

    def applyMenuChoice(self, choice, args=None, option_args=None, globalArgs=None):
        chosen_option = self.getOption(choice)
        if chosen_option is None:
            return False
        if args is None or len(args) == 0:
            args = [chosen_option._key]
        #first argument better be the key
        if args[0] != chosen_option._key:
            args = [chosen_option._key]+args
        # use optdoc to parse arguments to the chosen_option.
        # utility.argParse also does the magic of filling in defaults from the config files as appropriate.
        if option_args is None and chosen_option.__doc__:
            try:
                config = chosen_option._config
                if config is None:
                    config = grapeConfig.grapeConfig()
                else:
                    config = grapeConfig.grapeRepoConfig(config)
                option_args = utility.parseArgs(chosen_option.__doc__, args[1:], config)
            except SystemExit as e:
                if len(args) > 1 and "--help" != args[1] and "-h" != args[1]:
                    print("GRAPE PARSING ERROR: could not parse %s\n" % (args[1:]))
                raise e
        if globalArgs is not None:
            utility.applyGlobalArgs(globalArgs)
        try:
            if isinstance(chosen_option, resumable.Resumable):
                if option_args["--continue"]:
                    return chosen_option._resume(option_args)
            return chosen_option.execute(option_args)
        except git.GrapeGitError as e:
            print traceback.print_exc()
            print ("GRAPE: Uncaught Error %s in grape-%s when executing '%s' in '%s'\n%s" %
                   (e.code, chosen_option._key, e.gitCommand, e.cwd, e.gitOutput))
            exit(e.code)
        except utility.NoWorkspaceDirException as e:
            print ("GRAPE: grape %s must be run from a grape workspace." % chosen_option.key)
            print ("GRAPE: %s" % e.message)
            exit(1)
        finally:
            if globalArgs is not None:
                utility.popGlobalArgs()

    # Present the main menu
    def presentTextMenu(self):
        width = 60
        print("GRAPE - Git Replacement for \"Awesome\" PARSEC Environment".center(width, '*'))
        longest_key = 0
        for currOption in self._options:
            if len(currOption.key) > longest_key:
                longest_key = len(currOption.key)

        for currSection in self._sections:
            lowered_section = currSection.strip().lower()
            print("\n" + (" %s " % currSection).center(width, '*'))
            for currOption in self._options:
                if currOption.section.strip().lower() != lowered_section:
                    continue
                print("%s: %s" % (currOption.key.ljust(longest_key), currOption.description()))

    # configures a ConfigParser object with all default values and sections needed by our Option objects
    def setDefaultConfig(self, cfg):
        cfg.ensureSection("repo")
        cfg.set("repo", "name", "repo_name_not.yet.configured")
        cfg.set("repo", "url", "https://not.yet.configured/scm/project/unknown.git")
        cfg.set("repo", "httpsbase", "https://not.yet.configured")
        cfg.set("repo", "sshbase", "ssh://[email protected]")
        for currOption in self._options:
            currOption.setDefaultConfig(cfg)
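A small sketch of how the singleton menu above is typically driven; the "status" key is chosen for illustration only, and running it assumes a configured grape workspace.
m = menu()                   # builds _Menu once, reads grape config, runs postInit()
m.presentTextMenu()          # prints the sectioned option listing
m.applyMenuChoice("status", ["status"])   # dispatches to the matching option object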
bsd-3-clause
-6,110,235,504,380,364,000
38.72093
139
0.584602
false
gluke77/rally
rally/common/db/api.py
1
13473
# Copyright 2013: Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Defines interface for DB access. The underlying driver is loaded as a :class:`LazyPluggable`. Functions in this module are imported into the rally.common.db namespace. Call these functions from rally.common.db namespace, not the rally.common.db.api namespace. All functions in this module return objects that implement a dictionary-like interface. Currently, many of these objects are sqlalchemy objects that implement a dictionary interface. However, a future goal is to have all of these objects be simple dictionaries. **Related Flags** :backend: string to lookup in the list of LazyPluggable backends. `sqlalchemy` is the only supported backend right now. :connection: string specifying the sqlalchemy connection to use, like: `sqlite:///var/lib/cinder/cinder.sqlite`. :enable_new_services: when adding a new service to the database, is it in the pool of available hardware (Default: True) """ import datetime as dt from oslo_config import cfg from oslo_db import api as db_api from oslo_db import options as db_options import six from rally.common.i18n import _ CONF = cfg.CONF db_options.set_defaults(CONF, connection="sqlite:////tmp/rally.sqlite", sqlite_db="rally.sqlite") IMPL = None def serialize(fn): def conv(data): if data is None: return None if isinstance(data, (six.integer_types, six.string_types, six.text_type, dt.date, dt.time, float, )): return data if isinstance(data, dict): return {k: conv(v) for k, v in six.iteritems(data)} if isinstance(data, (list, tuple)): return [conv(i) for i in data] if hasattr(data, "_as_dict"): result = data._as_dict() for k, v in six.iteritems(result): result[k] = conv(v) return result raise ValueError(_("Can not serialize %s") % data) def wrapper(*args, **kwargs): result = fn(*args, **kwargs) return conv(result) return wrapper def get_impl(): global IMPL if not IMPL: _BACKEND_MAPPING = {"sqlalchemy": "rally.common.db.sqlalchemy.api"} IMPL = db_api.DBAPI.from_config(CONF, backend_mapping=_BACKEND_MAPPING) return IMPL def engine_reset(): """Reset DB engine.""" get_impl().engine_reset() def schema_cleanup(): """Drop DB schema. This method drops existing database.""" get_impl().schema_cleanup() def schema_upgrade(revision=None): """Migrate the database to `revision` or the most recent revision.""" return get_impl().schema_upgrade(revision) def schema_create(): """Create database schema from models description.""" return get_impl().schema_create() def schema_revision(): """Return the schema revision.""" return get_impl().schema_revision() def schema_stamp(revision): """Stamps database with provided revision.""" return get_impl().schema_stamp(revision) def task_get(uuid): """Returns task by uuid. :param uuid: UUID of the task. :raises TaskNotFound: if the task does not exist. :returns: task dict with data on the task. """ return get_impl().task_get(uuid) def task_get_status(uuid): """Returns task by uuid. :param uuid: UUID of the task. 
:raises TaskNotFound: if the task does not exist. :returns: task dict with data on the task. """ return get_impl().task_get_status(uuid) def task_get_detailed_last(): """Returns the most recently created task.""" return get_impl().task_get_detailed_last() def task_get_detailed(uuid): """Returns task with results by uuid. :param uuid: UUID of the task. :returns: task dict with data on the task and its results. """ return get_impl().task_get_detailed(uuid) def task_create(values): """Create task record in DB. :param values: dict with record values. :returns: task dict with data on the task. """ return get_impl().task_create(values) def task_update(uuid, values): """Update task by values. :param uuid: UUID of the task. :param values: dict with record values. :raises TaskNotFound: if the task does not exist. :returns: new updated task dict with data on the task. """ return get_impl().task_update(uuid, values) def task_update_status(task_uuid, status, allowed_statuses): """Update task status with specified value. :param task_uuid: string with UUID of Task instance. :param status: new value to wrote into db instead of status. :param allowed_statuses: list of expected statuses to update in db. :raises RallyException: if task not found with specified status. :returns: the count of rows match as returned by the database's "row count" feature """ return get_impl().task_update_status(task_uuid, allowed_statuses, status) def task_list(status=None, deployment=None): """Get a list of tasks. :param status: Task status to filter the returned list on. If set to None, all the tasks will be returned. :param deployment: deployment UUID to filter the returned list on. if set to None tasks from all deployments well be returned. :returns: A list of dicts with data on the tasks. """ return get_impl().task_list(status=status, deployment=deployment) def task_delete(uuid, status=None): """Delete a task. This method removes the task by the uuid, but if the status argument is specified, then the task is removed only when these statuses are equal otherwise an exception is raised. :param uuid: UUID of the task. :raises TaskNotFound: if the task does not exist. :raises TaskInvalidStatus: if the status of the task does not equal to the status argument. """ return get_impl().task_delete(uuid, status=status) def task_result_get_all_by_uuid(task_uuid): """Get list of task results. :param task_uuid: string with UUID of Task instance. :returns: list instances of TaskResult. """ return get_impl().task_result_get_all_by_uuid(task_uuid) def task_result_create(task_uuid, key, data): """Append result record to task. :param task_uuid: string with UUID of Task instance. :param key: key expected to update in task result. :param data: data expected to update in task result. :returns: TaskResult instance appended. """ return get_impl().task_result_create(task_uuid, key, data) def deployment_create(values): """Create a deployment from the values dictionary. :param values: dict with record values on the deployment. :returns: a dict with data on the deployment. """ return get_impl().deployment_create(values) def deployment_delete(uuid): """Delete a deployment by UUID. :param uuid: UUID of the deployment. :raises DeploymentNotFound: if the deployment does not exist. :raises DeploymentIsBusy: if the resource is not enough. """ return get_impl().deployment_delete(uuid) def deployment_get(deployment): """Get a deployment by UUID. :param deployment: UUID or name of the deployment. :raises DeploymentNotFound: if the deployment does not exist. 
:returns: a dict with data on the deployment. """ return get_impl().deployment_get(deployment) def deployment_update(uuid, values): """Update a deployment by values. :param uuid: UUID of the deployment. :param values: dict with items to update. :raises DeploymentNotFound: if the deployment does not exist. :returns: a dict with data on the deployment. """ return get_impl().deployment_update(uuid, values) def deployment_list(status=None, parent_uuid=None, name=None): """Get list of deployments. :param status: if None returns any deployments with any status. :param parent_uuid: filter by parent. If None, return only "root" deployments. :param name: Name of deployment :returns: a list of dicts with data on the deployments. """ return get_impl().deployment_list(status=status, parent_uuid=parent_uuid, name=name) def resource_create(values): """Create a resource from the values dictionary. :param values: a dict with data on the resource. :returns: a dict with updated data on the resource. """ return get_impl().resource_create(values) def resource_get_all(deployment_uuid, provider_name=None, type=None): """Return resources of a deployment. :param deployment_uuid: filter by uuid of a deployment :param provider_name: filter by provider_name, if is None, then return all providers :param type: filter by type, if is None, then return all types :returns: a list of dicts with data on a resource """ return get_impl().resource_get_all(deployment_uuid, provider_name=provider_name, type=type) def resource_delete(id): """Delete a resource. :param id: ID of a resource. :raises ResourceNotFound: if the resource does not exist. """ return get_impl().resource_delete(id) def verification_create(deployment_uuid): """Create Verification record in DB. :param deployment_uuid: UUID of the deployment. :returns: a dict with verification data. """ return get_impl().verification_create(deployment_uuid) def verification_get(verification_uuid): """Returns verification by UUID. :param verification_uuid: UUID of the verification. :raises NotFoundException: if verification does not exist. :returns: a dict with verification data. """ return get_impl().verification_get(verification_uuid) def verification_delete(verification_uuid): """Delete verification. :param verification_uuid: UUID of the verification. :raises NotFoundException: if verification does not exist. """ return get_impl().verification_delete(verification_uuid) def verification_update(uuid, values): """Update verification by values. :param uuid: UUID of the verification. :param values: dict with record values. :raises NotFoundException: if verification does not exist. :returns: new updated task dict with data on the task. """ return get_impl().verification_update(uuid, values) def verification_list(status=None): """Get a list of verifications. :param status: Verification status to filter the returned list on. :returns: A list of dicts with data on the verifications. """ return get_impl().verification_list(status=status) def verification_result_get(verification_uuid): """Get dict of verification results. :param verification_uuid: string with UUID of Verification instance. :returns: dict instance of VerificationResult. """ return get_impl().verification_result_get(verification_uuid) def verification_result_create(verification_uuid, values): """Append result record to verification. :param verification_uuid: string with UUID of Verification instance. :param values: dict with record values. :returns: TaskResult instance appended. 
""" return get_impl().verification_result_create(verification_uuid, values) def register_worker(values): """Register a new worker service at the specified hostname. :param values: A dict of values which must contain the following: { "hostname": the unique hostname which identifies this worker service. } :returns: A worker. :raises WorkerAlreadyRegistered: if worker already registered """ return get_impl().register_worker(values) def get_worker(hostname): """Retrieve a worker service record from the database. :param hostname: The hostname of the worker service. :returns: A worker. :raises WorkerNotFound: if worker not found """ return get_impl().get_worker(hostname) def unregister_worker(hostname): """Unregister this worker with the service registry. :param hostname: The hostname of the worker service. :raises WorkerNotFound: if worker not found """ get_impl().unregister_worker(hostname) def update_worker(hostname): """Mark a worker as active by updating its "updated_at" property. :param hostname: The hostname of this worker service. :raises WorkerNotFound: if worker not found """ get_impl().update_worker(hostname)
apache-2.0
8,278,875,098,060,252,000
29.620455
79
0.66496
false
stackforge/cloudbase-init
cloudbaseinit/plugins/common/userdataplugins/cloudconfig.py
1
4200
# Copyright 2013 Mirantis Inc.
# Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log as oslo_logging
import yaml

from cloudbaseinit import conf as cloudbaseinit_conf
from cloudbaseinit.plugins.common import execcmd
from cloudbaseinit.plugins.common.userdataplugins import base
from cloudbaseinit.plugins.common.userdataplugins.cloudconfigplugins import (
    factory
)

CONF = cloudbaseinit_conf.CONF
LOG = oslo_logging.getLogger(__name__)

DEFAULT_ORDER_VALUE = 999


class CloudConfigError(Exception):
    pass


class CloudConfigPluginExecutor(object):
    """A simple executor class for processing cloud-config plugins.

    :kwarg plugins:
        Pairs of plugin names and the values corresponding to that plugin.
    """

    def __init__(self, **plugins):
        def _lookup_priority(plugin):
            all_plugins = (CONF.cloud_config_plugins or
                           list(factory.PLUGINS.keys()))
            # return the order from the config or default list
            try:
                return all_plugins.index(plugin)
            except ValueError:
                # If plugin is not supported or does not exist
                # default to a sane and unreachable value.
                return DEFAULT_ORDER_VALUE

        self._expected_plugins = sorted(
            plugins.items(),
            key=lambda item: _lookup_priority(item[0]))

    @classmethod
    def from_yaml(cls, stream):
        """Initialize an executor from an yaml stream."""
        loader = getattr(yaml, 'CLoader', yaml.Loader)
        try:
            content = yaml.load(stream, Loader=loader)
        except (TypeError, ValueError, AttributeError):
            raise CloudConfigError("Invalid yaml stream provided.")
        if not content:
            raise CloudConfigError("Empty yaml stream provided.")
        return cls(**content)

    def execute(self):
        """Call each plugin, in the order defined by _lookup_priority"""
        reboot = execcmd.NO_REBOOT
        plugins = factory.load_plugins()
        for plugin_name, value in self._expected_plugins:
            if CONF.cloud_config_plugins:
                try:
                    CONF.cloud_config_plugins.index(plugin_name)
                except ValueError:
                    LOG.info("Plugin %r is disabled", plugin_name)
                    continue
            method = plugins.get(plugin_name)
            if not method:
                LOG.error("Plugin %r is currently not supported", plugin_name)
                continue
            try:
                requires_reboot = method(value)
                if requires_reboot:
                    reboot = execcmd.RET_END
            except Exception:
                LOG.exception("Processing plugin %s failed", plugin_name)
        return reboot


class CloudConfigPlugin(base.BaseUserDataPlugin):

    def __init__(self):
        super(CloudConfigPlugin, self).__init__("text/cloud-config")

    def process_non_multipart(self, part):
        """Process the given data, if it can be loaded through yaml.

        If any plugin requires a reboot, it will return a particular
        value, which will be processed on a higher level.
        """
        try:
            executor = CloudConfigPluginExecutor.from_yaml(part)
        except CloudConfigError as ex:
            LOG.error('Could not process part type %(type)r: %(err)r',
                      {'type': type(part), 'err': str(ex)})
        else:
            return executor.execute()

    def process(self, part):
        payload = part.get_payload(decode=True)
        return self.process_non_multipart(payload)
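For context, a short sketch of feeding the executor above a cloud-config document; whether the named plugin is available depends on the local cloudbase-init installation, so this is illustrative only.
# Illustrative driver for CloudConfigPluginExecutor (plugin availability
# depends on the local cloudbase-init install).
userdata = """
write_files:
  - path: C:/example/hello.txt
    content: hello world
"""
executor = CloudConfigPluginExecutor.from_yaml(userdata)
reboot_request = executor.execute()   # execcmd.NO_REBOOT unless a plugin asks otherwise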
apache-2.0
6,654,339,422,640,970,000
34.294118
78
0.628095
false
ctb/2014-streaming
pipeline/sam-scan-to-coverage-dict.py
1
2919
#! /usr/bin/env python
import sys
import argparse
import screed
import cPickle


def ignore_at(iter):
    for item in iter:
        if item.startswith('@'):
            continue
        yield item


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('genome')
    parser.add_argument('samfile')
    parser.add_argument('coverage_d_pickle')
    parser.add_argument('covhist')
    args = parser.parse_args()

    coords_d = {}
    for record in screed.open(args.genome):
        coords_d[record.name] = [0]*len(record.sequence)

    n = 0
    n_skipped = 0
    for samline in ignore_at(open(args.samfile)):
        n += 1
        if n % 10000 == 0:
            print >>sys.stderr, '...', n

        readname, _, refname, refpos, _, _, _, _, _, seq = samline.split()[:10]
        if refname == '*' or refpos == '*':
            # (don't count these as skipped.)
            continue

        refpos = int(refpos)
        try:
            coord = coords_d[refname]
            for pos in range(len(seq)):
                coord[refpos - 1 + pos] += 1
        except KeyError:
            print >>sys.stderr, "unknown refname: %s; ignoring (read %s)" % (refname, readname)
            n_skipped += 1
            continue

    if n_skipped / float(n) > .01:
        raise Exception, "Error: too many reads ignored! %d of %d" % \
            (n_skipped, n)

    # now, calculate coverage per read!
    coverage_d = {}
    total = 0.
    n = 0
    for samline in ignore_at(open(args.samfile)):
        readname, _, refname, refpos, _, _, _, _, _, seq = samline.split()[:10]
        if refname == '*' or refpos == '*':
            # (don't count these as skipped.)
            continue

        refpos = int(refpos)
        try:
            coord = coords_d[refname]
        except KeyError:
            continue

        slice = list(coord[refpos - 1:refpos - 1 + len(seq)])
        slice = sorted(slice)
        coverage = slice[len(slice)/2]          # median

        assert readname not in coverage_d, readname
        coverage_d[readname] = coverage
        total += coverage
        n += 1
        if n % 10000 == 0:
            print >>sys.stderr, '...', n

    print 'average of the median mapping coverage', total / float(n)
    print 'min coverage by read', min(coverage_d.values())
    print 'max coverage by read', max(coverage_d.values())

    covhist_d = {}
    sofar = 0
    for v in coverage_d.values():
        v = int(v + 0.5)
        covhist_d[v] = covhist_d.get(v, 0) + 1

    fp = open(args.covhist, 'w')
    total = sum(covhist_d.values())
    sofar = 0
    for k in range(0, max(covhist_d.keys()) + 1):
        v = covhist_d.get(k, 0)
        sofar += v
        print >>fp, k, v, sofar, sofar / float(total)
    fp.close()

    fp = open(args.coverage_d_pickle, 'w')
    cPickle.dump(coverage_d, fp)
    fp.close()


if __name__ == '__main__':
    main()
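A usage sketch for the script above; the file names are placeholders. The pickle it writes maps each mapped read name to its median per-base coverage.
# Typical invocation (placeholder file names), then reading the result back:
#
#   python sam-scan-to-coverage-dict.py genome.fa reads.x.genome.sam \
#          coverage.pickle coverage.hist
#
import cPickle
coverage_d = cPickle.load(open('coverage.pickle'))
print(max(coverage_d.values()))   # highest median coverage seen for any read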
bsd-3-clause
-6,013,978,973,039,782,000
26.8
95
0.526208
false
svanoort/python-client-benchmarks
benchmark.py
1
11059
#!/usr/bin/env python import timeit import time import string import argparse import csv import sys if sys.version_info[0] > 2: import urllib.parse as urlparse else: import urlparse # Import clients, so script fails fast if not available from pycurl import Curl try: from cStringIO import StringIO except: try: from StringIO import StringIO except ImportError: from io import StringIO import requests, urllib, urllib2, urllib3 def run_test(library, url, cycles, connection_reuse, options, setup_test, run_test, delay=None, timer=None): """ Runs a benchmark, showing start & stop the setup_test is a String.template with $url as an option the run_test allows for the same """ TIMER = timeit.default_timer if timer and timer.lower() == 'cpu': TIMER = time.clock # Linux only print("START testing {0} performance with {1} cycles and connection reuse {2}".format(library, cycles, connection_reuse)) print("Options: {0}".format(options)) run_cmd = string.Template(run_test).substitute(url=url) if delay: run_cmd = run_cmd + "; time.sleep({0})".format(delay) setup_cmd = string.Template(setup_test).substitute(url=url) mytime = timeit.timeit(stmt=run_cmd, setup=setup_cmd, number=cycles, timer=TIMER) if delay: mytime = mytime - (delay * cycles) print("END testing result: {0}".format(mytime)) print(' ') result = [library, connection_reuse, options, cycles, mytime] return result def run_size_benchmarks(url='', cycles=10, delay=None, output_file=None, length_api_format='/length/$length', **kwargs): timer_type = kwargs.get('timer') """ Run variable-size benchmarks, where URL is the base url """ sizes = [4, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072] # Yields ~10 GB of traffic, be careful! REQUESTS_NOREUSE = ('requests', False, 'Default', 'import requests', "r = requests.get('$url', verify=False)") REQUESTS_REUSE = ('requests', True, 'Default', "import requests; \ session = requests.Session(); \ r = requests.Request('GET', '$url').prepare()", "v = session.send(r, verify=False)") PYCURL_REUSE = ('pycurl', True, "Reuse handle, save response to new cStringIO buffer", "from pycurl import Curl; from cStringIO import StringIO; \ mycurl=Curl(); \ mycurl.setopt(mycurl.SSL_VERIFYPEER, 0); \ mycurl.setopt(mycurl.SSL_VERIFYHOST, 0); \ mycurl.setopt(mycurl.URL, '$url')", "body = StringIO(); \ mycurl.setopt(mycurl.WRITEFUNCTION, body.write); \ mycurl.perform(); \ val = body.getvalue(); \ body.close()") PYCURL_NOREUSE = ('pycurl', False, "Reuse handle, save response to new cStringIO buffer", "from pycurl import Curl; from cStringIO import StringIO; \ mycurl=Curl(); \ mycurl.setopt(mycurl.URL, '$url'); \ mycurl.setopt(mycurl.SSL_VERIFYPEER, 0); \ mycurl.setopt(mycurl.SSL_VERIFYHOST, 0); \ body = StringIO(); \ mycurl.setopt(mycurl.FORBID_REUSE, 1)", "body = StringIO(); \ mycurl.setopt(mycurl.WRITEFUNCTION, body.write); \ mycurl.perform(); \ val = body.getvalue(); \ body.close()") TEST_TYPES = [REQUESTS_NOREUSE, PYCURL_NOREUSE, REQUESTS_REUSE, PYCURL_REUSE] all_results = list() # Run tests for size in sizes: temp_url = url + string.Template(length_api_format).substitute(length=size) for test in TEST_TYPES: result = run_test(test[0], temp_url, cycles, test[1], test[2], test[3], test[4], delay=delay, timer=timer_type) del result[3] # Don't need cycles result.insert(0, size) all_results.append(result) # Transform tuples to size, time graphs for each response size final_output = [[x, 0, 0, 0, 0] for x in sizes] for i in xrange(0, len(sizes)): final_output[i][1] = all_results[i*4][4] final_output[i][2] = all_results[i*4+1][4] 
final_output[i][3] = all_results[i*4+2][4] final_output[i][4] = all_results[i*4+3][4] headers = ('Response_size', 'Requests Time (no cnxn reuse)', 'pyCurl Time (no cnxn reuse)', 'Requests Time (cnxn reuse)', 'pyCurl Time (cnxn reuse)') if output_file: with open(output_file, 'wb') as csvfile: outwriter = csv.writer(csvfile, dialect=csv.excel) outwriter.writerow(headers) for result in final_output: outwriter.writerow(result) def run_all_benchmarks(url='', cycles=10, delay=None, output_file=None, **kwargs): results = list() headers = ('Library','Reuse Connections?','Options', 'Time') tests = list() timer_type = kwargs.get('timer') # Library, cnxn_reuse, options, setup, run_stmt # Requests tests.append(('requests', False, 'Default', 'import requests', "r = requests.get('$url', verify=False)")) tests.append(('requests', True, 'Default', "import requests; \ session = requests.Session(); \ r = requests.Request('GET', '$url').prepare()", "v = session.send(r, verify=False)")) # PyCurl tests.append(('pycurl', True, "Reuse handle, don't save body", "from pycurl import Curl; \ mycurl=Curl(); \ mycurl.setopt(mycurl.SSL_VERIFYPEER, 0); \ mycurl.setopt(mycurl.SSL_VERIFYHOST, 0); \ mycurl.setopt(mycurl.URL, '$url'); \ mycurl.setopt(mycurl.WRITEFUNCTION, lambda x: None)", "mycurl.perform()")) tests.append(('pycurl', True, "Reuse handle, save response to new cStringIO buffer", "from pycurl import Curl; from cStringIO import StringIO; \ mycurl=Curl(); \ mycurl.setopt(mycurl.SSL_VERIFYPEER, 0); \ mycurl.setopt(mycurl.SSL_VERIFYHOST, 0); \ mycurl.setopt(mycurl.URL, '$url')", "body = StringIO(); \ mycurl.setopt(mycurl.WRITEFUNCTION, body.write); \ mycurl.perform(); \ val = body.getvalue(); \ body.close()")) tests.append(('pycurl', False, "Reuse handle, save response to new cStringIO buffer", "from pycurl import Curl; from cStringIO import StringIO; \ mycurl=Curl(); \ mycurl.setopt(mycurl.URL, '$url'); \ mycurl.setopt(mycurl.SSL_VERIFYPEER, 0); \ mycurl.setopt(mycurl.SSL_VERIFYHOST, 0); \ body = StringIO(); \ mycurl.setopt(mycurl.FORBID_REUSE, 1)", "body = StringIO(); \ mycurl.setopt(mycurl.WRITEFUNCTION, body.write); \ mycurl.perform(); \ val = body.getvalue(); \ body.close()")) # The use of global DNS cache avoids a bug on some linux systems with libcurl # playing badly with DNS resolvers tests.append(('pycurl', False, "New handle, save response to new cStringIO buffer", "from pycurl import Curl; from cStringIO import StringIO", "body = StringIO(); \ mycurl=Curl(); \ body = StringIO(); \ mycurl.setopt(mycurl.URL, '$url'); \ mycurl.setopt(mycurl.SSL_VERIFYPEER, 0); \ mycurl.setopt(mycurl.SSL_VERIFYHOST, 0); \ mycurl.setopt(mycurl.DNS_USE_GLOBAL_CACHE, True); \ mycurl.setopt(mycurl.WRITEFUNCTION, body.write); \ mycurl.perform(); \ val = body.getvalue(); \ body.close()")) # URLLIB3 # Making URLLIB3 accept self-signed certs is a beast. You have to create a connection pool with the hostname and port supplied. # See: http://stackoverflow.com/questions/18061640/ignore-certificate-validation-with-urllib3 # Yes, there's an option to bypass hostname verification but I cannot make it play nicely. 
parsed_url = urlparse.urlparse(url) scheme = parsed_url.scheme hostname = parsed_url.hostname port = parsed_url.port setup_string = "" if scheme == 'https': setup_string = "import urllib3; \ http_pool = urllib3.HTTPSConnectionPool('{0}', port={1}, cert_reqs='CERT_NONE', assert_hostname=False)".format(hostname, port) else: setup_string = "import urllib3; http_pool = urllib3.PoolManager()" tests.append(('urllib3', True, 'Default', setup_string, "body = http_pool.urlopen('GET', '$url').read()")) # URLLIB2 #tests.append(('urllib2', False, '', # "import urllib2", # "body = urllib2.urlopen('$url').read()")) # URLLIB tests.append(('urllib', False, 'Default', "import urllib", "body = urllib.urlopen('$url').read()")) for test in tests: my_result = run_test(test[0], url, cycles, test[1], test[2], test[3], test[4], delay=delay, timer=timer_type) results.append((test[0], test[1], test[2], my_result[-1])) if output_file: with open(output_file, 'wb') as csvfile: outwriter = csv.writer(csvfile, dialect=csv.excel) outwriter.writerow(('url', 'cycles', 'delay')) outwriter.writerow((url, cycles, delay)) outwriter.writerow(headers) for result in results: outwriter.writerow(result) if(__name__ == '__main__'): parser = argparse.ArgumentParser(description="Benchmark different python request frameworks") parser.add_argument('--url', metavar='u', type=str, default='http://localhost:8080/ping', help="URL to run requests against") parser.add_argument('--cycles', metavar='c', type=int, default=10000, help="Number of cycles to run") parser.add_argument('--delay', metavar='d', type=float, help="Delay in seconds between requests") parser.add_argument('--output-file', metavar='o', nargs='?', type=str, help="Output file to write CSV results to") parser.add_argument('--benchmark-type', type=str, default="full", choices=('full','size'), help="Benchmark type to run: full [default]=all libraries, 1 request, size=basic pycurl/requests tests with different request sizes") parser.add_argument('--timer', type=str, default="real", choices=('real','cpu'), help="Timer type: real [default] or cpu") parser.add_argument('--length-api-format', metavar='l', type=str, default="/length/$length", help="Template for API request that accepts response length parameter, for size benchmarks") args = vars(parser.parse_args()) if args.get('url') is None: print("No URL supplied, you must supply a URL!") exit(1) print('RUNNING PYTHON CLIENT BENCHMARKS WITH ARGS: {0}'.format(args)) if args['benchmark_type'] == 'full': run_all_benchmarks(**args) elif args['benchmark_type'] =='size': run_size_benchmarks(**args) else: raise Exception("Illegal benchmark type: {0}".format(args['benchmark_type']))
apache-2.0
8,804,450,190,701,221,000
41.698842
229
0.607288
false
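The benchmark script in the record above drives every HTTP client through the same pattern: a string.Template run statement (and optionally a setup statement) is substituted with the target URL and timed with timeit. A minimal, self-contained sketch of that pattern, not part of the record itself; the URL and cycle count are placeholders:

import string
import timeit

url = "http://example.org/ping"  # placeholder target, not taken from the record above
setup = "import requests; session = requests.Session()"
run = string.Template("session.get('$url')").substitute(url=url)

# Execute the setup once, then time the run statement for a fixed number of cycles.
elapsed = timeit.timeit(stmt=run, setup=setup, number=10)
print("10 requests took {0:.3f}s".format(elapsed))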
eeucalyptus/eeDA
app/graphics/wirerenderer.py
1
1977
from . import Renderer
from data.util import Vector2i, Vector2d
from .common import eeDAcolor, pMakeCircleArray, pMakeLineArray


class WireRenderer(Renderer):
    DEPTH = 1.0

    def __init__(self, wire, gl):
        super().__init__(gl)
        self.wire = wire
        self.callList = self._genCallList()

    def _genCallList(self):
        genList = self.gl.glGenLists(1)
        self.gl.glNewList(genList, self.gl.GL_COMPILE)

        self.width = self.wire.style['width'] / 2
        self.color = self.wire.style['color']

        self.pointAry = []
        con0_pos = self.wire.connectors[0].pos
        con1_pos = self.wire.connectors[1].pos

        self.pointAry.append(con0_pos)  # Start point
        for point in self.wire.points:
            self.pointAry.append(point)  # Intermediate points
        self.pointAry.append(con1_pos)  # End point

        self.vertices = pMakeLineArray(self.pointAry, Vector2i(), self.width, self.DEPTH)

        # Mark either end that has no connected counterpart.
        if not self.wire.connectors[0].other:
            self.renderUnconnected(self.pointAry[0])
        if not self.wire.connectors[1].other:
            self.renderUnconnected(self.pointAry[-1])

        self.setColor(self.color)

        self.gl.glEnableClientState(self.gl.GL_VERTEX_ARRAY)
        self.gl.glVertexPointer(3, self.gl.GL_FLOAT, 0, self.vertices)
        self.gl.glDrawArrays(self.gl.GL_TRIANGLE_STRIP, 0, len(self.vertices) // 3)
        self.gl.glDisableClientState(self.gl.GL_VERTEX_ARRAY)

        self.gl.glEndList()
        return genList

    def renderUnconnected(self, pos):
        self.setColor(eeDAcolor.WIRE_UNCONNECTED)
        self.gl.glEnableClientState(self.gl.GL_VERTEX_ARRAY)
        circle = pMakeCircleArray(pos, self.width * 1.5, self.DEPTH, 30)
        self.gl.glVertexPointer(3, self.gl.GL_FLOAT, 0, circle)
        self.gl.glDrawArrays(self.gl.GL_TRIANGLE_FAN, 0, len(circle) // 3)
        self.gl.glDisableClientState(self.gl.GL_VERTEX_ARRAY)
apache-2.0
-317,239,216,122,899,400
34.945455
89
0.654527
false
andrewsosa/hackfsu_com
api/api/models/hack.py
1
3160
from django.db import models
from api.models import Hackathon
from api.models.judging_criteria import JudgingCriteria
from api.models.judging_expo import JudgingExpo
from django.contrib import admin
from hackfsu_com.admin import hackfsu_admin


class HackQuerySet(models.QuerySet):
    from api.models.judge_info import JudgeInfo

    def from_expo(self, expo: JudgingExpo):
        return self.filter(
            table_number__gte=expo.table_number_start,
            table_number__lte=expo.table_number_end
        )

    def from_table_number(self, table: int):
        return self.get(table_number=table)

    def with_active_judge(self, judge: JudgeInfo):
        return self.filter(current_judges=judge)

    def without_previous_judge(self, judge: JudgeInfo):
        return self.exclude(judges=judge)


class HackManager(models.Manager):
    def get_next_table_number(self):
        number = 1
        hackathon = Hackathon.objects.current()
        while self.filter(hackathon=hackathon, table_number=number).exists():
            number += 1
        return number


class Hack(models.Model):
    objects = HackManager.from_queryset(HackQuerySet)()

    hackathon = models.ForeignKey(to=Hackathon, on_delete=models.CASCADE)
    table_number = models.IntegerField()
    name = models.CharField(max_length=100)  # Devpost "Submission Title"
    description = models.TextField()  # Devpost "Plain Description"
    extra_judging_criteria = models.ManyToManyField(to=JudgingCriteria, blank=True)  # Devpost "Desired Prizes"
    current_judges = models.ManyToManyField(to='api.JudgeInfo', blank=True, related_name='judges_current')
    judges = models.ManyToManyField(to='api.JudgeInfo', blank=True, related_name='judges')
    total_judge_score = models.IntegerField(default=0)
    times_judged = models.IntegerField(default=0)

    def get_expo(self):
        expo = JudgingExpo.objects.filter(
            hackathon=self.hackathon,
            table_number_start__lte=self.table_number,
            table_number_end__gte=self.table_number
        )
        if expo.exists():
            return expo.all()[0]
        return None

    def get_expo_name(self) -> str:
        expo = self.get_expo()
        if expo is None:
            return 'N/A'
        return expo.name

    def get_criteria_names(self) -> str:
        names = []
        for criteria in self.extra_judging_criteria.all():
            names.append(criteria.name)
        return ', '.join(names)

    def __str__(self):
        return self.name


@admin.register(Hack, site=hackfsu_admin)
class HackAdmin(admin.ModelAdmin):
    list_filter = ('hackathon',)
    list_display = ('id', 'name', 'expo', 'table_number', 'total_judge_score')
    list_editable = ('table_number',)
    list_display_links = ('id', 'name')
    search_fields = ('name', 'table_number')
    ordering = ('table_number', 'total_judge_score')

    @staticmethod
    def expo(obj: Hack):
        return obj.get_expo_name()

    @staticmethod
    def extra_criteria(obj: Hack) -> str:
        return obj.get_criteria_names()
apache-2.0
8,763,194,786,855,953,000
34.111111
117
0.641772
false
2B5/ia-3B5
module3/syntax_processing/processing_purenltk.py
1
6358
import nltk from nltk.tokenize import sent_tokenize, word_tokenize _wordnet = nltk.corpus.wordnet from semantic_processing import semantic_processing as semantics from nltk.stem import WordNetLemmatizer class TextProcessor: def __init__(self, initial_text): self.text = initial_text def word_tag(self, word): if word[1] in ("NN", "NNS", "NNP", "NNPS"): return _wordnet.NOUN if word[1] in ("JJ", "JJR", "JJS"): return _wordnet.ADJ if word[1] in ("VB", "VBD", "VBG", "VBN", "VBP", "VBZ"): return _wordnet.VERB if word[1] in ("RB", "RBR", "RBS"): return _wordnet.ADV return None def get_sentiment(self, polarity): if polarity <= 0.5 and polarity >= 0: return "neutral" if polarity > 0.5: return "happy" if polarity < 0: return "sad" def remove_signs(self,word_list): new_list = word_list for word in new_list: if word in (".",";","!","?",","): word_list.remove(word) return new_list def traverse(self, t, np_list): try: t.label() except AttributeError: return else: if t.label() == 'NP': # print('NP:' + str(t.leaves())) np_list.append(t.leaves()) # print('NPhead:' + str(t.leaves()[-1])) for child in t: self.traverse(child, np_list) else: for child in t: self.traverse(child, np_list) def get_NP(self, np_list): final_list = [] for item in np_list: final_expr = "" for word in item: final_expr = final_expr + word[0] + " " final_list.append(final_expr) return final_list def processing(self): wordnet_lemmatizer = WordNetLemmatizer() map_list = [] try: sent_tokenize_list = sent_tokenize(self.text) for sentence in sent_tokenize_list: # print (sentence) word_list = self.remove_signs(word_tokenize(sentence)) tag_list = nltk.pos_tag(word_list) lemmatized_sent = [] proper_nouns = [] pronouns = [] verbs = [] nouns = [] processed_sentence = {} processed_sentence["original_sentence"] = sentence processed_sentence["subject"] = "" processed_sentence["predicate"] = "" processed_sentence["verbs"] = "" processed_sentence["nouns"] = [] processed_sentence["numbers"] = [] grammar = "NP: {<DT>?<JJ>*<NN>}" cp = nltk.RegexpParser(grammar) p_tree = cp.parse(tag_list) np_list = [] self.traverse(p_tree, np_list) final_list = self.get_NP(np_list) processed_sentence["noun_phrases"] = final_list for word in tag_list: w = word[0].lower() # print(word) tag = self.word_tag(word) # print(w, ": ", word[1]) if tag != None: lemmatized_word = wordnet_lemmatizer.lemmatize(w, tag) else : lemmatized_word = wordnet_lemmatizer.lemmatize(w, _wordnet.NOUN) if word[1] == "NNP" or word[1] == "NNPS": proper_nouns.append(lemmatized_word) if word[1] == "NN" or word[1] == "NNS": nouns.append(lemmatized_word) if word[1] == "CD" : processed_sentence["numbers"].append(lemmatized_word) if word[1] == "PRP": pronouns.append(lemmatized_word) if tag == "v": if (word[1] == "VBG" or word[1] == "VBN") and verbs[-1] == "be": verbs[-1] = lemmatized_word elif word[1] == "VBN" and verbs[-1] == "have": verbs[-1] = lemmatized_word else: verbs.append(lemmatized_word) if tag == "n" : processed_sentence["nouns"].append(lemmatized_word) lemmatized_sent.append(lemmatized_word) processed_sentence["sentence"] = lemmatized_sent processed_sentence["proper_nouns"] = proper_nouns # processed_sentance["Noun Phrase"] = list(noun_phrase) processed_sentence["pronouns"] = pronouns processed_sentence["verbs"] = verbs if len(processed_sentence["nouns"]) != 0 and len(pronouns) != 0: if lemmatized_sent.index(processed_sentence["nouns"][0]) < lemmatized_sent.index(pronouns[0]): processed_sentence["subject"] = processed_sentence["nouns"][0] else: processed_sentence["subject"] = pronouns[0] elif 
len(processed_sentence["nouns"]) != 0: processed_sentence["subject"] = processed_sentence["nouns"][0] elif len(pronouns) != 0: processed_sentence["subject"] = pronouns[0] if len(verbs) != 0: processed_sentence["predicate"] = verbs[0] processed_sentence["semantics"] = {} word_list = [w.lower() for w in word_list] context = semantics.remove_stopwords(word_list) lemmas = semantics.remove_stopwords(lemmatized_sent) for lemma in lemmas: processed_sentence["semantics"].setdefault(lemma, semantics.semantic_info(lemma, lemma, context)) map_list.append(processed_sentence) return map_list except Exception as e: print("Exception!") print(str(e)) print(type(e)) #text = "He is my brother." #t = TextProcessor(text) #lista = t.processing() #for prop in lista: # print(str(prop))
mit
-7,640,416,701,744,384,000
33.743169
117
0.477351
false
Antidote1911/cryptoshop
testing/cryptoshop/tests.py
1
2710
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# Cryptoshop Strong file encryption.
# Encrypt and decrypt file in GCM mode with AES, Serpent or Twofish as secure as possible.
# Copyright(C) 2016 CORRAIRE Fabrice. [email protected]

# ############################################################################
# This file is part of Cryptoshop-GUI (full Qt5 gui for Cryptoshop).
#
# Cryptoshop is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Cryptoshop is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cryptoshop. If not, see <http://www.gnu.org/licenses/>.
# ############################################################################

from cryptoshop import encryptstring
from cryptoshop import decryptstring
from cryptoshop import encryptfile
from cryptoshop import decryptfile
from cryptoshop._nonce_engine import generate_nonce_timestamp
from cryptoshop._derivation_engine import calc_derivation
from cryptoshop._derivation_engine import calc_derivation_formated


def test_derivation():
    print("============< test Argon2 derivation raw >============")
    print("passphrase= my password")
    print("salt= b'123456789'")
    test = calc_derivation(passphrase="my password", salt=b"123456789")
    print("hash= " + str(test))


def test_derivation2():
    print("============< test Argon2 derivation formated >============")
    print("passphrase= my password")
    print("salt= b'123456789'")
    test = calc_derivation_formated(passphrase="my password", salt=b"123456789")
    print("hash= " + str(test))


def test_nonce():
    print("============< test generating 100 uniques nonces >============")
    i = 1
    while i < 100:
        print(generate_nonce_timestamp())
        i += 1


def test_enc_dec_string():
    # encrypt
    pt = "my super secret text to encrypt"
    cryptostring = encryptstring(string=pt, passphrase="my passphrase")
    # decrypt
    assert decryptstring(string=cryptostring, passphrase="my passphrase") == pt


def test_enc_dec_file():
    # encrypt
    encryptfile(filename="encrypt.me", passphrase="my passphrase", algo="srp")
    # decrypt
    result = decryptfile(filename="encrypt.me.cryptoshop", passphrase="my passphrase")
    assert (result == "successfully decrypted")
gpl-3.0
3,055,049,703,581,703,000
36.123288
90
0.656458
false
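The tests in the record above exercise cryptoshop's string helpers end to end. A minimal round-trip sketch using the same calls, shown for illustration only; the plaintext and passphrase are placeholders, and it assumes the cryptoshop package from the record is importable:

from cryptoshop import encryptstring, decryptstring

secret = "attack at dawn"  # placeholder plaintext, not from the record
token = encryptstring(string=secret, passphrase="correct horse")  # returns an encrypted string
assert decryptstring(string=token, passphrase="correct horse") == secret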
masayukig/stestr
stestr/repository/util.py
1
2625
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import importlib
import os
import sys


def _get_default_repo_url(repo_type):
    if repo_type == 'sql':
        repo_file = os.path.join(os.getcwd(), '.stestr.sqlite')
        repo_url = 'sqlite:///' + repo_file
    elif repo_type == 'file':
        repo_url = os.getcwd()
    else:
        raise TypeError('Unrecognized repository type %s' % repo_type)
    return repo_url


def get_repo_open(repo_type, repo_url=None):
    """Return an already initialized repo object given the parameters

    :param str repo_type: The repo module to use for the returned repo
    :param str repo_url: An optional repo url, if one is not specified the
        default $CWD/.stestr will be used.
    """
    try:
        repo_module = importlib.import_module('stestr.repository.' + repo_type)
    except ImportError:
        if repo_type == 'sql':
            sys.exit("sql repository type requirements aren't installed. To "
                     "use the sql repository ensure you installed the extra "
                     "requirements with `pip install 'stestr[sql]'`")
        else:
            raise
    if not repo_url:
        repo_url = _get_default_repo_url(repo_type)
    return repo_module.RepositoryFactory().open(repo_url)


def get_repo_initialise(repo_type, repo_url=None):
    """Return a newly initialized repo object given the parameters

    :param str repo_type: The repo module to use for the returned repo
    :param str repo_url: An optional repo url, if one is not specified the
        default $CWD/.stestr will be used.
    """
    try:
        repo_module = importlib.import_module('stestr.repository.' + repo_type)
    except ImportError:
        if repo_type == 'sql':
            sys.exit("sql repository type requirements aren't installed. To "
                     "use the sql repository ensure you installed the extra "
                     "requirements with `pip install 'stestr[sql]'`")
        else:
            raise
    if not repo_url:
        repo_url = _get_default_repo_url(repo_type)
    return repo_module.RepositoryFactory().initialise(repo_url)
apache-2.0
5,784,564,498,509,507,000
37.602941
79
0.657524
false
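The docstrings in the record above describe get_repo_open and get_repo_initialise as the two entry points for repository access. A short sketch of how a caller might use them, assuming the module path matches the record's path and that a 'file' repository in the current working directory is acceptable; error handling is omitted:

from stestr.repository import util

# Create a new .stestr repository in the current working directory...
repo = util.get_repo_initialise('file')

# ...and later re-open the same repository to read or record test runs.
repo = util.get_repo_open('file')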
edibledinos/pwnypack
docs/conf.py
1
11416
# -*- coding: utf-8 -*- # # pwnypack documentation build configuration file, created by # sphinx-quickstart on Wed Mar 25 15:04:19 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os import shlex import mock sys.path.insert(0, os.path.abspath('..')) on_rtd = os.environ.get('READTHEDOCS', None) == 'True' # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.viewcode', 'sphinx.ext.napoleon', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'pwnypack' copyright = u'2015 - 2016, Certified Edible Dinosaurs' author = u'Ingmar Steen' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.9' # The full version, including alpha/beta/rc tags. release = '0.9.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = 'en' # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. 
todo_include_todos = True # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. if not on_rtd: html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' #html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value #html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. #html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. 
htmlhelp_basename = 'pwnydoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'pwny.tex', u'pwny Documentation', u'Author', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'pwny', u'pwny Documentation', [author], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'pwny', u'pwny Documentation', author, 'pwny', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False # -- Options for Epub output ---------------------------------------------- # Bibliographic Dublin Core info. epub_title = project epub_author = author epub_publisher = author epub_copyright = copyright # The basename for the epub file. It defaults to the project name. #epub_basename = project # The HTML theme for the epub output. Since the default themes are not optimized # for small screen space, using the same theme for HTML and epub output is # usually not wise. This defaults to 'epub', a theme designed to save visual # space. #epub_theme = 'epub' # The language of the text. It defaults to the language option # or 'en' if the language is not set. #epub_language = '' # The scheme of the identifier. Typical schemes are ISBN or URL. #epub_scheme = '' # The unique identifier of the text. This can be a ISBN number # or the project homepage. #epub_identifier = '' # A unique identification for the text. #epub_uid = '' # A tuple containing the cover image and cover page html template filenames. #epub_cover = () # A sequence of (type, uri, title) tuples for the guide element of content.opf. #epub_guide = () # HTML files that should be inserted before the pages created by sphinx. # The format is a list of tuples containing the path and title. 
#epub_pre_files = [] # HTML files shat should be inserted after the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_post_files = [] # A list of files that should not be packed into the epub file. epub_exclude_files = ['search.html'] # The depth of the table of contents in toc.ncx. #epub_tocdepth = 3 # Allow duplicate toc entries. #epub_tocdup = True # Choose between 'default' and 'includehidden'. #epub_tocscope = 'default' # Fix unsupported image types using the Pillow. #epub_fix_images = False # Scale large images. #epub_max_image_width = 0 # How to display URL addresses: 'footnote', 'no', or 'inline'. #epub_show_urls = 'inline' # If false, no index is generated. #epub_use_index = True
mit
3,611,730,147,197,216,300
30.191257
80
0.705676
false
quantwizard-com/pythonbacktest
pythonbacktest/animation/ipythonchartanimation.py
1
1578
from IPython.display import display
from matplotlib import animation, rc
import abc


class IPythonChartAnimation(object):
    __metaclass__ = abc.ABCMeta

    VIDEO_TAG = """<video controls>
    <source src="data:video/x-m4v;base64,{0}" type="video/mp4">
    Your browser does not support the video tag.
    </video>"""

    def __init__(self):
        self.__target_canvas = None
        self.__number_of_frames = None
        self.__interval = None

    @abc.abstractmethod
    def _init_animation(self):
        raise NotImplementedError()

    def _start_animation(self, animation_callback, init_animation_callback,
                         target_canvas, frames=100, interval=200):
        anim = animation.FuncAnimation(target_canvas, animation_callback,
                                       init_func=init_animation_callback,
                                       frames=frames, interval=interval, blit=True)
        rc('animation', html='html5')
        display(anim)

    @property
    def target_canvas(self):
        return self.__target_canvas

    @target_canvas.setter
    def target_canvas(self, canvas):
        self.__target_canvas = canvas

    @property
    def number_of_frames(self):
        return self.__number_of_frames

    @number_of_frames.setter
    def number_of_frames(self, value):
        self.__number_of_frames = value

    @property
    def interval(self):
        return self.__interval

    @interval.setter
    def interval(self, inter):
        self.__interval = inter
apache-2.0
3,753,715,281,970,296,300
26.684211
95
0.586185
false
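IPythonChartAnimation in the record above is an abstract wrapper around matplotlib's FuncAnimation that displays the result as HTML5 video in a notebook. A sketch of a concrete subclass, for illustration only; the import path is assumed to match the record's path, and the figure, data, and frame callback are placeholders:

import matplotlib.pyplot as plt
from pythonbacktest.animation.ipythonchartanimation import IPythonChartAnimation


class LineAnimation(IPythonChartAnimation):
    """Illustrative subclass that animates a growing diagonal line."""

    def __init__(self):
        super().__init__()
        self.figure, self.axes = plt.subplots()
        self.axes.set_xlim(0, 50)
        self.axes.set_ylim(0, 50)
        self.line, = self.axes.plot([], [])

    def _init_animation(self):
        # Called once by FuncAnimation before the first frame.
        self.line.set_data([], [])
        return (self.line,)

    def _animate(self, frame):
        # Called for every frame; returns the artists that changed.
        self.line.set_data(range(frame), range(frame))
        return (self.line,)

    def show(self):
        self._start_animation(self._animate, self._init_animation,
                              self.figure, frames=50, interval=100)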
JasonCozens/CalTools
cal_tools/test/jcal_test.py
1
1931
__author__ = 'Jason'

import icalendar
import unittest
from cal_tools import jcal
import json


class JCalTest(unittest.TestCase):
    def test_new_calendar(self):
        # Arrange.
        cal = icalendar.Calendar()
        # Act.
        j_cal = jcal.JCal.from_calendar(cal)
        # Assert.
        self.assertListEqual(json.loads(j_cal), ["vcalendar", [], []])

    def test_calendar_with_properties(self):
        # Arrange.
        cal = icalendar.Calendar()
        cal.add('version', '2.0')
        cal.add('prodid', 'test.com/abc')
        # Act.
        j_cal = jcal.JCal.from_calendar(cal)
        # Assert.
        expected = ["vcalendar",
                    [
                        ['version', {}, 'text', '2.0'],
                        ['prodid', {}, 'text', 'test.com/abc']
                    ],
                    []
                    ]
        self.assertListEqual(json.loads(j_cal), expected)
        print(j_cal)

    def test_calendar_with_sub_component(self):
        # Arrange.
        cal = icalendar.Calendar()
        cal.add_component(icalendar.Event())
        # Act.
        j_cal = jcal.JCal.from_calendar(cal)
        # Assert.
        expected = ["vcalendar",
                    [],
                    [["vevent", [], []]]
                    ]
        self.assertListEqual(json.loads(j_cal), expected)
        print(j_cal)

    def test_vbool_property(self):
        # Arrange.
        expected = (
            b'BEGIN:VCALENDAR\r\n' +
            b'X-NON-SMOKING:TRUE\r\n' +
            b'END:VCALENDAR\r\n'
        )
        component = icalendar.Calendar.from_ical(expected)
        # component = icalendar.Calendar()
        # component.add('x-non-smoking', True, encode=False)
        # Act.
        prop = jcal.JCal._from_prop(component)
        # Assert.
        self.assertEqual(prop, [['x-non-smoking', {}, 'boolean', True]])
        self.assertEqual(
            json.dumps(prop),
            '[["x-non-smoking", {}, "boolean", true]]')
bsd-2-clause
-7,451,479,154,639,959,000
28.272727
72
0.508545
false
haad/ansible
test/sanity/validate-modules/main.py
1
49902
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (C) 2015 Matt Martz <[email protected]> # Copyright (C) 2015 Rackspace US, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from __future__ import print_function import abc import argparse import ast import json import errno import os import re import subprocess import sys import tempfile import traceback from collections import OrderedDict from contextlib import contextmanager from distutils.version import StrictVersion from fnmatch import fnmatch from ansible import __version__ as ansible_version from ansible.executor.module_common import REPLACER_WINDOWS from ansible.plugins.loader import fragment_loader from ansible.utils.plugin_docs import BLACKLIST, get_docstring from module_args import AnsibleModuleImportError, get_argument_spec from schema import doc_schema, metadata_1_1_schema, return_schema from utils import CaptureStd, parse_yaml from voluptuous.humanize import humanize_error from ansible.module_utils.six import PY3, with_metaclass if PY3: # Because there is no ast.TryExcept in Python 3 ast module TRY_EXCEPT = ast.Try # REPLACER_WINDOWS from ansible.executor.module_common is byte # string but we need unicode for Python 3 REPLACER_WINDOWS = REPLACER_WINDOWS.decode('utf-8') else: TRY_EXCEPT = ast.TryExcept BLACKLIST_DIRS = frozenset(('.git', 'test', '.github', '.idea')) INDENT_REGEX = re.compile(r'([\t]*)') TYPE_REGEX = re.compile(r'.*(if|or)(\s+[^"\']*|\s+)(?<!_)(?<!str\()type\(.*') BLACKLIST_IMPORTS = { 'requests': { 'new_only': True, 'error': { 'code': 203, 'msg': ('requests import found, should use ' 'ansible.module_utils.urls instead') } }, r'boto(?:\.|$)': { 'new_only': True, 'error': { 'code': 204, 'msg': 'boto import found, new modules should use boto3' } }, } class ReporterEncoder(json.JSONEncoder): def default(self, o): if isinstance(o, Exception): return str(o) return json.JSONEncoder.default(self, o) class Reporter(object): def __init__(self): self.files = OrderedDict() def _ensure_default_entry(self, path): try: self.files[path] except KeyError: self.files[path] = { 'errors': [], 'warnings': [], 'traces': [], 'warning_traces': [] } def _log(self, path, code, msg, level='error', line=0, column=0): self._ensure_default_entry(path) lvl_dct = self.files[path]['%ss' % level] lvl_dct.append({ 'code': code, 'msg': msg, 'line': line, 'column': column }) def error(self, *args, **kwargs): self._log(*args, level='error', **kwargs) def warning(self, *args, **kwargs): self._log(*args, level='warning', **kwargs) def trace(self, path, tracebk): self._ensure_default_entry(path) self.files[path]['traces'].append(tracebk) def warning_trace(self, path, tracebk): self._ensure_default_entry(path) self.files[path]['warning_traces'].append(tracebk) @staticmethod @contextmanager def _output_handle(output): if output != '-': handle = open(output, 'w+') else: handle = sys.stdout yield handle handle.flush() handle.close() @staticmethod def 
_filter_out_ok(reports): temp_reports = OrderedDict() for path, report in reports.items(): if report['errors'] or report['warnings']: temp_reports[path] = report return temp_reports def plain(self, warnings=False, output='-'): """Print out the test results in plain format output is ignored here for now """ ret = [] for path, report in Reporter._filter_out_ok(self.files).items(): traces = report['traces'][:] if warnings and report['warnings']: traces.extend(report['warning_traces']) for trace in traces: print('TRACE:') print('\n '.join((' %s' % trace).splitlines())) for error in report['errors']: error['path'] = path print('%(path)s:%(line)d:%(column)d: E%(code)d %(msg)s' % error) ret.append(1) if warnings: for warning in report['warnings']: warning['path'] = path print('%(path)s:%(line)d:%(column)d: W%(code)d %(msg)s' % warning) return 3 if ret else 0 def json(self, warnings=False, output='-'): """Print out the test results in json format warnings is not respected in this output """ ret = [len(r['errors']) for _, r in self.files.items()] with Reporter._output_handle(output) as handle: print(json.dumps(Reporter._filter_out_ok(self.files), indent=4, cls=ReporterEncoder), file=handle) return 3 if sum(ret) else 0 class Validator(with_metaclass(abc.ABCMeta, object)): """Validator instances are intended to be run on a single object. if you are scanning multiple objects for problems, you'll want to have a separate Validator for each one.""" def __init__(self, reporter=None): self.reporter = reporter @abc.abstractproperty def object_name(self): """Name of the object we validated""" pass @abc.abstractproperty def object_path(self): """Path of the object we validated""" pass @abc.abstractmethod def validate(self): """Run this method to generate the test results""" pass class ModuleValidator(Validator): BLACKLIST_PATTERNS = ('.git*', '*.pyc', '*.pyo', '.*', '*.md', '*.rst', '*.txt') BLACKLIST_FILES = frozenset(('.git', '.gitignore', '.travis.yml', 'shippable.yml', '.gitattributes', '.gitmodules', 'COPYING', '__init__.py', 'VERSION', 'test-docs.sh')) BLACKLIST = BLACKLIST_FILES.union(BLACKLIST['MODULE']) PS_DOC_BLACKLIST = frozenset(( 'async_status.ps1', 'slurp.ps1', 'setup.ps1' )) WHITELIST_FUTURE_IMPORTS = frozenset(('absolute_import', 'division', 'print_function')) def __init__(self, path, analyze_arg_spec=False, base_branch=None, git_cache=None, reporter=None): super(ModuleValidator, self).__init__(reporter=reporter or Reporter()) self.path = path self.basename = os.path.basename(self.path) self.name, _ = os.path.splitext(self.basename) self.analyze_arg_spec = analyze_arg_spec self.base_branch = base_branch self.git_cache = git_cache or GitCache() self._python_module_override = False with open(path) as f: self.text = f.read() self.length = len(self.text.splitlines()) try: self.ast = ast.parse(self.text) except Exception: self.ast = None if base_branch: self.base_module = self._get_base_file() else: self.base_module = None def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): if not self.base_module: return try: os.remove(self.base_module) except Exception: pass @property def object_name(self): return self.basename @property def object_path(self): return self.path def _python_module(self): if self.path.endswith('.py') or self._python_module_override: return True return False def _powershell_module(self): if self.path.endswith('.ps1'): return True return False def _just_docs(self): """Module can contain just docs and from __future__ boilerplate """ try: for child in 
self.ast.body: if not isinstance(child, ast.Assign): # allowed from __future__ imports if isinstance(child, ast.ImportFrom) and child.module == '__future__': for future_import in child.names: if future_import.name not in self.WHITELIST_FUTURE_IMPORTS: break else: continue return False return True except AttributeError: return False def _get_base_branch_module_path(self): """List all paths within lib/ansible/modules to try and match a moved module""" return self.git_cache.base_module_paths.get(self.object_name) def _has_alias(self): """Return true if the module has any aliases.""" return self.object_name in self.git_cache.head_aliased_modules def _get_base_file(self): # In case of module moves, look for the original location base_path = self._get_base_branch_module_path() command = ['git', 'show', '%s:%s' % (self.base_branch, base_path or self.path)] p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() if int(p.returncode) != 0: return None t = tempfile.NamedTemporaryFile(delete=False) t.write(stdout) t.close() return t.name def _is_new_module(self): if self._has_alias(): return False return not self.object_name.startswith('_') and bool(self.base_branch) and not bool(self.base_module) def _check_interpreter(self, powershell=False): if powershell: if not self.text.startswith('#!powershell\n'): self.reporter.error( path=self.object_path, code=102, msg='Interpreter line is not "#!powershell"' ) return if not self.text.startswith('#!/usr/bin/python'): self.reporter.error( path=self.object_path, code=101, msg='Interpreter line is not "#!/usr/bin/python"' ) def _check_type_instead_of_isinstance(self, powershell=False): if powershell: return for line_no, line in enumerate(self.text.splitlines()): typekeyword = TYPE_REGEX.match(line) if typekeyword: # TODO: add column self.reporter.error( path=self.object_path, code=403, msg=('Type comparison using type() found. ' 'Use isinstance() instead'), line=line_no + 1 ) def _check_for_sys_exit(self): if 'sys.exit(' in self.text: # TODO: Add line/col self.reporter.error( path=self.object_path, code=205, msg='sys.exit() call found. 
Should be exit_json/fail_json' ) def _check_gpl3_header(self): header = '\n'.join(self.text.split('\n')[:20]) if ('GNU General Public License' not in header or ('version 3' not in header and 'v3.0' not in header)): self.reporter.error( path=self.object_path, code=105, msg='GPLv3 license header not found in the first 20 lines of the module' ) elif self._is_new_module(): if len([line for line in header if 'GNU General Public License' in line]) > 1: self.reporter.error( path=self.object_path, code=108, msg='Found old style GPLv3 license header: ' 'https://docs.ansible.com/ansible/devel/dev_guide/developing_modules_documenting.html#copyright' ) def _check_for_tabs(self): for line_no, line in enumerate(self.text.splitlines()): indent = INDENT_REGEX.search(line) if indent and '\t' in line: index = line.index('\t') self.reporter.error( path=self.object_path, code=402, msg='indentation contains tabs', line=line_no + 1, column=index ) def _find_blacklist_imports(self): for child in self.ast.body: names = [] if isinstance(child, ast.Import): names.extend(child.names) elif isinstance(child, TRY_EXCEPT): bodies = child.body for handler in child.handlers: bodies.extend(handler.body) for grandchild in bodies: if isinstance(grandchild, ast.Import): names.extend(grandchild.names) for name in names: # TODO: Add line/col for blacklist_import, options in BLACKLIST_IMPORTS.items(): if re.search(blacklist_import, name.name): new_only = options['new_only'] if self._is_new_module() and new_only: self.reporter.error( path=self.object_path, **options['error'] ) elif not new_only: self.reporter.error( path=self.object_path, **options['error'] ) def _find_module_utils(self, main): linenos = [] found_basic = False for child in self.ast.body: if isinstance(child, (ast.Import, ast.ImportFrom)): names = [] try: names.append(child.module) if child.module.endswith('.basic'): found_basic = True except AttributeError: pass names.extend([n.name for n in child.names]) if [n for n in names if n.startswith('ansible.module_utils')]: linenos.append(child.lineno) for name in child.names: if ('module_utils' in getattr(child, 'module', '') and isinstance(name, ast.alias) and name.name == '*'): msg = ( 208, ('module_utils imports should import specific ' 'components, not "*"') ) if self._is_new_module(): self.reporter.error( path=self.object_path, code=msg[0], msg=msg[1], line=child.lineno ) else: self.reporter.warning( path=self.object_path, code=msg[0], msg=msg[1], line=child.lineno ) if (isinstance(name, ast.alias) and name.name == 'basic'): found_basic = True if not linenos: self.reporter.error( path=self.object_path, code=201, msg='Did not find a module_utils import' ) elif not found_basic: self.reporter.warning( path=self.object_path, code=292, msg='Did not find "ansible.module_utils.basic" import' ) return linenos def _get_first_callable(self): linenos = [] for child in self.ast.body: if isinstance(child, (ast.FunctionDef, ast.ClassDef)): linenos.append(child.lineno) return min(linenos) def _find_main_call(self): lineno = False if_bodies = [] for child in self.ast.body: if isinstance(child, ast.If): try: if child.test.left.id == '__name__': if_bodies.extend(child.body) except AttributeError: pass bodies = self.ast.body bodies.extend(if_bodies) for child in bodies: # validate that the next to last line is 'if __name__ == "__main__"' if child.lineno == (self.length - 1): mainchecked = False try: if isinstance(child, ast.If) and \ child.test.left.id == '__name__' and \ len(child.test.ops) == 1 and \ isinstance(child.test.ops[0], 
ast.Eq) and \ child.test.comparators[0].s == '__main__': mainchecked = True except Exception: pass if not mainchecked: self.reporter.error( path=self.object_path, code=109, msg='Next to last line should be: if __name__ == "__main__":', line=child.lineno ) # validate that the final line is a call to main() if isinstance(child, ast.Expr): if isinstance(child.value, ast.Call): if (isinstance(child.value.func, ast.Name) and child.value.func.id == 'main'): lineno = child.lineno if lineno < self.length - 1: self.reporter.error( path=self.object_path, code=104, msg='Call to main() not the last line', line=lineno ) if not lineno: self.reporter.error( path=self.object_path, code=103, msg='Did not find a call to main' ) return lineno or 0 def _find_has_import(self): for child in self.ast.body: found_try_except_import = False found_has = False if isinstance(child, TRY_EXCEPT): bodies = child.body for handler in child.handlers: bodies.extend(handler.body) for grandchild in bodies: if isinstance(grandchild, ast.Import): found_try_except_import = True if isinstance(grandchild, ast.Assign): for target in grandchild.targets: if target.id.lower().startswith('has_'): found_has = True if found_try_except_import and not found_has: # TODO: Add line/col self.reporter.warning( path=self.object_path, code=291, msg='Found Try/Except block without HAS_ assignment' ) def _ensure_imports_below_docs(self, doc_info, first_callable): try: min_doc_line = min( [doc_info[key]['lineno'] for key in doc_info if doc_info[key]['lineno']] ) except ValueError: # We can't perform this validation, as there are no DOCs provided at all return max_doc_line = max( [doc_info[key]['end_lineno'] for key in doc_info if doc_info[key]['end_lineno']] ) import_lines = [] for child in self.ast.body: if isinstance(child, (ast.Import, ast.ImportFrom)): if isinstance(child, ast.ImportFrom) and child.module == '__future__': # allowed from __future__ imports for future_import in child.names: if future_import.name not in self.WHITELIST_FUTURE_IMPORTS: self.reporter.error( path=self.object_path, code=209, msg=('Only the following from __future__ imports are allowed: %s' % ', '.join(self.WHITELIST_FUTURE_IMPORTS)), line=child.lineno ) break else: # for-else. If we didn't find a problem nad break out of the loop, then this is a legal import continue import_lines.append(child.lineno) if child.lineno < min_doc_line: self.reporter.error( path=self.object_path, code=106, msg=('Import found before documentation variables. ' 'All imports must appear below ' 'DOCUMENTATION/EXAMPLES/RETURN/ANSIBLE_METADATA.'), line=child.lineno ) break elif isinstance(child, TRY_EXCEPT): bodies = child.body for handler in child.handlers: bodies.extend(handler.body) for grandchild in bodies: if isinstance(grandchild, (ast.Import, ast.ImportFrom)): import_lines.append(grandchild.lineno) if grandchild.lineno < min_doc_line: self.reporter.error( path=self.object_path, code=106, msg=('Import found before documentation ' 'variables. 
All imports must appear below ' 'DOCUMENTATION/EXAMPLES/RETURN/' 'ANSIBLE_METADATA.'), line=child.lineno ) break for import_line in import_lines: if not (max_doc_line < import_line < first_callable): msg = ( 107, ('Imports should be directly below DOCUMENTATION/EXAMPLES/' 'RETURN/ANSIBLE_METADATA.') ) if self._is_new_module(): self.reporter.error( path=self.object_path, code=msg[0], msg=msg[1], line=import_line ) else: self.reporter.warning( path=self.object_path, code=msg[0], msg=msg[1], line=import_line ) def _validate_ps_replacers(self): # loop all (for/else + error) # get module list for each # check "shape" of each module name module_requires = r'(?im)^#\s*requires\s+\-module(?:s?)\s*(Ansible\.ModuleUtils\..+)' found_requires = False for req_stmt in re.finditer(module_requires, self.text): found_requires = True # this will bomb on dictionary format - "don't do that" module_list = [x.strip() for x in req_stmt.group(1).split(',')] if len(module_list) > 1: self.reporter.error( path=self.object_path, code=210, msg='Ansible.ModuleUtils requirements do not support multiple modules per statement: "%s"' % req_stmt.group(0) ) continue module_name = module_list[0] if module_name.lower().endswith('.psm1'): self.reporter.error( path=self.object_path, code=211, msg='Module #Requires should not end in .psm1: "%s"' % module_name ) # also accept the legacy #POWERSHELL_COMMON replacer signal if not found_requires and REPLACER_WINDOWS not in self.text: self.reporter.error( path=self.object_path, code=207, msg='No Ansible.ModuleUtils module requirements/imports found' ) def _find_ps_docs_py_file(self): if self.object_name in self.PS_DOC_BLACKLIST: return py_path = self.path.replace('.ps1', '.py') if not os.path.isfile(py_path): self.reporter.error( path=self.object_path, code=503, msg='Missing python documentation file' ) def _get_docs(self): docs = { 'DOCUMENTATION': { 'value': None, 'lineno': 0, 'end_lineno': 0, }, 'EXAMPLES': { 'value': None, 'lineno': 0, 'end_lineno': 0, }, 'RETURN': { 'value': None, 'lineno': 0, 'end_lineno': 0, }, 'ANSIBLE_METADATA': { 'value': None, 'lineno': 0, 'end_lineno': 0, } } for child in self.ast.body: if isinstance(child, ast.Assign): for grandchild in child.targets: if grandchild.id == 'DOCUMENTATION': docs['DOCUMENTATION']['value'] = child.value.s docs['DOCUMENTATION']['lineno'] = child.lineno docs['DOCUMENTATION']['end_lineno'] = ( child.lineno + len(child.value.s.splitlines()) ) elif grandchild.id == 'EXAMPLES': docs['EXAMPLES']['value'] = child.value.s docs['EXAMPLES']['lineno'] = child.lineno docs['EXAMPLES']['end_lineno'] = ( child.lineno + len(child.value.s.splitlines()) ) elif grandchild.id == 'RETURN': docs['RETURN']['value'] = child.value.s docs['RETURN']['lineno'] = child.lineno docs['RETURN']['end_lineno'] = ( child.lineno + len(child.value.s.splitlines()) ) elif grandchild.id == 'ANSIBLE_METADATA': docs['ANSIBLE_METADATA']['value'] = child.value docs['ANSIBLE_METADATA']['lineno'] = child.lineno try: docs['ANSIBLE_METADATA']['end_lineno'] = ( child.lineno + len(child.value.s.splitlines()) ) except AttributeError: docs['ANSIBLE_METADATA']['end_lineno'] = ( child.value.values[-1].lineno ) return docs def _validate_docs_schema(self, doc, schema, name, error_code): # TODO: Add line/col errors = [] try: schema(doc) except Exception as e: for error in e.errors: error.data = doc errors.extend(e.errors) for error in errors: path = [str(p) for p in error.path] if isinstance(error.data, dict): error_message = humanize_error(error.data, error) else: error_message = 
error self.reporter.error( path=self.object_path, code=error_code, msg='%s.%s: %s' % (name, '.'.join(path), error_message) ) def _validate_docs(self): doc_info = self._get_docs() deprecated = False if not bool(doc_info['DOCUMENTATION']['value']): self.reporter.error( path=self.object_path, code=301, msg='No DOCUMENTATION provided' ) else: doc, errors, traces = parse_yaml( doc_info['DOCUMENTATION']['value'], doc_info['DOCUMENTATION']['lineno'], self.name, 'DOCUMENTATION' ) for error in errors: self.reporter.error( path=self.object_path, code=302, **error ) for trace in traces: self.reporter.trace( path=self.object_path, tracebk=trace ) if not errors and not traces: with CaptureStd(): try: get_docstring(self.path, fragment_loader, verbose=True) except AssertionError: fragment = doc['extends_documentation_fragment'] self.reporter.error( path=self.object_path, code=303, msg='DOCUMENTATION fragment missing: %s' % fragment ) except Exception: self.reporter.trace( path=self.object_path, tracebk=traceback.format_exc() ) self.reporter.error( path=self.object_path, code=304, msg='Unknown DOCUMENTATION error, see TRACE' ) if 'options' in doc and doc['options'] is None: self.reporter.error( path=self.object_path, code=320, msg='DOCUMENTATION.options must be a dictionary/hash when used', ) if self.object_name.startswith('_') and not os.path.islink(self.object_path): deprecated = True if 'deprecated' not in doc or not doc.get('deprecated'): self.reporter.error( path=self.object_path, code=318, msg='Module deprecated, but DOCUMENTATION.deprecated is missing' ) if os.path.islink(self.object_path): # This module has an alias, which we can tell as it's a symlink # Rather than checking for `module: $filename` we need to check against the true filename self._validate_docs_schema(doc, doc_schema(os.readlink(self.object_path).split('.')[0]), 'DOCUMENTATION', 305) else: # This is the normal case self._validate_docs_schema(doc, doc_schema(self.object_name.split('.')[0]), 'DOCUMENTATION', 305) self._check_version_added(doc) self._check_for_new_args(doc) if not bool(doc_info['EXAMPLES']['value']): self.reporter.error( path=self.object_path, code=310, msg='No EXAMPLES provided' ) else: _, errors, traces = parse_yaml(doc_info['EXAMPLES']['value'], doc_info['EXAMPLES']['lineno'], self.name, 'EXAMPLES', load_all=True) for error in errors: self.reporter.error( path=self.object_path, code=311, **error ) for trace in traces: self.reporter.trace( path=self.object_path, tracebk=trace ) if not bool(doc_info['RETURN']['value']): if self._is_new_module(): self.reporter.error( path=self.object_path, code=312, msg='No RETURN provided' ) else: self.reporter.warning( path=self.object_path, code=312, msg='No RETURN provided' ) else: data, errors, traces = parse_yaml(doc_info['RETURN']['value'], doc_info['RETURN']['lineno'], self.name, 'RETURN') if data: for ret_key in data: self._validate_docs_schema(data[ret_key], return_schema(data[ret_key]), 'RETURN.%s' % ret_key, 319) for error in errors: self.reporter.error( path=self.object_path, code=313, **error ) for trace in traces: self.reporter.trace( path=self.object_path, tracebk=trace ) if not bool(doc_info['ANSIBLE_METADATA']['value']): self.reporter.error( path=self.object_path, code=314, msg='No ANSIBLE_METADATA provided' ) else: metadata = None if isinstance(doc_info['ANSIBLE_METADATA']['value'], ast.Dict): metadata = ast.literal_eval( doc_info['ANSIBLE_METADATA']['value'] ) else: metadata, errors, traces = parse_yaml( doc_info['ANSIBLE_METADATA']['value'].s, 
doc_info['ANSIBLE_METADATA']['lineno'], self.name, 'ANSIBLE_METADATA' ) for error in errors: self.reporter.error( path=self.object_path, code=315, **error ) for trace in traces: self.reporter.trace( path=self.object_path, tracebk=trace ) if metadata: self._validate_docs_schema(metadata, metadata_1_1_schema(deprecated), 'ANSIBLE_METADATA', 316) return doc_info def _check_version_added(self, doc): if not self._is_new_module(): return try: version_added = StrictVersion(str(doc.get('version_added', '0.0') or '0.0')) except ValueError: version_added = doc.get('version_added', '0.0') self.reporter.error( path=self.object_path, code=306, msg='version_added is not a valid version number: %r' % version_added ) return should_be = '.'.join(ansible_version.split('.')[:2]) strict_ansible_version = StrictVersion(should_be) if (version_added < strict_ansible_version or strict_ansible_version < version_added): self.reporter.error( path=self.object_path, code=307, msg='version_added should be %s. Currently %s' % (should_be, version_added) ) def _validate_argument_spec(self): if not self.analyze_arg_spec: return try: spec = get_argument_spec(self.path) except AnsibleModuleImportError: self.reporter.error( path=self.object_path, code=321, msg='Exception attempting to import module for argument_spec introspection' ) self.reporter.trace( path=self.object_path, tracebk=traceback.format_exc() ) return for arg, data in spec.items(): if data.get('required') and data.get('default', object) != object: self.reporter.error( path=self.object_path, code=317, msg=('"%s" is marked as required but specifies ' 'a default. Arguments with a default ' 'should not be marked as required' % arg) ) def _check_for_new_args(self, doc): if not self.base_branch or self._is_new_module(): return with CaptureStd(): try: existing_doc = get_docstring(self.base_module, fragment_loader, verbose=True)[0] existing_options = existing_doc.get('options', {}) or {} except AssertionError: fragment = doc['extends_documentation_fragment'] self.reporter.warning( path=self.object_path, code=392, msg='Pre-existing DOCUMENTATION fragment missing: %s' % fragment ) return except Exception as e: self.reporter.warning_trace( path=self.object_path, tracebk=e ) self.reporter.warning( path=self.object_path, code=391, msg=('Unknown pre-existing DOCUMENTATION ' 'error, see TRACE. Submodule refs may ' 'need updated') ) return try: mod_version_added = StrictVersion( str(existing_doc.get('version_added', '0.0')) ) except ValueError: mod_version_added = StrictVersion('0.0') options = doc.get('options', {}) or {} should_be = '.'.join(ansible_version.split('.')[:2]) strict_ansible_version = StrictVersion(should_be) for option, details in options.items(): try: names = [option] + details.get('aliases', []) except (TypeError, AttributeError): # Reporting of this syntax error will be handled by schema validation. 
continue if any(name in existing_options for name in names): continue try: version_added = StrictVersion( str(details.get('version_added', '0.0')) ) except ValueError: version_added = details.get('version_added', '0.0') self.reporter.error( path=self.object_path, code=308, msg=('version_added for new option (%s) ' 'is not a valid version number: %r' % (option, version_added)) ) continue except Exception: # If there is any other exception it should have been caught # in schema validation, so we won't duplicate errors by # listing it again continue if (strict_ansible_version != mod_version_added and (version_added < strict_ansible_version or strict_ansible_version < version_added)): self.reporter.error( path=self.object_path, code=309, msg=('version_added for new option (%s) should ' 'be %s. Currently %s' % (option, should_be, version_added)) ) @staticmethod def is_blacklisted(path): base_name = os.path.basename(path) file_name, _ = os.path.splitext(base_name) if file_name.startswith('_') and os.path.islink(path): return True if not frozenset((base_name, file_name)).isdisjoint(ModuleValidator.BLACKLIST): return True for pat in ModuleValidator.BLACKLIST_PATTERNS: if fnmatch(base_name, pat): return True return False def validate(self): super(ModuleValidator, self).validate() if not self._python_module() and not self._powershell_module(): self.reporter.error( path=self.object_path, code=501, msg=('Official Ansible modules must have a .py ' 'extension for python modules or a .ps1 ' 'for powershell modules') ) self._python_module_override = True if self._python_module() and self.ast is None: self.reporter.error( path=self.object_path, code=401, msg='Python SyntaxError while parsing module' ) try: compile(self.text, self.path, 'exec') except Exception: self.reporter.trace( path=self.object_path, tracebk=traceback.format_exc() ) return if self._python_module(): doc_info = self._validate_docs() if self._python_module() and not self._just_docs(): self._validate_argument_spec() self._check_for_sys_exit() self._find_blacklist_imports() main = self._find_main_call() self._find_module_utils(main) self._find_has_import() self._check_for_tabs() first_callable = self._get_first_callable() self._ensure_imports_below_docs(doc_info, first_callable) if self._powershell_module(): self._validate_ps_replacers() self._find_ps_docs_py_file() self._check_gpl3_header() if not self._just_docs(): self._check_interpreter(powershell=self._powershell_module()) self._check_type_instead_of_isinstance( powershell=self._powershell_module() ) class PythonPackageValidator(Validator): BLACKLIST_FILES = frozenset(('__pycache__',)) def __init__(self, path, reporter=None): super(PythonPackageValidator, self).__init__(reporter=reporter or Reporter()) self.path = path self.basename = os.path.basename(path) @property def object_name(self): return self.basename @property def object_path(self): return self.path def validate(self): super(PythonPackageValidator, self).validate() if self.basename in self.BLACKLIST_FILES: return init_file = os.path.join(self.path, '__init__.py') if not os.path.exists(init_file): self.reporter.error( path=self.object_path, code=502, msg='Ansible module subdirectories must contain an __init__.py' ) def re_compile(value): """ Argparse expects things to raise TypeError, re.compile raises an re.error exception This function is a shorthand to convert the re.error exception to a TypeError """ try: return re.compile(value) except re.error as e: raise TypeError(e) def main(): parser = 
argparse.ArgumentParser(prog="validate-modules") parser.add_argument('modules', nargs='+', help='Path to module or module directory') parser.add_argument('-w', '--warnings', help='Show warnings', action='store_true') parser.add_argument('--exclude', help='RegEx exclusion pattern', type=re_compile) parser.add_argument('--arg-spec', help='Analyze module argument spec', action='store_true', default=False) parser.add_argument('--base-branch', default=None, help='Used in determining if new options were added') parser.add_argument('--format', choices=['json', 'plain'], default='plain', help='Output format. Default: "%(default)s"') parser.add_argument('--output', default='-', help='Output location, use "-" for stdout. ' 'Default "%(default)s"') args = parser.parse_args() args.modules[:] = [m.rstrip('/') for m in args.modules] reporter = Reporter() git_cache = GitCache(args.base_branch) check_dirs = set() for module in args.modules: if os.path.isfile(module): path = module if args.exclude and args.exclude.search(path): continue if ModuleValidator.is_blacklisted(path): continue with ModuleValidator(path, analyze_arg_spec=args.arg_spec, base_branch=args.base_branch, git_cache=git_cache, reporter=reporter) as mv: mv.validate() check_dirs.add(os.path.dirname(path)) for root, dirs, files in os.walk(module): basedir = root[len(module) + 1:].split('/', 1)[0] if basedir in BLACKLIST_DIRS: continue for dirname in dirs: if root == module and dirname in BLACKLIST_DIRS: continue path = os.path.join(root, dirname) if args.exclude and args.exclude.search(path): continue check_dirs.add(path) for filename in files: path = os.path.join(root, filename) if args.exclude and args.exclude.search(path): continue if ModuleValidator.is_blacklisted(path): continue with ModuleValidator(path, analyze_arg_spec=args.arg_spec, base_branch=args.base_branch, git_cache=git_cache, reporter=reporter) as mv: mv.validate() for path in sorted(check_dirs): pv = PythonPackageValidator(path, reporter=reporter) pv.validate() if args.format == 'plain': sys.exit(reporter.plain(warnings=args.warnings, output=args.output)) else: sys.exit(reporter.json(warnings=args.warnings, output=args.output)) class GitCache(object): def __init__(self, base_branch): self.base_branch = base_branch if self.base_branch: self.base_tree = self._git(['ls-tree', '-r', '--name-only', self.base_branch, 'lib/ansible/modules/']) else: self.base_tree = [] try: self.head_tree = self._git(['ls-tree', '-r', '--name-only', 'HEAD', 'lib/ansible/modules/']) except GitError as ex: if ex.status == 128: # fallback when there is no .git directory self.head_tree = self._get_module_files() else: raise except OSError as ex: if ex.errno == errno.ENOENT: # fallback when git is not installed self.head_tree = self._get_module_files() else: raise self.base_module_paths = dict((os.path.basename(p), p) for p in self.base_tree if os.path.splitext(p)[1] in ('.py', '.ps1')) self.base_module_paths.pop('__init__.py', None) self.head_aliased_modules = set() for path in self.head_tree: filename = os.path.basename(path) if filename.startswith('_') and filename != '__init__.py': if os.path.islink(path): self.head_aliased_modules.add(os.path.basename(os.path.realpath(path))) @staticmethod def _get_module_files(): module_files = [] for (dir_path, dir_names, file_names) in os.walk('lib/ansible/modules/'): for file_name in file_names: module_files.append(os.path.join(dir_path, file_name)) return module_files @staticmethod def _git(args): cmd = ['git'] + args p = subprocess.Popen(cmd, 
stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() if p.returncode != 0: raise GitError(stderr, p.returncode) return stdout.decode('utf-8').splitlines() class GitError(Exception): def __init__(self, message, status): super(GitError, self).__init__(message) self.status = status if __name__ == '__main__': try: main() except KeyboardInterrupt: pass
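# ---------------------------------------------------------------------------
# Illustrative sketch only (not from the upstream script): it restates how
# main() above wires Reporter, GitCache and ModuleValidator together, so the
# same classes can be driven programmatically.  The module path below is
# hypothetical.
def _example_validate_single_module():
    path = 'lib/ansible/modules/files/copy.py'  # hypothetical module path
    reporter = Reporter()
    git_cache = GitCache(None)  # no base branch: skip the "new option" comparisons
    if not ModuleValidator.is_blacklisted(path):
        # Same construction main() uses for every file it walks.
        with ModuleValidator(path, analyze_arg_spec=True, base_branch=None,
                             git_cache=git_cache, reporter=reporter) as mv:
            mv.validate()
    # plain() prints the report and returns the exit status, exactly as main() does.
    return reporter.plain(warnings=True, output='-')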
gpl-3.0
8,874,566,115,994,565,000
35.585044
132
0.487936
false
sixdub/Minions
scans/models.py
1
1839
from django.db import models from django.contrib.auth.models import User from django.core.exceptions import ValidationError import re # Create your models here. class Scan(models.Model): name=models.CharField(max_length=200,default="") hosts=models.TextField(default="") profile=models.ForeignKey("Scan_Profile", related_name="scanprofile") user = models.ForeignKey(User,blank=True, null=True, related_name="user") version =models.CharField(max_length=100, blank=True, null=True) summary=models.TextField(blank=True, null=True) finished=models.BooleanField(default=False) def __unicode__(self): return self.name # only allow IP addresses and properly formatted host names to pass through; values may be comma separated and split across lines. def isvalid(self, el): el = el.rstrip() fqdn = re.findall("(?=^.{4,255}$)(^((?!-)[a-zA-Z0-9-]{0,62}[a-zA-Z0-9]\.)+[a-zA-Z]{2,63}$)", el) ips = re.findall("(?:[0-9]{1,3}\.){3}[0-9]{1,3}", el) if len(ips) + len(fqdn) <= 0: raise ValidationError("Proper FQDN or IP not provided") def clean(self): for line in self.hosts.split("\n"): # the hosts field may span multiple lines; validate each line separately elems = line.split(",") # creates a list from the comma separated values if line: for el in elems: self.isvalid(el) class Scan_Profile(models.Model): name=models.CharField(max_length=100, default="", unique=True) author=models.ForeignKey(User, related_name="profile_author") cmdline=models.TextField(default="") def __unicode__(self): return self.name # don't allow any output format flags. We handle that :) def clean(self): if "nmap" in self.cmdline: raise ValidationError('Do not place "nmap" in the command line arguments!') m = re.findall("-o[A-Z]", self.cmdline) if m: raise ValidationError('No "-o" flags... We will decide the output for you!')
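# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the original app): shows what the host
# validation above accepts and rejects.  The host values are made up.
def _example_host_validation():
    ok = Scan(name="demo", hosts="10.0.0.1,example.org")
    ok.clean()  # passes: one IP and one FQDN, comma separated on a single line
    bad = Scan(name="demo", hosts="bad host!")
    try:
        bad.clean()
    except ValidationError:
        pass  # rejected: "bad host!" matches neither the IP nor the FQDN pattern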
gpl-2.0
-5,329,743,307,292,903,000
35.058824
118
0.703643
false
kubevirt/client-python
kubevirt/models/v1_domain_spec.py
1
10040
# coding: utf-8 """ KubeVirt API This is KubeVirt API an add-on for Kubernetes. OpenAPI spec version: 1.0.0 Contact: [email protected] Generated by: https://github.com/swagger-api/swagger-codegen.git """ from pprint import pformat from six import iteritems import re class V1DomainSpec(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'chassis': 'V1Chassis', 'clock': 'V1Clock', 'cpu': 'V1CPU', 'devices': 'V1Devices', 'features': 'V1Features', 'firmware': 'V1Firmware', 'io_threads_policy': 'str', 'machine': 'V1Machine', 'memory': 'V1Memory', 'resources': 'V1ResourceRequirements' } attribute_map = { 'chassis': 'chassis', 'clock': 'clock', 'cpu': 'cpu', 'devices': 'devices', 'features': 'features', 'firmware': 'firmware', 'io_threads_policy': 'ioThreadsPolicy', 'machine': 'machine', 'memory': 'memory', 'resources': 'resources' } def __init__(self, chassis=None, clock=None, cpu=None, devices=None, features=None, firmware=None, io_threads_policy=None, machine=None, memory=None, resources=None): """ V1DomainSpec - a model defined in Swagger """ self._chassis = None self._clock = None self._cpu = None self._devices = None self._features = None self._firmware = None self._io_threads_policy = None self._machine = None self._memory = None self._resources = None if chassis is not None: self.chassis = chassis if clock is not None: self.clock = clock if cpu is not None: self.cpu = cpu self.devices = devices if features is not None: self.features = features if firmware is not None: self.firmware = firmware if io_threads_policy is not None: self.io_threads_policy = io_threads_policy if machine is not None: self.machine = machine if memory is not None: self.memory = memory if resources is not None: self.resources = resources @property def chassis(self): """ Gets the chassis of this V1DomainSpec. Chassis specifies the chassis info passed to the domain. :return: The chassis of this V1DomainSpec. :rtype: V1Chassis """ return self._chassis @chassis.setter def chassis(self, chassis): """ Sets the chassis of this V1DomainSpec. Chassis specifies the chassis info passed to the domain. :param chassis: The chassis of this V1DomainSpec. :type: V1Chassis """ self._chassis = chassis @property def clock(self): """ Gets the clock of this V1DomainSpec. Clock sets the clock and timers of the vmi. :return: The clock of this V1DomainSpec. :rtype: V1Clock """ return self._clock @clock.setter def clock(self, clock): """ Sets the clock of this V1DomainSpec. Clock sets the clock and timers of the vmi. :param clock: The clock of this V1DomainSpec. :type: V1Clock """ self._clock = clock @property def cpu(self): """ Gets the cpu of this V1DomainSpec. CPU allow specified the detailed CPU topology inside the vmi. :return: The cpu of this V1DomainSpec. :rtype: V1CPU """ return self._cpu @cpu.setter def cpu(self, cpu): """ Sets the cpu of this V1DomainSpec. CPU allow specified the detailed CPU topology inside the vmi. :param cpu: The cpu of this V1DomainSpec. :type: V1CPU """ self._cpu = cpu @property def devices(self): """ Gets the devices of this V1DomainSpec. Devices allows adding disks, network interfaces, and others :return: The devices of this V1DomainSpec. 
:rtype: V1Devices """ return self._devices @devices.setter def devices(self, devices): """ Sets the devices of this V1DomainSpec. Devices allows adding disks, network interfaces, and others :param devices: The devices of this V1DomainSpec. :type: V1Devices """ if devices is None: raise ValueError("Invalid value for `devices`, must not be `None`") self._devices = devices @property def features(self): """ Gets the features of this V1DomainSpec. Features like acpi, apic, hyperv, smm. :return: The features of this V1DomainSpec. :rtype: V1Features """ return self._features @features.setter def features(self, features): """ Sets the features of this V1DomainSpec. Features like acpi, apic, hyperv, smm. :param features: The features of this V1DomainSpec. :type: V1Features """ self._features = features @property def firmware(self): """ Gets the firmware of this V1DomainSpec. Firmware. :return: The firmware of this V1DomainSpec. :rtype: V1Firmware """ return self._firmware @firmware.setter def firmware(self, firmware): """ Sets the firmware of this V1DomainSpec. Firmware. :param firmware: The firmware of this V1DomainSpec. :type: V1Firmware """ self._firmware = firmware @property def io_threads_policy(self): """ Gets the io_threads_policy of this V1DomainSpec. Controls whether or not disks will share IOThreads. Omitting IOThreadsPolicy disables use of IOThreads. One of: shared, auto :return: The io_threads_policy of this V1DomainSpec. :rtype: str """ return self._io_threads_policy @io_threads_policy.setter def io_threads_policy(self, io_threads_policy): """ Sets the io_threads_policy of this V1DomainSpec. Controls whether or not disks will share IOThreads. Omitting IOThreadsPolicy disables use of IOThreads. One of: shared, auto :param io_threads_policy: The io_threads_policy of this V1DomainSpec. :type: str """ self._io_threads_policy = io_threads_policy @property def machine(self): """ Gets the machine of this V1DomainSpec. Machine type. :return: The machine of this V1DomainSpec. :rtype: V1Machine """ return self._machine @machine.setter def machine(self, machine): """ Sets the machine of this V1DomainSpec. Machine type. :param machine: The machine of this V1DomainSpec. :type: V1Machine """ self._machine = machine @property def memory(self): """ Gets the memory of this V1DomainSpec. Memory allow specifying the VMI memory features. :return: The memory of this V1DomainSpec. :rtype: V1Memory """ return self._memory @memory.setter def memory(self, memory): """ Sets the memory of this V1DomainSpec. Memory allow specifying the VMI memory features. :param memory: The memory of this V1DomainSpec. :type: V1Memory """ self._memory = memory @property def resources(self): """ Gets the resources of this V1DomainSpec. Resources describes the Compute Resources required by this vmi. :return: The resources of this V1DomainSpec. :rtype: V1ResourceRequirements """ return self._resources @resources.setter def resources(self, resources): """ Sets the resources of this V1DomainSpec. Resources describes the Compute Resources required by this vmi. :param resources: The resources of this V1DomainSpec. 
:type: V1ResourceRequirements """ self._resources = resources def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ if not isinstance(other, V1DomainSpec): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other
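# ---------------------------------------------------------------------------
# Illustrative sketch only (not produced by swagger-codegen): minimal use of
# the model above.  ``devices`` is taken as a parameter so the sketch needs no
# extra imports; in practice it would be a V1Devices instance from the same
# generated client package.
def _example_domain_spec(devices):
    # Constructing V1DomainSpec() without arguments raises ValueError, because
    # the devices setter above rejects None.
    spec = V1DomainSpec(devices=devices, io_threads_policy='shared')
    return spec.to_dict()  # plain dict suitable for JSON serialization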
apache-2.0
-6,167,048,185,573,170,000
25.560847
170
0.566036
false
ConnectedSystems/veneer-py
veneer/navigate.py
1
2140
''' Prototype functionality for interacting with the Source model directly, including tab-completion in IPython/Jupyter. Eg v = veneer.Veneer() scenario = Queryable(v) scenario.Name = 'New Scenario Name' ''' class Queryable(object): def __init__(self,v,path='scenario',namespace=None): self._v = v self._path = path self._init = False self._ns = namespace def _eval_(self): return self._v.model.get(self._path,namespace=self._ns) def _child_(self,path): val = Queryable(self._v,'%s.%s'%(self._path,path),namespace=self._ns) return val def _double_quote_(self,maybe_string): v = maybe_string if not isinstance(v,str): return v if not "'" in v: return "'%s'"%v if not '"' in v: return '"%s"'%v v = v.replace('"','\\"') return '"%s"'%v def _child_idx_(self,ix): return Queryable(self._v,'%s[%s]'%(self._path,str(ix)),namespace=self._ns) def _initialise_children_(self,entries): if self._init: return self._init = True for r in entries: if r[:2]=='__': continue super(Queryable,self).__setattr__(r,self._child_(r)) def _run_script(self,script): return self._v.model._safe_run('%s\n%s'%(self._v.model._init_script(self._ns),script)) def __call__(self,*args,**kwargs): return self._v.model.call(self._path+str(tuple(args))) def __repr__(self): return str(self._eval_()) def __dir__(self): res = [e['Value'] for e in self._run_script('dir(%s)'%(self._path))['Response']['Value']] self._initialise_children_(res) return res def __getattr__(self,attrname): return self._child_(attrname) def __getitem__(self,ix): return self._child_idx_(ix) def __setattr__(self,a,v): if a.startswith('_'): return super(Queryable,self).__setattr__(a,v) v = self._double_quote_(v) if not self._v.model.set('%s.%s'%(self._path,a),v): raise Exception("Couldn't set property")
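# ---------------------------------------------------------------------------
# Illustrative sketch only: a slightly longer walk through Queryable than the
# docstring above.  ``v`` is a connected veneer.Veneer() client; the attribute
# names (Network, Nodes, Name) are illustrative -- what actually exists
# depends on the Source model that is loaded.
def _example_navigation(v):
    scenario = Queryable(v)
    node = scenario.Network.Nodes[0]  # lazily builds the path 'scenario.Network.Nodes[0]'
    print(node)                       # __repr__ fetches the current value via v.model.get(...)
    node.Name = 'Gauge 1'             # __setattr__ writes back through v.model.set(...)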
isc
-557,938,653,125,760,300
28.722222
119
0.550935
false
jonathanchu/django-statusboard
statusboard/statusboard/wsgi.py
1
1575
""" WSGI config for statusboard project. This module contains the WSGI application used by Django's development server and any production WSGI deployments. It should expose a module-level variable named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover this application via the ``WSGI_APPLICATION`` setting. Usually you will have the standard Django WSGI application here, but it also might make sense to replace the whole Django WSGI application with a custom one that later delegates to the Django one. For example, you could introduce WSGI middleware here, or combine a Django application with an application of another framework. """ import os from os.path import abspath, dirname from sys import path SITE_ROOT = dirname(dirname(abspath(__file__))) path.append(SITE_ROOT) # We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks # if running multiple sites in the same mod_wsgi process. To fix this, use # mod_wsgi daemon mode with each site in its own daemon process, or use # os.environ["DJANGO_SETTINGS_MODULE"] = "statusboard.settings" os.environ.setdefault("DJANGO_SETTINGS_MODULE", "statusboard.settings.production") # This application object is used by any WSGI server configured to use this # file. This includes Django's development server, if the WSGI_APPLICATION # setting points here. from django.core.wsgi import get_wsgi_application application = get_wsgi_application() # Apply WSGI middleware here. # from helloworld.wsgi import HelloWorldApplication # application = HelloWorldApplication(application)
mit
-7,914,210,772,737,524,000
41.567568
82
0.792381
false
pwwang/bioprocs
bioprocs/utils/shell2.py
1
1995
import sys from modkit import Modkit import cmdy DEFAULT_CONFIG = dict( default = dict(_raise = True), bedtools = dict(_prefix = '-'), biobambam = dict(_sep = '=', _prefix = ''), bowtie2 = dict(_dupkey = True), dtoxog = dict(_out = cmdy.DEVERR, _prefix = '-'), sort = dict(_sep = '', _dupkey = True), gatk3 = dict(_dupkey = True), hla_la = dict(_raw = True), liftover = dict(_prefix = '-', _sep = '='), oncotator = dict(_sep = 'auto'), optitype = dict(_dupkey = False), maf2vcf = dict(_sep = ' '), netmhc = dict(_prefix = '-'), # As of picard 2.20.5-SNAPSHOT # it's changing in the future. See: https://github.com/broadinstitute/picard/wiki/Command-Line-Syntax-Transition-For-Users-(Pre-Transition) # Future one should be: # picard = dict(_sep = ' ', _prefix = '-') picard = dict(_sep = '=', _prefix = ''), plink = dict(_out = cmdy.DEVERR), pyclone = dict(_raw = True), razers3 = dict(_prefix = '-'), snpeff = dict(_prefix = '-'), vcfanno = dict(_prefix = '-'), vep = dict(_dupkey = True, _raw = True), ) cmdy.config._load(DEFAULT_CONFIG) def _modkit_delegate(name): return getattr(cmdy, name) # run command in the foreground fg = cmdy(_fg = True, _debug = True) bg = cmdy(_bg = True, _debug = True) out = cmdy(_out = '>') pipe = cmdy(_pipe = True) ## aliases rm_rf = cmdy.rm.bake(r = True, f = True) ln_s = cmdy.ln.bake(s = True) kill_9 = cmdy.kill.bake(s = 9) wc_l = cmdy.wc.bake(l = True) cp = copy = cmdy.cp mv = move = cmdy.mv which = lambda x: cmdy.which(x).strip() runcmd = lambda cmd: cmdy.bash(c = cmd) def load_config(conf = None, **kwargs): conf = conf or {} conf.update(kwargs) conf2load = {'default': DEFAULT_CONFIG['default']} for key, val in conf.items(): conf2load[key] = DEFAULT_CONFIG.get(key, {}).copy() conf2load[key].update(val if isinstance(val, dict) else {'_exe': val}) cmdy.config._load(conf2load) fg.config._load(conf2load) out.config._load(conf2load) Modkit()
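# ---------------------------------------------------------------------------
# Illustrative sketch only: how load_config() above is meant to be used.  The
# executable paths and the 'samtools' lookup are made up.  Per tool you can
# pass either a plain path (stored under '_exe') or a dict of cmdy overrides
# that is merged over the defaults in DEFAULT_CONFIG.
def _example_configure_tools():
    load_config(
        bedtools='/opt/bio/bin/bedtools',                 # plain string -> {'_exe': ...}
        picard={'_exe': '/opt/bio/picard', '_sep': ' '},  # dict merged over the picard defaults
    )
    print(which('samtools'))  # the `which` alias above strips the trailing newline
    runcmd('echo hello')      # runs through `bash -c`, per the alias above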
mit
2,583,899,961,950,900,000
28.776119
140
0.606015
false
WojciechMigda/KAGGLE-prudential-life-insurance-assessment
src/XGB_offset_reg.py
1
90763
#!/opt/anaconda2/bin/python # -*- coding: utf-8 -*- """ ################################################################################ # # Copyright (c) 2016 Wojciech Migda # All rights reserved # Distributed under the terms of the MIT license # ################################################################################ # # Filename: XGB_offset_reg.py # # Decription: # XGBoost with offset fitting (based on Kaggle scripts) # # Authors: # Wojciech Migda # ################################################################################ # # History: # -------- # Date Who Ticket Description # ---------- --- --------- ------------------------------------------------ # 2016-01-22 wm Initial version # ################################################################################ """ from __future__ import print_function DEBUG = False __all__ = [] __version__ = "0.0.1" __date__ = '2016-01-22' __updated__ = '2016-01-22' NOMINALS = ['Product_Info_1', 'Product_Info_2', 'Product_Info_3', 'Product_Info_5', 'Product_Info_6', 'Product_Info_7', 'Employment_Info_2', 'Employment_Info_3', 'Employment_Info_5', 'InsuredInfo_1', 'InsuredInfo_2', 'InsuredInfo_3', 'InsuredInfo_4', 'InsuredInfo_5', 'InsuredInfo_6', 'InsuredInfo_7', 'Insurance_History_1', 'Insurance_History_2', 'Insurance_History_3', 'Insurance_History_4', 'Insurance_History_7', 'Insurance_History_8', 'Insurance_History_9', 'Family_Hist_1', 'Medical_History_2', 'Medical_History_3', 'Medical_History_4', 'Medical_History_5', 'Medical_History_6', 'Medical_History_7', 'Medical_History_8', 'Medical_History_9', 'Medical_History_11', 'Medical_History_12', 'Medical_History_13', 'Medical_History_14', 'Medical_History_16', 'Medical_History_17', 'Medical_History_18', 'Medical_History_19', 'Medical_History_20', 'Medical_History_21', 'Medical_History_22', 'Medical_History_23', 'Medical_History_25', 'Medical_History_26', 'Medical_History_27', 'Medical_History_28', 'Medical_History_29', 'Medical_History_30', 'Medical_History_31', 'Medical_History_33', 'Medical_History_34', 'Medical_History_35', 'Medical_History_36', 'Medical_History_37', 'Medical_History_38', 'Medical_History_39', 'Medical_History_40', 'Medical_History_41'] NOMINALS_2 = ['Product_Info_1', 'Product_Info_5', 'Product_Info_6', 'Employment_Info_3', 'Employment_Info_5', 'InsuredInfo_2', 'InsuredInfo_4', 'InsuredInfo_5', 'InsuredInfo_6', 'InsuredInfo_7', 'Insurance_History_1', 'Medical_History_4', 'Medical_History_22'] NOMINALS_3 = ['Product_Info_7', 'InsuredInfo_1', 'Insurance_History_2', 'Insurance_History_3', 'Insurance_History_4', 'Insurance_History_7', 'Insurance_History_8', 'Insurance_History_9', 'Family_Hist_1', 'Medical_History_3', 'Medical_History_5', 'Medical_History_6', 'Medical_History_7', 'Medical_History_8', 'Medical_History_9', 'Medical_History_11', 'Medical_History_12', 'Medical_History_13', 'Medical_History_14', 'Medical_History_16', 'Medical_History_17', 'Medical_History_18', 'Medical_History_19', 'Medical_History_20', 'Medical_History_21', 'Medical_History_23', 'Medical_History_25', 'Medical_History_26', 'Medical_History_27', 'Medical_History_28', 'Medical_History_29', 'Medical_History_30', 'Medical_History_31', 'Medical_History_33', 'Medical_History_34', 'Medical_History_35', 'Medical_History_36', 'Medical_History_37', 'Medical_History_38', 'Medical_History_39', 'Medical_History_40', 'Medical_History_41'] NOMINALS_GE4 = [ 'InsuredInfo_3', # 11 'Product_Info_2', # 19 'Product_Info_3', # 38 'Employment_Info_2', # 38 'Medical_History_2' # 628 ] CONTINUOUS = ['Product_Info_4', 
'Ins_Age', 'Ht', 'Wt', 'BMI', 'Employment_Info_1', 'Employment_Info_4', 'Employment_Info_6', 'Insurance_History_5', 'Family_Hist_2', 'Family_Hist_3', 'Family_Hist_4', 'Family_Hist_5'] DISCRETE = ['Medical_History_1', 'Medical_History_10', 'Medical_History_15', 'Medical_History_24', 'Medical_History_32'] BOOLEANS = ['Medical_Keyword_' + str(i + 1) for i in range(48)] TO_DROP = ['Medical_Keyword_13', 'Product_Info_7_2', 'InsuredInfo_1_3', 'Insurance_History_2_2', 'Insurance_History_3_2', 'Medical_History_3_1', 'Medical_History_5_3', 'Medical_History_6_2', 'Medical_History_8_1', 'Medical_History_9_3', 'Medical_History_11_1', 'Medical_History_12_1', 'Medical_History_13_2', 'Medical_History_16_2', 'Medical_History_17_1', 'Medical_History_18_3', 'Medical_History_19_3', 'Medical_History_20_3', 'Medical_History_21_3', 'Medical_History_23_2', 'Medical_History_25_3', 'Medical_History_26_1', 'Medical_History_27_2', 'Medical_History_28_3', 'Medical_History_29_2', 'Medical_History_30_1', 'Medical_History_31_2', 'Medical_History_33_2', 'Medical_History_34_2', 'Medical_History_35_1', 'Medical_History_35_2', 'Medical_History_35_3', 'Medical_History_37_3', 'Medical_History_38_3', 'Medical_History_39_2', 'Medical_History_40_2', 'Medical_History_41_2', 'InsuredInfo_3_9', 'Product_Info_2_7', 'Product_Info_2_9', 'Product_Info_2_13', 'Product_Info_2_14', 'Product_Info_3_1', 'Product_Info_3_2', 'Product_Info_3_3', 'Product_Info_3_4', 'Product_Info_3_5', 'Product_Info_3_6', 'Product_Info_3_7', 'Product_Info_3_8', 'Product_Info_3_9', 'Product_Info_3_11', 'Product_Info_3_12', 'Product_Info_3_13', 'Product_Info_3_14', 'Product_Info_3_15', 'Product_Info_3_16', 'Product_Info_3_17', 'Product_Info_3_18', 'Product_Info_3_19', 'Product_Info_3_20', 'Product_Info_3_21', 'Product_Info_3_22', 'Product_Info_3_23', 'Product_Info_3_24', 'Product_Info_3_25', 'Product_Info_3_27', 'Product_Info_3_28', 'Product_Info_3_30', 'Product_Info_3_32', 'Product_Info_3_33', 'Product_Info_3_34', 'Product_Info_3_35', 'Product_Info_3_36', 'Product_Info_3_37', 'Product_Info_3_38', 'Employment_Info_2_2', 'Employment_Info_2_4', 'Employment_Info_2_5', 'Employment_Info_2_6', 'Employment_Info_2_7', 'Employment_Info_2_8', 'Employment_Info_2_10', 'Employment_Info_2_13', 'Employment_Info_2_15', 'Employment_Info_2_16', 'Employment_Info_2_17', 'Employment_Info_2_18', 'Employment_Info_2_19', 'Employment_Info_2_20', 'Employment_Info_2_21', 'Employment_Info_2_22', 'Employment_Info_2_23', 'Employment_Info_2_24', 'Employment_Info_2_25', 'Employment_Info_2_26', 'Employment_Info_2_27', 'Employment_Info_2_28', 'Employment_Info_2_29', 'Employment_Info_2_30', 'Employment_Info_2_31', 'Employment_Info_2_33', 'Employment_Info_2_34', 'Employment_Info_2_35', 'Employment_Info_2_36', 'Employment_Info_2_37', 'Employment_Info_2_38', 'Medical_History_2_1', 'Medical_History_2_2', 'Medical_History_2_4', 'Medical_History_2_5', 'Medical_History_2_6', 'Medical_History_2_7', 'Medical_History_2_8', 'Medical_History_2_9', 'Medical_History_2_10', 'Medical_History_2_11', 'Medical_History_2_12', 'Medical_History_2_13', 'Medical_History_2_15', 'Medical_History_2_17', 'Medical_History_2_18', 'Medical_History_2_19', 'Medical_History_2_20', 'Medical_History_2_21', 'Medical_History_2_22', 'Medical_History_2_23', 'Medical_History_2_24', 'Medical_History_2_25', 'Medical_History_2_26', 'Medical_History_2_27', 'Medical_History_2_28', 'Medical_History_2_29', 'Medical_History_2_30', 'Medical_History_2_31', 'Medical_History_2_32', 'Medical_History_2_33', 'Medical_History_2_34', 
'Medical_History_2_35', 'Medical_History_2_36', 'Medical_History_2_37', 'Medical_History_2_38', 'Medical_History_2_39', 'Medical_History_2_40', 'Medical_History_2_41', 'Medical_History_2_42', 'Medical_History_2_43', 'Medical_History_2_44', 'Medical_History_2_45', 'Medical_History_2_46', 'Medical_History_2_47', 'Medical_History_2_48', 'Medical_History_2_49', 'Medical_History_2_50', 'Medical_History_2_51', 'Medical_History_2_52', 'Medical_History_2_53', 'Medical_History_2_54', 'Medical_History_2_55', 'Medical_History_2_56', 'Medical_History_2_58', 'Medical_History_2_59', 'Medical_History_2_60', 'Medical_History_2_61', 'Medical_History_2_62', 'Medical_History_2_63', 'Medical_History_2_64', 'Medical_History_2_65', 'Medical_History_2_66', 'Medical_History_2_67', 'Medical_History_2_68', 'Medical_History_2_69', 'Medical_History_2_70', 'Medical_History_2_71', 'Medical_History_2_72', 'Medical_History_2_73', 'Medical_History_2_74', 'Medical_History_2_75', 'Medical_History_2_76', 'Medical_History_2_77', 'Medical_History_2_78', 'Medical_History_2_79', 'Medical_History_2_80', 'Medical_History_2_81', 'Medical_History_2_82', 'Medical_History_2_83', 'Medical_History_2_84', 'Medical_History_2_85', 'Medical_History_2_86', 'Medical_History_2_87', 'Medical_History_2_88', 'Medical_History_2_89', 'Medical_History_2_90', 'Medical_History_2_91', 'Medical_History_2_92', 'Medical_History_2_93', 'Medical_History_2_94', 'Medical_History_2_95', 'Medical_History_2_96', 'Medical_History_2_97', 'Medical_History_2_98', 'Medical_History_2_99', 'Medical_History_2_100', 'Medical_History_2_101', 'Medical_History_2_102', 'Medical_History_2_103', 'Medical_History_2_104', 'Medical_History_2_105', 'Medical_History_2_106', 'Medical_History_2_107', 'Medical_History_2_108', 'Medical_History_2_109', 'Medical_History_2_110', 'Medical_History_2_112', 'Medical_History_2_113', 'Medical_History_2_114', 'Medical_History_2_115', 'Medical_History_2_116', 'Medical_History_2_117', 'Medical_History_2_118', 'Medical_History_2_119', 'Medical_History_2_120', 'Medical_History_2_121', 'Medical_History_2_122', 'Medical_History_2_123', 'Medical_History_2_124', 'Medical_History_2_125', 'Medical_History_2_126', 'Medical_History_2_127', 'Medical_History_2_128', 'Medical_History_2_130', 'Medical_History_2_131', 'Medical_History_2_132', 'Medical_History_2_133', 'Medical_History_2_134', 'Medical_History_2_135', 'Medical_History_2_136', 'Medical_History_2_137', 'Medical_History_2_138', 'Medical_History_2_139', 'Medical_History_2_140', 'Medical_History_2_141', 'Medical_History_2_143', 'Medical_History_2_144', 'Medical_History_2_145', 'Medical_History_2_146', 'Medical_History_2_147', 'Medical_History_2_148', 'Medical_History_2_149', 'Medical_History_2_150', 'Medical_History_2_151', 'Medical_History_2_154', 'Medical_History_2_155', 'Medical_History_2_156', 'Medical_History_2_157', 'Medical_History_2_160', 'Medical_History_2_161', 'Medical_History_2_162', 'Medical_History_2_163', 'Medical_History_2_164', 'Medical_History_2_165', 'Medical_History_2_166', 'Medical_History_2_167', 'Medical_History_2_168', 'Medical_History_2_169', 'Medical_History_2_170', 'Medical_History_2_171', 'Medical_History_2_172', 'Medical_History_2_173', 'Medical_History_2_174', 'Medical_History_2_175', 'Medical_History_2_176', 'Medical_History_2_178', 'Medical_History_2_179', 'Medical_History_2_180', 'Medical_History_2_181', 'Medical_History_2_182', 'Medical_History_2_183', 'Medical_History_2_184', 'Medical_History_2_185', 'Medical_History_2_186', 'Medical_History_2_187', 
'Medical_History_2_189', 'Medical_History_2_190', 'Medical_History_2_191', 'Medical_History_2_192', 'Medical_History_2_193', 'Medical_History_2_194', 'Medical_History_2_195', 'Medical_History_2_196', 'Medical_History_2_197', 'Medical_History_2_198', 'Medical_History_2_199', 'Medical_History_2_200', 'Medical_History_2_201', 'Medical_History_2_202', 'Medical_History_2_203', 'Medical_History_2_204', 'Medical_History_2_205', 'Medical_History_2_206', 'Medical_History_2_207', 'Medical_History_2_208', 'Medical_History_2_209', 'Medical_History_2_210', 'Medical_History_2_211', 'Medical_History_2_212', 'Medical_History_2_213', 'Medical_History_2_214', 'Medical_History_2_215', 'Medical_History_2_216', 'Medical_History_2_217', 'Medical_History_2_218', 'Medical_History_2_219', 'Medical_History_2_220', 'Medical_History_2_221', 'Medical_History_2_222', 'Medical_History_2_223', 'Medical_History_2_224', 'Medical_History_2_225', 'Medical_History_2_226', 'Medical_History_2_227', 'Medical_History_2_228', 'Medical_History_2_229', 'Medical_History_2_230', 'Medical_History_2_231', 'Medical_History_2_232', 'Medical_History_2_233', 'Medical_History_2_234', 'Medical_History_2_235', 'Medical_History_2_236', 'Medical_History_2_237', 'Medical_History_2_238', 'Medical_History_2_239', 'Medical_History_2_240', 'Medical_History_2_241', 'Medical_History_2_242', 'Medical_History_2_243', 'Medical_History_2_244', 'Medical_History_2_245', 'Medical_History_2_246', 'Medical_History_2_247', 'Medical_History_2_248', 'Medical_History_2_249', 'Medical_History_2_250', 'Medical_History_2_251', 'Medical_History_2_252', 'Medical_History_2_253', 'Medical_History_2_254', 'Medical_History_2_256', 'Medical_History_2_257', 'Medical_History_2_258', 'Medical_History_2_259', 'Medical_History_2_260', 'Medical_History_2_261', 'Medical_History_2_262', 'Medical_History_2_263', 'Medical_History_2_264', 'Medical_History_2_265', 'Medical_History_2_266', 'Medical_History_2_267', 'Medical_History_2_268', 'Medical_History_2_269', 'Medical_History_2_270', 'Medical_History_2_271', 'Medical_History_2_272', 'Medical_History_2_273', 'Medical_History_2_274', 'Medical_History_2_275', 'Medical_History_2_276', 'Medical_History_2_277', 'Medical_History_2_278', 'Medical_History_2_279', 'Medical_History_2_280', 'Medical_History_2_281', 'Medical_History_2_282', 'Medical_History_2_283', 'Medical_History_2_284', 'Medical_History_2_285', 'Medical_History_2_286', 'Medical_History_2_287', 'Medical_History_2_288', 'Medical_History_2_289', 'Medical_History_2_290', 'Medical_History_2_291', 'Medical_History_2_292', 'Medical_History_2_293', 'Medical_History_2_294', 'Medical_History_2_295', 'Medical_History_2_296', 'Medical_History_2_297', 'Medical_History_2_298', 'Medical_History_2_299', 'Medical_History_2_300', 'Medical_History_2_301', 'Medical_History_2_302', 'Medical_History_2_303', 'Medical_History_2_304', 'Medical_History_2_305', 'Medical_History_2_306', 'Medical_History_2_307', 'Medical_History_2_308', 'Medical_History_2_309', 'Medical_History_2_310', 'Medical_History_2_311', 'Medical_History_2_312', 'Medical_History_2_313', 'Medical_History_2_314', 'Medical_History_2_315', 'Medical_History_2_316', 'Medical_History_2_317', 'Medical_History_2_318', 'Medical_History_2_319', 'Medical_History_2_320', 'Medical_History_2_321', 'Medical_History_2_322', 'Medical_History_2_324', 'Medical_History_2_325', 'Medical_History_2_326', 'Medical_History_2_327', 'Medical_History_2_328', 'Medical_History_2_329', 'Medical_History_2_330', 'Medical_History_2_331', 'Medical_History_2_332', 
'Medical_History_2_333', 'Medical_History_2_334', 'Medical_History_2_335', 'Medical_History_2_336', 'Medical_History_2_337', 'Medical_History_2_338', 'Medical_History_2_339', 'Medical_History_2_340', 'Medical_History_2_341', 'Medical_History_2_342', 'Medical_History_2_343', 'Medical_History_2_344', 'Medical_History_2_345', 'Medical_History_2_346', 'Medical_History_2_347', 'Medical_History_2_348', 'Medical_History_2_349', 'Medical_History_2_350', 'Medical_History_2_351', 'Medical_History_2_353', 'Medical_History_2_354', 'Medical_History_2_355', 'Medical_History_2_356', 'Medical_History_2_357', 'Medical_History_2_358', 'Medical_History_2_360', 'Medical_History_2_361', 'Medical_History_2_362', 'Medical_History_2_363', 'Medical_History_2_364', 'Medical_History_2_365', 'Medical_History_2_366', 'Medical_History_2_367', 'Medical_History_2_368', 'Medical_History_2_369', 'Medical_History_2_370', 'Medical_History_2_371', 'Medical_History_2_372', 'Medical_History_2_374', 'Medical_History_2_375', 'Medical_History_2_376', 'Medical_History_2_377', 'Medical_History_2_378', 'Medical_History_2_379', 'Medical_History_2_380', 'Medical_History_2_381', 'Medical_History_2_382', 'Medical_History_2_383', 'Medical_History_2_384', 'Medical_History_2_385', 'Medical_History_2_386', 'Medical_History_2_387', 'Medical_History_2_388', 'Medical_History_2_389', 'Medical_History_2_390', 'Medical_History_2_391', 'Medical_History_2_392', 'Medical_History_2_394', 'Medical_History_2_395', 'Medical_History_2_396', 'Medical_History_2_397', 'Medical_History_2_398', 'Medical_History_2_399', 'Medical_History_2_400', 'Medical_History_2_401', 'Medical_History_2_402', 'Medical_History_2_403', 'Medical_History_2_404', 'Medical_History_2_405', 'Medical_History_2_406', 'Medical_History_2_407', 'Medical_History_2_408', 'Medical_History_2_409', 'Medical_History_2_410', 'Medical_History_2_411', 'Medical_History_2_412', 'Medical_History_2_413', 'Medical_History_2_414', 'Medical_History_2_415', 'Medical_History_2_416', 'Medical_History_2_417', 'Medical_History_2_418', 'Medical_History_2_419', 'Medical_History_2_421', 'Medical_History_2_422', 'Medical_History_2_423', 'Medical_History_2_424', 'Medical_History_2_425', 'Medical_History_2_426', 'Medical_History_2_427', 'Medical_History_2_428', 'Medical_History_2_429', 'Medical_History_2_430', 'Medical_History_2_431', 'Medical_History_2_432', 'Medical_History_2_433', 'Medical_History_2_434', 'Medical_History_2_435', 'Medical_History_2_436', 'Medical_History_2_437', 'Medical_History_2_438', 'Medical_History_2_440', 'Medical_History_2_441', 'Medical_History_2_442', 'Medical_History_2_443', 'Medical_History_2_444', 'Medical_History_2_445', 'Medical_History_2_446', 'Medical_History_2_447', 'Medical_History_2_448', 'Medical_History_2_449', 'Medical_History_2_450', 'Medical_History_2_451', 'Medical_History_2_452', 'Medical_History_2_453', 'Medical_History_2_454', 'Medical_History_2_455', 'Medical_History_2_456', 'Medical_History_2_457', 'Medical_History_2_458', 'Medical_History_2_459', 'Medical_History_2_460', 'Medical_History_2_461', 'Medical_History_2_462', 'Medical_History_2_464', 'Medical_History_2_465', 'Medical_History_2_466', 'Medical_History_2_467', 'Medical_History_2_468', 'Medical_History_2_469', 'Medical_History_2_470', 'Medical_History_2_471', 'Medical_History_2_472', 'Medical_History_2_473', 'Medical_History_2_474', 'Medical_History_2_475', 'Medical_History_2_477', 'Medical_History_2_478', 'Medical_History_2_479', 'Medical_History_2_480', 'Medical_History_2_481', 'Medical_History_2_482', 
'Medical_History_2_483', 'Medical_History_2_484', 'Medical_History_2_485', 'Medical_History_2_486', 'Medical_History_2_487', 'Medical_History_2_488', 'Medical_History_2_489', 'Medical_History_2_490', 'Medical_History_2_491', 'Medical_History_2_492', 'Medical_History_2_493', 'Medical_History_2_494', 'Medical_History_2_495', 'Medical_History_2_496', 'Medical_History_2_497', 'Medical_History_2_498', 'Medical_History_2_499', 'Medical_History_2_500', 'Medical_History_2_501', 'Medical_History_2_502', 'Medical_History_2_503', 'Medical_History_2_504', 'Medical_History_2_505', 'Medical_History_2_506', 'Medical_History_2_507', 'Medical_History_2_508', 'Medical_History_2_509', 'Medical_History_2_510', 'Medical_History_2_511', 'Medical_History_2_512', 'Medical_History_2_513', 'Medical_History_2_514', 'Medical_History_2_515', 'Medical_History_2_516', 'Medical_History_2_517', 'Medical_History_2_518', 'Medical_History_2_519', 'Medical_History_2_520', 'Medical_History_2_521', 'Medical_History_2_522', 'Medical_History_2_523', 'Medical_History_2_524', 'Medical_History_2_525', 'Medical_History_2_526', 'Medical_History_2_527', 'Medical_History_2_528', 'Medical_History_2_529', 'Medical_History_2_530', 'Medical_History_2_531', 'Medical_History_2_532', 'Medical_History_2_533', 'Medical_History_2_534', 'Medical_History_2_535', 'Medical_History_2_536', 'Medical_History_2_537', 'Medical_History_2_538', 'Medical_History_2_539', 'Medical_History_2_540', 'Medical_History_2_541', 'Medical_History_2_542', 'Medical_History_2_543', 'Medical_History_2_544', 'Medical_History_2_545', 'Medical_History_2_548', 'Medical_History_2_549', 'Medical_History_2_550', 'Medical_History_2_551', 'Medical_History_2_552', 'Medical_History_2_553', 'Medical_History_2_554', 'Medical_History_2_555', 'Medical_History_2_556', 'Medical_History_2_557', 'Medical_History_2_558', 'Medical_History_2_559', 'Medical_History_2_560', 'Medical_History_2_561', 'Medical_History_2_562', 'Medical_History_2_563', 'Medical_History_2_564', 'Medical_History_2_565', 'Medical_History_2_566', 'Medical_History_2_567', 'Medical_History_2_568', 'Medical_History_2_569', 'Medical_History_2_570', 'Medical_History_2_571', 'Medical_History_2_572', 'Medical_History_2_573', 'Medical_History_2_574', 'Medical_History_2_575', 'Medical_History_2_576', 'Medical_History_2_577', 'Medical_History_2_578', 'Medical_History_2_579', 'Medical_History_2_580', 'Medical_History_2_582', 'Medical_History_2_583', 'Medical_History_2_584', 'Medical_History_2_585', 'Medical_History_2_586', 'Medical_History_2_587', 'Medical_History_2_588', 'Medical_History_2_589', 'Medical_History_2_590', 'Medical_History_2_591', 'Medical_History_2_592', 'Medical_History_2_593', 'Medical_History_2_594', 'Medical_History_2_595', 'Medical_History_2_596', 'Medical_History_2_597', 'Medical_History_2_598', 'Medical_History_2_599', 'Medical_History_2_600', 'Medical_History_2_601', 'Medical_History_2_602', 'Medical_History_2_603', 'Medical_History_2_604', 'Medical_History_2_605', 'Medical_History_2_606', 'Medical_History_2_607', 'Medical_History_2_609', 'Medical_History_2_610', 'Medical_History_2_611', 'Medical_History_2_612', 'Medical_History_2_613', 'Medical_History_2_614', 'Medical_History_2_615', 'Medical_History_2_616', 'Medical_History_2_617', 'Medical_History_2_618', 'Medical_History_2_619', 'Medical_History_2_620', 'Medical_History_2_621', 'Medical_History_2_622', 'Medical_History_2_623', 'Medical_History_2_624', 'Medical_History_2_625', 'Medical_History_2_626', 'Medical_History_2_627', 'Medical_History_2_628'] def 
OneHot(df, colnames): from pandas import get_dummies, concat for col in colnames: dummies = get_dummies(df[col]) #ndumcols = dummies.shape[1] dummies.rename(columns={p: col + '_' + str(i + 1) for i, p in enumerate(dummies.columns.values)}, inplace=True) df = concat([df, dummies], axis=1) pass df = df.drop(colnames, axis=1) return df def Kappa(y_true, y_pred, **kwargs): from skll import kappa return kappa(y_true, y_pred, **kwargs) def NegQWKappaScorer(y_hat, y): from numpy import clip #MIN, MAX = (-3, 12) MIN, MAX = (1, 8) return -Kappa(clip(y, MIN, MAX), clip(y_hat, MIN, MAX), weights='quadratic', min_rating=MIN, max_rating=MAX) from sklearn.base import BaseEstimator, RegressorMixin class PrudentialRegressor(BaseEstimator, RegressorMixin): def __init__(self, objective='reg:linear', learning_rate=0.045, min_child_weight=50, subsample=0.8, colsample_bytree=0.7, max_depth=7, n_estimators=700, nthread=-1, seed=0, n_buckets=8, initial_params=[-1.5, -2.6, -3.6, -1.2, -0.8, 0.04, 0.7, 3.6, #1., 2., 3., 4., 5., 6., 7. ], minimizer='BFGS', scoring=NegQWKappaScorer): self.objective = objective self.learning_rate = learning_rate self.min_child_weight = min_child_weight self.subsample = subsample self.colsample_bytree = colsample_bytree self.max_depth = max_depth self.n_estimators = n_estimators self.nthread = nthread self.seed = seed self.n_buckets = n_buckets self.initial_params = initial_params self.minimizer = minimizer self.scoring = scoring return def fit(self, X, y): from xgboost import XGBRegressor from OptimizedOffsetRegressor import DigitizedOptimizedOffsetRegressor self.xgb = XGBRegressor( objective=self.objective, learning_rate=self.learning_rate, min_child_weight=self.min_child_weight, subsample=self.subsample, colsample_bytree=self.colsample_bytree, max_depth=self.max_depth, n_estimators=self.n_estimators, nthread=self.nthread, missing=0.0, seed=self.seed) #from OptimizedOffsetRegressor import FullDigitizedOptimizedOffsetRegressor #self.off = FullDigitizedOptimizedOffsetRegressor(n_buckets=self.n_buckets, # basinhopping=True, self.off = DigitizedOptimizedOffsetRegressor(n_buckets=self.n_buckets, initial_params=self.initial_params, minimizer=self.minimizer, scoring=self.scoring) self.xgb.fit(X, y) tr_y_hat = self.xgb.predict(X, ntree_limit=self.xgb.booster().best_iteration) print('Train score is:', -self.scoring(tr_y_hat, y)) self.off.fit(tr_y_hat, y) print("Offsets:", self.off.params) return self def predict(self, X): from numpy import clip te_y_hat = self.xgb.predict(X, ntree_limit=self.xgb.booster().best_iteration) return clip(self.off.predict(te_y_hat), 1, 8) pass class PrudentialRegressorFO(BaseEstimator, RegressorMixin): def __init__(self, objective='reg:linear', learning_rate=0.045, min_child_weight=50, subsample=0.8, colsample_bytree=0.7, max_depth=7, n_estimators=700, nthread=-1, seed=0, n_buckets=8, initial_params=[-1.5, -2.6, -3.6, -1.2, -0.8, 0.04, 0.7, 3.6, #1., 2., 3., 4., 5., 6., 7. 
], minimizer='BFGS', scoring=NegQWKappaScorer): self.objective = objective self.learning_rate = learning_rate self.min_child_weight = min_child_weight self.subsample = subsample self.colsample_bytree = colsample_bytree self.max_depth = max_depth self.n_estimators = n_estimators self.nthread = nthread self.seed = seed self.n_buckets = n_buckets self.initial_params = initial_params self.minimizer = minimizer self.scoring = scoring return def fit(self, X, y): from xgboost import XGBRegressor self.xgb = XGBRegressor( objective=self.objective, learning_rate=self.learning_rate, min_child_weight=self.min_child_weight, subsample=self.subsample, colsample_bytree=self.colsample_bytree, max_depth=self.max_depth, n_estimators=self.n_estimators, nthread=self.nthread, missing=0.0, seed=self.seed) from OptimizedOffsetRegressor import FullDigitizedOptimizedOffsetRegressor self.off = FullDigitizedOptimizedOffsetRegressor(n_buckets=self.n_buckets, # basinhopping=True, initial_params=self.initial_params, minimizer=self.minimizer, scoring=self.scoring) self.xgb.fit(X, y) tr_y_hat = self.xgb.predict(X, ntree_limit=self.xgb.booster().best_iteration) print('Train score is:', -self.scoring(tr_y_hat, y)) self.off.fit(tr_y_hat, y) print("Offsets:", self.off.params) return self def predict(self, X): from numpy import clip te_y_hat = self.xgb.predict(X, ntree_limit=self.xgb.booster().best_iteration) return clip(self.off.predict(te_y_hat), 1, 8) pass class PrudentialRegressorCVO(BaseEstimator, RegressorMixin): def __init__(self, objective='reg:linear', learning_rate=0.045, min_child_weight=50, subsample=0.8, colsample_bytree=0.7, max_depth=7, n_estimators=700, nthread=-1, seed=0, n_buckets=8, initial_params=[-1.5, -2.6, -3.6, -1.2, -0.8, 0.04, 0.7, 3.6, #1., 2., 3., 4., 5., 6., 7. 
], minimizer='BFGS', scoring=NegQWKappaScorer): self.objective = objective self.learning_rate = learning_rate self.min_child_weight = min_child_weight self.subsample = subsample self.colsample_bytree = colsample_bytree self.max_depth = max_depth self.n_estimators = n_estimators self.nthread = nthread self.seed = seed self.n_buckets = n_buckets self.initial_params = initial_params self.minimizer = minimizer self.scoring = scoring return def fit(self, X, y): from xgboost import XGBRegressor from OptimizedOffsetRegressor import DigitizedOptimizedOffsetRegressor #from OptimizedOffsetRegressor import FullDigitizedOptimizedOffsetRegressor #self.off = FullDigitizedOptimizedOffsetRegressor(n_buckets=self.n_buckets, # basinhopping=True, """ 2 / 5 grid scores: mean: 0.65531, std: 0.00333, params: {'n_estimators': 700, 'subsample': 0.9, 'colsample_bytree': 0.67, 'max_depth': 6, 'min_child_weight': 240} best score: 0.65531 3 / 5 grid scores: mean: 0.65474, std: 0.00308, params: {'n_estimators': 700, 'subsample': 0.9, 'colsample_bytree': 0.67, 'max_depth': 6, 'min_child_weight': 240} best score: 0.65474 4 / 5 grid scores: mean: 0.65490, std: 0.00302, params: {'n_estimators': 700, 'subsample': 0.9, 'colsample_bytree': 0.67, 'max_depth': 6, 'min_child_weight': 240} best score: 0.65490 2 / 10 grid scores: mean: 0.65688, std: 0.00725, params: {'n_estimators': 700, 'subsample': 0.9, 'colsample_bytree': 0.67, 'max_depth': 6, 'min_child_weight': 240} best score: 0.65688 3 / 10 grid scores: mean: 0.65705, std: 0.00714, params: {'n_estimators': 700, 'subsample': 0.9, 'colsample_bytree': 0.67, 'max_depth': 6, 'min_child_weight': 240} best score: 0.65705 4 / 10 grid scores: mean: 0.65643, std: 0.00715, params: {'n_estimators': 700, 'subsample': 0.9, 'colsample_bytree': 0.67, 'max_depth': 6, 'min_child_weight': 240} best score: 0.65643 5 / 10 grid scores: mean: 0.65630, std: 0.00699, params: {'n_estimators': 700, 'subsample': 0.9, 'colsample_bytree': 0.67, 'max_depth': 6, 'min_child_weight': 240} best score: 0.65630 """ from sklearn.cross_validation import StratifiedKFold kf = StratifiedKFold(y, n_folds=2) print(kf) params = [] for itrain, itest in kf: ytrain = y[itrain] Xtrain = X.iloc[list(itrain)] ytest = y[itest] Xtest = X.iloc[list(itest)] self.xgb = XGBRegressor( objective=self.objective, learning_rate=self.learning_rate, min_child_weight=self.min_child_weight, subsample=self.subsample, colsample_bytree=self.colsample_bytree, max_depth=self.max_depth, n_estimators=self.n_estimators, nthread=self.nthread, missing=0.0, seed=self.seed) self.xgb.fit(Xtrain, ytrain) te_y_hat = self.xgb.predict(Xtest, ntree_limit=self.xgb.booster().best_iteration) print('XGB Test score is:', -self.scoring(te_y_hat, ytest)) self.off = DigitizedOptimizedOffsetRegressor(n_buckets=self.n_buckets, initial_params=self.initial_params, minimizer=self.minimizer, scoring=self.scoring) self.off.fit(te_y_hat, ytest) print("Offsets:", self.off.params) params += [list(self.off.params)] pass from numpy import array self.off.params = array(params).mean(axis=0) print("Mean Offsets:", self.off.params) self.xgb.fit(X, y) return self def predict(self, X): from numpy import clip te_y_hat = self.xgb.predict(X, ntree_limit=self.xgb.booster().best_iteration) return clip(self.off.predict(te_y_hat), 1, 8) pass class PrudentialRegressorCVO2(BaseEstimator, RegressorMixin): def __init__(self, objective='reg:linear', learning_rate=0.045, learning_rates=None, min_child_weight=50, subsample=0.8, colsample_bytree=0.7, max_depth=7, gamma=0.0, n_estimators=700, 
nthread=-1, seed=0, n_buckets=8, int_fold=6, initial_params=[-1.5, -2.6, -3.6, -1.2, -0.8, 0.04, 0.7, 3.6, #1., 2., 3., 4., 5., 6., 7. ], minimizer='BFGS', scoring=NegQWKappaScorer): self.objective = objective self.learning_rate = learning_rate self.learning_rates = learning_rates self.min_child_weight = min_child_weight self.subsample = subsample self.colsample_bytree = colsample_bytree self.max_depth = max_depth self.gamma = gamma self.n_estimators = n_estimators self.nthread = nthread self.seed = seed self.n_buckets = n_buckets self.int_fold = int_fold self.initial_params = initial_params self.minimizer = minimizer self.scoring = scoring self.feature_importances_ = None return def _update_feature_iportances(self, feature_names): from numpy import zeros feature_importances = zeros(len(feature_names)) for xgb in self.xgb: importances = xgb.booster().get_fscore() for i, feat in enumerate(feature_names): if feat in importances: feature_importances[i] += importances[feat] pass pass pass self.feature_importances_ = feature_importances / sum(feature_importances) return def fit(self, X, y): from OptimizedOffsetRegressor import DigitizedOptimizedOffsetRegressor #from OptimizedOffsetRegressor import FullDigitizedOptimizedOffsetRegressor #self.off = FullDigitizedOptimizedOffsetRegressor(n_buckets=self.n_buckets, # basinhopping=True, """ 5-fold Stratified CV grid scores: mean: 0.64475, std: 0.00483, params: {'colsample_bytree': 0.67, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 2, 'max_depth': 6} mean: 0.64926, std: 0.00401, params: {'colsample_bytree': 0.67, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 3, 'max_depth': 6} mean: 0.65281, std: 0.00384, params: {'colsample_bytree': 0.67, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 4, 'max_depth': 6} mean: 0.65471, std: 0.00422, params: {'colsample_bytree': 0.67, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 5, 'max_depth': 6} mean: 0.65563, std: 0.00440, params: {'colsample_bytree': 0.67, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 6, 'max_depth': 6} mean: 0.65635, std: 0.00433, params: {'colsample_bytree': 0.67, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 6} mean: 0.65600, std: 0.00471, params: {'colsample_bytree': 0.67, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 8, 'max_depth': 6} best score: 0.65635 best params: {'colsample_bytree': 0.67, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 6} reversed params [8 bins]: mean: 0.65588, std: 0.00417, params: {'colsample_bytree': 0.67, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 6, 'max_depth': 6} mean: 0.65640, std: 0.00438, params: {'colsample_bytree': 0.67, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 6} with Scirpus obj grid scores: mean: 0.65775, std: 0.00429, params: {'colsample_bytree': 0.67, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 6} best score: 0.65775 +1 na trzech Product_info_2* mean: 0.65555, std: 0.00462, params: {'colsample_bytree': 0.67, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 6, 'max_depth': 6} mean: 0.65613, std: 0.00438, params: {'colsample_bytree': 0.67, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 6} 
DISCRETE: NaN=most_common, +Medical_History_10,24, (24 jest znaczacy) mean: 0.65589, std: 0.00490, params: {'colsample_bytree': 0.67, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 6} PROPER DATA + Scirpus + reversed params + no-drops mean: 0.65783, std: 0.00444, params: {'colsample_bytree': 0.67, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 6} PROPER DATA + Scirpus + reversed params + no-drops, [email protected] mean: 0.65790, std: 0.00421, params: {'colsample_bytree': 0.67, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 6} jak wyzej, max_depth=7 mean: 0.65802, std: 0.00420, params: {'colsample_bytree': 0.67, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 7} jak wyzej, max_depth=10 mean: 0.65833, std: 0.00387, params: {'colsample_bytree': 0.67, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10} jak wyzej, max_depth=10, eta=0.03 mean: 0.65888, std: 0.00391, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10} jak wyzej, max_depth=30, eta=0.02 mean: 0.65798, std: 0.00340, params: {'colsample_bytree': 0.67, 'learning_rate': 0.02, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 30} jak wyzej, max_depth=10, eta=0.03, eval_metric=Scirpus mean: 0.65891, std: 0.00395, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10} jak wyzej, max_depth=10, eta=0.03, eval_metric=QWKappa mean: 0.65827, std: 0.00368, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10} jak wyzej, max_depth=10, eta=0.03, eval_metric=Scirpus, GMM6,GMM17 mean: 0.65862, std: 0.00423, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10} jak wyzej, max_depth=10, eta=0.03, eval_metric=Scirpus, Gvector mean: 0.65864, std: 0.00384, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10} jak wyzej, max_depth=10, eta=0.03, eval_metric=Scirpus, learning_rates=[0.03] * 200 + [0.02] * 500, mean: 0.65910, std: 0.00384, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10} jak wyzej, +nowy bucketing (max_depth=10, eta=0.03, eval_metric=Scirpus, learning_rates=[0.03] * 200 + [0.02] * 500,) n_buckets=7: mean: 0.65876, std: 0.00405, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10} n_buckets=8: mean: 0.65966, std: 0.00412, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10} n_buckets=9: mean: 0.65965, std: 0.00390, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10} n_buckets=10: mean: 0.65359, std: 0.00405, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 
240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10} n_buckets=12: mean: 0.65705, std: 0.00442, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10} """ from sklearn.cross_validation import StratifiedKFold kf = StratifiedKFold(y, n_folds=self.int_fold) print(kf) self.xgb = [] self.off = [] for i, (itrain, itest) in enumerate(kf): ytrain = y[itrain] Xtrain = X.iloc[list(itrain)] ytest = y[itest] Xtest = X.iloc[list(itest)] self.xgb += [None] from xgb_sklearn import XGBRegressor #from xgboost import XGBRegressor self.xgb[i] = XGBRegressor( objective=self.objective, learning_rate=self.learning_rate, min_child_weight=self.min_child_weight, subsample=self.subsample, colsample_bytree=self.colsample_bytree, max_depth=self.max_depth, gamma=self.gamma, n_estimators=self.n_estimators, nthread=self.nthread, missing=0.0, seed=self.seed) self.xgb[i].fit(Xtrain, ytrain, eval_set=[(Xtest, ytest)], #eval_metric=self.scoring, #eval_metric='rmse', eval_metric=scirpus_error, #eval_metric=qwkappa_error, verbose=False, early_stopping_rounds=30, learning_rates=self.learning_rates, obj=scirpus_regobj #obj=qwkappa_regobj ) print("best iteration:", self.xgb[i].booster().best_iteration) te_y_hat = self.xgb[i].predict(Xtest, ntree_limit=self.xgb[i].booster().best_iteration) print('XGB Test score is:', -self.scoring(te_y_hat, ytest)) self.off += [None] self.off[i] = DigitizedOptimizedOffsetRegressor(n_buckets=self.n_buckets, initial_params=self.initial_params, minimizer=self.minimizer, scoring=self.scoring) self.off[i].fit(te_y_hat, ytest) print("Offsets:", self.off[i].params) pass self._update_feature_iportances(X.columns.values.tolist()) return self def predict(self, X): from numpy import clip, array result = [] for xgb, off in zip(self.xgb, self.off): te_y_hat = xgb.predict(X, ntree_limit=xgb.booster().best_iteration) result.append(off.predict(te_y_hat)) result = clip(array(result).mean(axis=0), 1, 8) return result pass class PrudentialRegressorCVO3(BaseEstimator, RegressorMixin): def __init__(self, objective='reg:linear', learning_rate=0.045, learning_rates=None, min_child_weight=50, subsample=0.8, colsample_bytree=0.7, max_depth=7, n_estimators=700, nthread=-1, seed=0, n_buckets=8, int_fold=6, initial_params=[-1.5, -2.6, -3.6, -1.2, -0.8, 0.04, 0.7, 3.6, #1., 2., 3., 4., 5., 6., 7. 
], minimizer='BFGS', scoring=NegQWKappaScorer): self.objective = objective self.learning_rate = learning_rate self.learning_rates = learning_rates self.min_child_weight = min_child_weight self.subsample = subsample self.colsample_bytree = colsample_bytree self.max_depth = max_depth self.n_estimators = n_estimators self.nthread = nthread self.seed = seed self.n_buckets = n_buckets self.int_fold = int_fold self.initial_params = initial_params self.minimizer = minimizer self.scoring = scoring return """ nah grid scores: mean: 0.65882, std: 0.00382, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10} best score: 0.65882 best params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10} """ def fit(self, X, y): from OptimizedOffsetRegressor import DigitizedOptimizedOffsetRegressor from sklearn.cross_validation import StratifiedKFold kf = StratifiedKFold(y, n_folds=self.int_fold) print(kf) self.xgb = [] self.off = [] for i, (itrain, itest) in enumerate(kf): ytrain = y[itrain] Xtrain = X.iloc[list(itrain)] ytest = y[itest] Xtest = X.iloc[list(itest)] self.xgb += [None] from xgb_sklearn import XGBRegressor #from xgboost import XGBRegressor self.xgb[i] = XGBRegressor( objective=self.objective, learning_rate=self.learning_rate, min_child_weight=self.min_child_weight, subsample=self.subsample, colsample_bytree=self.colsample_bytree, max_depth=self.max_depth, n_estimators=self.n_estimators, nthread=self.nthread, missing=0.0, seed=self.seed) self.xgb[i].fit(Xtrain, ytrain, eval_set=[(Xtest, ytest)], #eval_metric=self.scoring, #eval_metric='rmse', eval_metric=scirpus_error, #eval_metric=qwkappa_error, verbose=False, early_stopping_rounds=30, learning_rates=self.learning_rates, obj=scirpus_regobj #obj=qwkappa_regobj ) print("best iteration:", self.xgb[i].booster().best_iteration) te_y_hat = self.xgb[i].predict(Xtest, ntree_limit=self.xgb[i].booster().best_iteration) print('XGB Test score is:', -self.scoring(te_y_hat, ytest)) pass xgb_result = [] for xgb in self.xgb: tr_y_hat = xgb.predict(Xtrain, ntree_limit=xgb.booster().best_iteration) xgb_result.append(tr_y_hat) from numpy import array xgb_result = array(xgb_result).mean(axis=0) self.off = DigitizedOptimizedOffsetRegressor(n_buckets=self.n_buckets, initial_params=self.initial_params, minimizer=self.minimizer, scoring=self.scoring) self.off.fit(xgb_result, ytrain) print("Offsets:", self.off.params) return self def predict(self, X): from numpy import clip, array xgb_result = [] for xgb in self.xgb: te_y_hat = xgb.predict(X, ntree_limit=xgb.booster().best_iteration) xgb_result.append(te_y_hat) xgb_result = array(xgb_result).mean(axis=0) result = clip(self.off.predict(xgb_result), 1, 8) return result pass class PrudentialRegressorCVO2FO(BaseEstimator, RegressorMixin): def __init__(self, objective='reg:linear', learning_rate=0.045, min_child_weight=50, subsample=0.8, colsample_bytree=0.7, max_depth=7, n_estimators=700, nthread=-1, seed=0, n_buckets=8, int_fold=6, initial_params=[-1.5, -2.6, -3.6, -1.2, -0.8, 0.04, 0.7, 3.6, #1., 2., 3., 4., 5., 6., 7. 
], minimizer='BFGS', scoring=NegQWKappaScorer): self.objective = objective self.learning_rate = learning_rate self.min_child_weight = min_child_weight self.subsample = subsample self.colsample_bytree = colsample_bytree self.max_depth = max_depth self.n_estimators = n_estimators self.nthread = nthread self.seed = seed self.n_buckets = n_buckets self.int_fold = int_fold self.initial_params = initial_params self.minimizer = minimizer self.scoring = scoring # from numpy.random import seed as random_seed # random_seed(seed) return def __call__(self, i, te_y_hat, ytest): print('XGB[{}] Test score is:'.format(i + 1), -self.scoring(te_y_hat, ytest)) from OptimizedOffsetRegressor import FullDigitizedOptimizedOffsetRegressor off = FullDigitizedOptimizedOffsetRegressor(n_buckets=self.n_buckets, basinhopping=True, initial_params=self.initial_params, minimizer=self.minimizer, scoring=self.scoring) off.fit(te_y_hat, ytest) print("Offsets[{}]:".format(i + 1), off.params) return off def fit(self, X, y): from sklearn.cross_validation import StratifiedKFold kf = StratifiedKFold(y, n_folds=self.int_fold) print(kf) self.xgb = [] self.off = [] datamap = {i: (itrain, itest) for i, (itrain, itest) in enumerate(kf)} for i, (itrain, _) in datamap.items(): ytrain = y[itrain] Xtrain = X.iloc[list(itrain)] self.xgb += [None] #from xgboost import XGBRegressor from xgb_sklearn import XGBRegressor self.xgb[i] = XGBRegressor( objective=self.objective, learning_rate=self.learning_rate, min_child_weight=self.min_child_weight, subsample=self.subsample, colsample_bytree=self.colsample_bytree, max_depth=self.max_depth, n_estimators=self.n_estimators, nthread=self.nthread, missing=0.0 + 1e6, seed=self.seed) self.xgb[i].fit(Xtrain, ytrain, obj=scirpus_regobj) pass from joblib import Parallel, delayed from sklearn.base import clone off = Parallel( n_jobs=self.nthread, verbose=2, #pre_dispatch='n_jobs', )( delayed(clone(self))(i, self.xgb[i].predict(X.iloc[list(itest)], ntree_limit=self.xgb[i].booster().best_iteration), y[itest]) for i, (_, itest) in datamap.items()) self.off = off return self def predict(self, X): from numpy import clip, array result = [] for xgb, off in zip(self.xgb, self.off): te_y_hat = xgb.predict(X, ntree_limit=xgb.booster().best_iteration) result.append(off.predict(te_y_hat)) result = clip(array(result).mean(axis=0), 1, 8) return result pass def scirpus_regobj(preds, dtrain): labels = dtrain.get_label() x = (preds - labels) from numpy import exp as npexp grad = 2 * x * npexp(-(x ** 2)) * (npexp(x ** 2) + x ** 2 + 1) hess = 2 * npexp(-(x ** 2)) * (npexp(x ** 2) - 2 * (x ** 4) + 5 * (x ** 2) - 1) return grad, hess def scirpus_error(preds, dtrain): labels = dtrain.get_label() x = (labels - preds) from numpy import exp as npexp error = (x ** 2) * (1 - npexp(-(x ** 2))) from numpy import mean return 'error', mean(error) def qwkappa_error(preds, dtrain): labels = dtrain.get_label() kappa = NegQWKappaScorer(labels, preds) return 'kappa', kappa def qwkappa_regobj(preds, dtrain): labels = dtrain.get_label() work = preds.copy() from numpy import empty_like grad = empty_like(preds) for i in range(len(preds)): work[i] += 1 score = NegQWKappaScorer(labels, work) work[i] -= 2 grad[i] = (score - NegQWKappaScorer(labels, work)) / 2. 
work[i] += 1 pass from numpy import ones hess = ones(len(preds)) / len(preds) return grad, hess def work(out_csv_file, estimator, nest, njobs, nfolds, cv_grid, minimizer, nbuckets, mvector, imputer, clf_kwargs, int_fold): from numpy.random import seed as random_seed random_seed(1) from zipfile import ZipFile from pandas import read_csv from numpy import rint,clip,savetxt,stack train = read_csv(ZipFile("../../data/train.csv.zip", 'r').open('train.csv')) test = read_csv(ZipFile("../../data/test.csv.zip", 'r').open('test.csv')) # gmm17_train = read_csv('GMM_17_full_train.csv') # gmm17_test = read_csv('GMM_17_full_test.csv') # gmm6_train = read_csv('GMM_6_full_train.csv') # gmm6_test = read_csv('GMM_6_full_test.csv') # # train['GMM17'] = gmm17_train['Response'] # test['GMM17'] = gmm17_test['Response'] # train['GMM6'] = gmm6_train['Response'] # test['GMM6'] = gmm6_test['Response'] # combine train and test all_data = train.append(test) # G_vectors = read_csv('../../data/G_vectors.csv') # #all_data = all_data.join(G_vectors.drop(['G3'], axis=1)) # all_data = all_data.join( # G_vectors[['G8', 'G11', 'G12', 'G13', 'G17', 'G18', 'G19', 'G20']]) # all_data['G11'] = G_vectors['G11'] # all_data = OneHot(all_data, ['G11']) # all_data['G12'] = G_vectors['G12'] # all_data = OneHot(all_data, ['G12']) from sklearn.preprocessing import Imputer imp = Imputer(missing_values='NaN', strategy='median', axis=0) all_data[DISCRETE] = imp.fit_transform(all_data[DISCRETE]) # from numpy import bincount # for col in all_data[DISCRETE]: # top = bincount(all_data[col].astype(int)).argmax() # all_data[col] -= top imp = Imputer(missing_values='NaN', strategy='median', axis=0) all_data[CONTINUOUS] = imp.fit_transform(all_data[CONTINUOUS]) # all_data[BOOLEANS] = all_data[BOOLEANS] + 1e6 # for col in all_data[CONTINUOUS]: # from numpy import median, mean as npmean # _min = min(all_data[col]) # _max = max(all_data[col]) # _median = median(all_data[col]) # _mean = npmean(all_data[col]) # if _median != _min and _median != _max: # all_data[col + '_median'] = all_data[col] > _median # pass # if _mean != _min and _mean != _max: # all_data[col + '_mean'] = all_data[col] > _mean # pass # pass # from sklearn.preprocessing import StandardScaler # from sklearn.decomposition import PCA # std = StandardScaler(copy=True) # all_data[CONTINUOUS] = std.fit_transform(all_data[CONTINUOUS]) # pca = PCA(whiten=False, copy=True) # all_data[CONTINUOUS] = pca.fit_transform(all_data[CONTINUOUS]) # from scipy.stats.mstats import mquantiles # to_quant = ['Employment_Info_1', 'BMI', 'Ins_Age', 'Employment_Info_6', 'Product_Info_4'] # for col in to_quant: # from numpy import linspace,digitize # splits = mquantiles(all_data[col], linspace(0., 1., 16, endpoint=False)[1:]) # foo = digitize(all_data[col], splits) # all_data[col + '_q'] = foo # pass # all_data = OneHot(all_data, [col + '_q' for col in to_quant]) all_data = OneHot(all_data, NOMINALS_3) all_data = OneHot(all_data, NOMINALS_GE4) """ all_data = all_data.drop(NOMINALS_GE4, axis=1) all_data = OneHot(all_data, NOMINALS_3) mean: 0.65158, std: 0.00558, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10} all_data = OneHot(all_data, NOMINALS_3) all_data = OneHot(all_data, NOMINALS_GE4[:2]) all_data = all_data.drop(NOMINALS_GE4[2:], axis=1) mean: 0.65712, std: 0.00514, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 
'max_depth': 10} all_data = OneHot(all_data, NOMINALS_3) all_data = OneHot(all_data, NOMINALS_GE4[:-1]) all_data = all_data.drop(NOMINALS_GE4[-1:], axis=1) mean: 0.65903, std: 0.00418, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10} all_data = OneHot(all_data, NOMINALS_3) all_data = OneHot(all_data, NOMINALS_GE4) mean: 0.66028, std: 0.00443, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10} inf_fold=5, 28 minutes mean: 0.66003, std: 0.00465, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 5, 'max_depth': 10} inf_fold=5, bez learning_rates, 22 minutes mean: 0.66010, std: 0.00416, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 5, 'max_depth': 10} inf_fold=4, eta=0.05, bez learning_rates, 11 minutes mean: 0.65861, std: 0.00391, params: {'colsample_bytree': 0.67, 'learning_rate': 0.05, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 4, 'max_depth': 10} grid scores: mean: 0.65888, std: 0.00443, params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.8, 'int_fold': 4, 'max_depth': 10} mean: 0.65914, std: 0.00422, params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 4, 'max_depth': 10} mean: 0.65978, std: 0.00360, params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 10} mean: 0.65875, std: 0.00458, params: {'colsample_bytree': 0.7, 'learning_rate': 0.05, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.8, 'int_fold': 4, 'max_depth': 10} mean: 0.65913, std: 0.00416, params: {'colsample_bytree': 0.7, 'learning_rate': 0.05, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 4, 'max_depth': 10} mean: 0.65860, std: 0.00387, params: {'colsample_bytree': 0.7, 'learning_rate': 0.05, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 10} mean: 0.65941, std: 0.00377, params: {'colsample_bytree': 0.8, 'learning_rate': 0.05, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.8, 'int_fold': 4, 'max_depth': 10} mean: 0.65938, std: 0.00420, params: {'colsample_bytree': 0.8, 'learning_rate': 0.05, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 4, 'max_depth': 10} mean: 0.65874, std: 0.00378, params: {'colsample_bytree': 0.8, 'learning_rate': 0.05, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 10} best score: 0.65978 best params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 10} mean: 0.65933, std: 0.00412, params: {'colsample_bytree': 0.55, 'learning_rate': 0.05, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 10} mean: 0.65961, std: 0.00411, params: {'colsample_bytree': 0.45, 'learning_rate': 0.05, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 10} mean: 0.65953, std: 0.00370, params: {'colsample_bytree': 0.35, 'learning_rate': 0.05, 
'min_child_weight': 240, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 10} onehot + poly@CONT, po obcięciu nieważnych grid scores: mean: 0.65729, std: 0.00337, params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 10} best score: 0.65729 onehot + poly@CONT, po obcięciu nieważnych mean: 0.65660, std: 0.00293, params: {'colsample_bytree': 0.67, 'learning_rate': 0.05, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 4, 'max_depth': 10, 'gamma': 0.0} grid scores: mean: 0.65712, std: 0.00384, params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 200, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 8, 'gamma': 0.0} mean: 0.65695, std: 0.00361, params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 8, 'gamma': 0.0} mean: 0.65705, std: 0.00351, params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 200, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 10, 'gamma': 0.0} mean: 0.65729, std: 0.00337, params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 10, 'gamma': 0.0} mean: 0.65690, std: 0.00402, params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 200, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 8, 'gamma': 1.0} mean: 0.65678, std: 0.00347, params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 8, 'gamma': 1.0} mean: 0.65748, std: 0.00388, params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 200, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 10, 'gamma': 1.0} mean: 0.65727, std: 0.00351, params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 10, 'gamma': 1.0} best score: 0.65748 best params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 200, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 10, 'gamma': 1.0} mean: 0.65645, std: 0.00403, params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 160, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 6, 'gamma': 1.0} mean: 0.65699, std: 0.00394, params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 200, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 6, 'gamma': 1.0} mean: 0.65746, std: 0.00367, params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 160, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 8, 'gamma': 1.0} mean: 0.65690, std: 0.00402, params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 200, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 8, 'gamma': 1.0} best score: 0.65746 best params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 160, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 8, 'gamma': 1.0} efekt +/- mean, +/- median, vs. 
0.65748 mean: 0.65728, std: 0.00347, params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 200, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 10, 'gamma': 1.0} bez poly (psuje) mean: 0.65946, std: 0.00392, params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 200, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 10, 'gamma': 1.0} grid scores: > mean: 0.66006, std: 0.00437, params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 160, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 8, 'gamma': 0.0} mean: 0.65847, std: 0.00445, params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 8, 'gamma': 0.0} mean: 0.65917, std: 0.00514, params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 160, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 12, 'gamma': 0.0} mean: 0.65956, std: 0.00452, params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 12, 'gamma': 0.0} mean: 0.65950, std: 0.00425, params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 160, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 8, 'gamma': 1.0} mean: 0.65888, std: 0.00380, params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 8, 'gamma': 1.0} mean: 0.65953, std: 0.00419, params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 160, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 12, 'gamma': 1.0} mean: 0.65934, std: 0.00400, params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 12, 'gamma': 1.0} best score: 0.66006 best params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 160, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 8, 'gamma': 0.0} grid scores: mean: 0.65942, std: 0.00459, params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 80, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 6, 'gamma': 0.0} mean: 0.65927, std: 0.00386, params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 120, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 6, 'gamma': 0.0} mean: 0.65969, std: 0.00381, params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 80, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 8, 'gamma': 0.0} mean: 0.65998, std: 0.00414, params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 120, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 8, 'gamma': 0.0} mean: 0.65946, std: 0.00363, params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 80, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 10, 'gamma': 0.0} mean: 0.65967, std: 0.00400, params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 120, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 10, 'gamma': 0.0} best score: 0.65998 best params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 120, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 8, 'gamma': 
0.0} mean: 0.65945, std: 0.00351, params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 140, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 8, 'gamma': 0.0} mean: 0.65982, std: 0.00414, params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 150, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 8, 'gamma': 0.0} mean: 0.65935, std: 0.00393, params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 170, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 8, 'gamma': 0.0} mean: 0.65903, std: 0.00280, params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 180, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 8, 'gamma': 0.0} best score: 0.65982 best params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 150, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 8, 'gamma': 0.0} mean: 0.65947, std: 0.00332, params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 160, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 7, 'gamma': 0.0} mean: 0.65956, std: 0.00395, params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 160, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 9, 'gamma': 0.0} best score: 0.65956 best params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 160, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 9, 'gamma': 0.0} mean: 0.65951, std: 0.00416, params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 160, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 8, 'gamma': 2.0} mean: 0.65975, std: 0.00367, params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 160, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 8, 'gamma': 4.0} best score: 0.65975 best params: {'colsample_bytree': 0.6, 'learning_rate': 0.05, 'min_child_weight': 160, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 4, 'max_depth': 8, 'gamma': 4.0} ===== full one hot bez poly full CV, cf. 
0.66028 grid scores: mean: 0.65966, std: 0.00394, params: {'colsample_bytree': 0.6, 'learning_rate': 0.03, 'min_child_weight': 160, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 7, 'max_depth': 8, 'gamma': 0.0} best score: 0.65966 best params: {'colsample_bytree': 0.6, 'learning_rate': 0.03, 'min_child_weight': 160, 'n_estimators': 700, 'subsample': 1.0, 'int_fold': 7, 'max_depth': 8, 'gamma': 0.0} grid scores: mean: 0.66047, std: 0.00471, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10, 'gamma': 0.0} best score: 0.66047 best params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10, 'gamma': 0.0} nbuckets=8 grid scores: mean: 0.66048, std: 0.00448, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10, 'gamma': 0.0} best score: 0.66048 best params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10, 'gamma': 0.0} +3x3 grid scores: mean: 0.66017, std: 0.00497, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10, 'gamma': 0.0} best score: 0.66017 bez BMI_Age grid scores: mean: 0.65855, std: 0.00260, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10, 'gamma': 0.0} best score: 0.65855 best params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10, 'gamma': 0.0} + G12 grid scores: mean: 0.66070, std: 0.00405, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10, 'gamma': 0.0} best score: 0.66070 best params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10, 'gamma': 0.0} + G8 grid scores: mean: 0.66046, std: 0.00426, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10, 'gamma': 0.0} best score: 0.66046 best params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10, 'gamma': 0.0} + G11 grid scores: mean: 0.66096, std: 0.00448, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10, 'gamma': 0.0} best score: 0.66096 best params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10, 'gamma': 0.0} +G13 grid scores: mean: 0.66004, std: 0.00471, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10, 'gamma': 0.0} best score: 0.66004 best params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10, 'gamma': 0.0} +G18 grid scores: mean: 0.66057, std: 0.00377, 
params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10, 'gamma': 0.0} best score: 0.66057 best params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10, 'gamma': 0.0} +G11,12 grid scores: mean: 0.65996, std: 0.00400, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10, 'gamma': 0.0} best score: 0.65996 best params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10, 'gamma': 0.0} [min_child_weight] grid scores: mean: 0.66052, std: 0.00477, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 200, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10, 'gamma': 0.0} mean: 0.65847, std: 0.00664, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 220, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10, 'gamma': 0.0} mean: 0.66007, std: 0.00419, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 260, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10, 'gamma': 0.0} mean: 0.65942, std: 0.00468, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 280, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10, 'gamma': 0.0} best score: 0.66052 best params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 200, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10, 'gamma': 0.0} [max_depth] grid scores: mean: 0.66014, std: 0.00464, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 8, 'gamma': 0.0} mean: 0.66036, std: 0.00431, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 9, 'gamma': 0.0} mean: 0.65935, std: 0.00609, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 11, 'gamma': 0.0} mean: 0.65793, std: 0.00313, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 12, 'gamma': 0.0} best score: 0.66036 best params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 9, 'gamma': 0.0} [gamma] grid scores: mean: 0.66007, std: 0.00456, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10, 'gamma': 2.0} mean: 0.66024, std: 0.00436, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10, 'gamma': 4.0} best score: 0.66024 best params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10, 'gamma': 4.0} quant, grid scores: mean: 0.66015, std: 0.00447, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 
'subsample': 0.9, 'int_fold': 7, 'max_depth': 10, 'gamma': 0.0} best score: 0.66015 Q 10, 8 buckets, ['Employment_Info_1', 'BMI', 'Ins_Age', 'Employment_Info_6', 'Product_Info_4'] grid scores: mean: 0.65939, std: 0.00431, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10, 'gamma': 0.0} best score: 0.65939 best params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10, 'gamma': 0.0} Q 10, 9 buckets, ['Employment_Info_1', 'BMI', 'Ins_Age', 'Employment_Info_6', 'Product_Info_4'] grid scores: mean: 0.65833, std: 0.00657, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10, 'gamma': 0.0} best score: 0.65833 best params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10, 'gamma': 0.0} Q 16, 8 buckets, ['Employment_Info_1', 'BMI', 'Ins_Age', 'Employment_Info_6', 'Product_Info_4'] grid scores: mean: 0.65954, std: 0.00403, params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10, 'gamma': 0.0} best score: 0.65954 best params: {'colsample_bytree': 0.67, 'learning_rate': 0.03, 'min_child_weight': 240, 'n_estimators': 700, 'subsample': 0.9, 'int_fold': 7, 'max_depth': 10, 'gamma': 0.0} """ """ Both: 0.65576 BmiAge: 0.65578 MedCount: 0.65638 None: 0.65529 """ med_keyword_columns = all_data.columns[all_data.columns.str.startswith('Medical_Keyword_')] all_data['Med_Keywords_Count'] = all_data[med_keyword_columns].sum(axis=1) # poly_15 all_data['BMI_Age'] = all_data['BMI'] * all_data['Ins_Age'] from sklearn.preprocessing import PolynomialFeatures poly = PolynomialFeatures(2, interaction_only=True, include_bias=False).fit_transform(all_data[CONTINUOUS]) poly = poly[:, len(CONTINUOUS):] # for i in range(poly.shape[1]): # all_data['poly_' + str(i + 1)] = poly[:, i] # best_poly_120 = ['poly_64', 'poly_55', 'poly_54', 'poly_57', 'poly_56', 'poly_50', 'poly_52', 'poly_68', 'poly_11', 'poly_10', 'poly_13', 'poly_34', 'poly_15', 'poly_14', 'poly_31', 'poly_16', 'poly_73', 'poly_18', 'poly_75', 'poly_77', 'poly_76', 'poly_39', 'poly_74', 'poly_5', 'poly_4', 'poly_7', 'poly_1', 'poly_3', 'poly_2', 'poly_9', 'poly_12', 'poly_37', 'poly_78', 'poly_35', 'poly_42', 'poly_43', 'poly_40', 'poly_41', 'poly_47', 'poly_45', 'poly_33', 'poly_48', 'poly_49', 'poly_32', 'poly_24', 'poly_25', 'poly_26', 'poly_20', 'poly_21', 'poly_22', 'poly_23', 'poly_30', 'poly_28', 'poly_65', 'poly_66', 'poly_67'] # T4 # best_poly = ['poly_5', 'poly_13', 'poly_14', 'poly_15'] #for n in best_poly: # all_data[n] = poly[:, int(n[5:]) - 1] # 3x3 #all_data['MH1_BMI'] = all_data['Medical_History_1'] * all_data['BMI'] #all_data['MH1_MKC'] = all_data['Medical_History_1'] * all_data['Med_Keywords_Count'] #all_data['BMI_MKC'] = all_data['BMI'] * all_data['Med_Keywords_Count'] all_data = all_data.drop(TO_DROP, axis=1) # Use -1 for any others if imputer is None: all_data.fillna(-1, inplace=True) else: all_data['Response'].fillna(-1, inplace=True) # fix the dtype on the label column all_data['Response'] = all_data['Response'].astype(int) # split train and test train = all_data[all_data['Response'] > 0].copy() test = all_data[all_data['Response'] < 1].copy() dropped_cols = 
['Id', 'Response'] train_y = train['Response'].values train_X = train.drop(dropped_cols, axis=1) test_X = test.drop(dropped_cols, axis=1) if imputer is not None: from sklearn.preprocessing import Imputer imp = Imputer(missing_values='NaN', strategy=imputer, axis=0) train_X = imp.fit_transform(train_X) test_X = imp.transform(test_X) prudential_kwargs = \ { 'objective': 'reg:linear', 'learning_rate': 0.045, 'min_child_weight': 50, 'subsample': 0.8, 'colsample_bytree': 0.7, 'max_depth': 7, 'n_estimators': nest, 'nthread': njobs, 'seed': 0, 'n_buckets': nbuckets, 'initial_params': mvector, 'minimizer': minimizer, 'scoring': NegQWKappaScorer } if estimator == 'PrudentialRegressorCVO2FO' or estimator == 'PrudentialRegressorCVO2': prudential_kwargs['int_fold'] = int_fold pass # override kwargs with any changes for k, v in clf_kwargs.items(): prudential_kwargs[k] = v clf = globals()[estimator](**prudential_kwargs) print(estimator, clf.get_params()) if nfolds > 1: param_grid = { 'n_estimators': [700], 'max_depth': [6], 'colsample_bytree': [0.67], 'subsample': [0.9], 'min_child_weight': [240], #'initial_params': [[-0.71238755, -1.4970176, -1.73800531, -1.13361266, -0.82986203, -0.06473039, 0.69008725, 0.94815881]] } for k, v in cv_grid.items(): param_grid[k] = v from sklearn.metrics import make_scorer MIN, MAX = (1, 8) qwkappa = make_scorer(Kappa, weights='quadratic', min_rating=MIN, max_rating=MAX) from sklearn.cross_validation import StratifiedKFold from sklearn.grid_search import GridSearchCV grid = GridSearchCV(estimator=clf, param_grid=param_grid, cv=StratifiedKFold(train_y, n_folds=nfolds), scoring=qwkappa, n_jobs=1, verbose=2, refit=False) grid.fit(train_X, train_y) print('grid scores:') for item in grid.grid_scores_: print(' {:s}'.format(item)) print('best score: {:.5f}'.format(grid.best_score_)) print('best params:', grid.best_params_) pass else: clf.fit(train_X, train_y) final_test_preds = clf.predict(test_X) final_test_preds = rint(clip(final_test_preds, 1, 8)) savetxt(out_csv_file, stack(zip(test['Id'].values, final_test_preds), axis=1).T, delimiter=',', fmt=['%d', '%d'], header='"Id","Response"', comments='') # if not isinstance(clf.xgb, list): # xgb_ensemble = [clf.xgb] # else: # xgb_ensemble = clf.xgb # for xgb in xgb_ensemble: # importance = xgb.booster().get_fscore() # import operator # print(sorted(importance.items()), "\n") # importance = sorted(importance.items(), key=operator.itemgetter(1), reverse=True) # print(importance, "\n") # features = [k for k, _ in importance] # print(len(features), features) feat_imp = clf.feature_importances_ nonzero_features = train_X.columns.values[feat_imp > 0.] print("Features with importance != 0", len(nonzero_features), nonzero_features, sorted(zip(feat_imp[feat_imp > 0.], nonzero_features))) zero_features = train_X.columns.values[feat_imp == 0.] print("Features with importance == 0", zero_features) return def main(argv=None): # IGNORE:C0111 '''Command line options.''' from sys import argv as Argv if argv is None: argv = Argv pass else: Argv.extend(argv) pass from os.path import basename program_name = basename(Argv[0]) program_version = "v%s" % __version__ program_build_date = str(__updated__) program_version_message = '%%(prog)s %s (%s)' % (program_version, program_build_date) try: program_shortdesc = __import__('__main__').__doc__.split("\n")[1] except: program_shortdesc = __import__('__main__').__doc__ program_license = '''%s Created by Wojciech Migda on %s. Copyright 2016 Wojciech Migda. All rights reserved. 
Licensed under the MIT License Distributed on an "AS IS" basis without warranties or conditions of any kind, either express or implied. USAGE ''' % (program_shortdesc, str(__date__)) try: from argparse import ArgumentParser from argparse import RawDescriptionHelpFormatter from argparse import FileType from sys import stdout # Setup argument parser parser = ArgumentParser(description=program_license, formatter_class=RawDescriptionHelpFormatter) parser.add_argument("-n", "--num-est", type=int, default=700, action='store', dest="nest", help="number of Random Forest estimators") parser.add_argument("-j", "--jobs", type=int, default=-1, action='store', dest="njobs", help="number of jobs") parser.add_argument("-f", "--cv-fold", type=int, default=0, action='store', dest="nfolds", help="number of cross-validation folds") parser.add_argument("--int-fold", type=int, default=6, action='store', dest="int_fold", help="internal fold for PrudentialRegressorCVO2FO") parser.add_argument("-b", "--n-buckets", type=int, default=8, action='store', dest="nbuckets", help="number of buckets for digitizer") parser.add_argument("-o", "--out-csv", action='store', dest="out_csv_file", default=stdout, type=FileType('w'), help="output CSV file name") parser.add_argument("-m", "--minimizer", action='store', dest="minimizer", default='BFGS', type=str, choices=['Powell', 'CG', 'BFGS'], help="minimizer method for scipy.optimize.minimize") parser.add_argument("-M", "--mvector", action='store', dest="mvector", default=[-1.5, -2.6, -3.6, -1.2, -0.8, 0.04, 0.7, 3.6], type=float, nargs='*', help="minimizer's initial params vector") parser.add_argument("-I", "--imputer", action='store', dest="imputer", default=None, type=str, choices=['mean', 'median', 'most_frequent'], help="Imputer strategy, None is -1") parser.add_argument("--clf-params", type=str, default="{}", action='store', dest="clf_params", help="classifier parameters subset to override defaults") parser.add_argument("-G", "--cv-grid", type=str, default="{}", action='store', dest="cv_grid", help="cross-validation grid params (used if NFOLDS > 0)") parser.add_argument("-E", "--estimator", action='store', dest="estimator", default='PrudentialRegressor', type=str,# choices=['mean', 'median', 'most_frequent'], help="Estimator class to use") # Process arguments args = parser.parse_args() for k, v in args.__dict__.items(): print(str(k) + ' => ' + str(v)) pass work(args.out_csv_file, args.estimator, args.nest, args.njobs, args.nfolds, eval(args.cv_grid), args.minimizer, args.nbuckets, args.mvector, args.imputer, eval(args.clf_params), args.int_fold) return 0 except KeyboardInterrupt: ### handle keyboard interrupt ### return 0 except Exception as e: if DEBUG: raise(e) pass indent = len(program_name) * " " from sys import stderr stderr.write(program_name + ": " + repr(e) + "\n") stderr.write(indent + " for help use --help") return 2 pass if __name__ == "__main__": if DEBUG: from sys import argv argv.append("-n 700") argv.append("--minimizer=Powell") argv.append("--clf-params={'learning_rate': 0.05, 'min_child_weight': 240, 'subsample': 0.9, 'colsample_bytree': 0.67, 'max_depth': 6, 'initial_params': [0.1, -1, -2, -1, -0.8, 0.02, 0.8, 1]}") argv.append("-f 10") pass from sys import exit as Exit Exit(main()) pass
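# ---------------------------------------------------------------------------
# Editor's note (illustrative sketch, not part of the original file): the
# PrudentialRegressorCVO2 class above fits one XGB model per internal
# stratified fold, fits an offset post-processor on that fold's held-out
# predictions, and at predict time averages all (model, offset) pairs before
# clipping to the 1..8 label range.  The snippet below shows that pattern in
# miniature; sklearn's GradientBoostingRegressor and a single mean-residual
# offset stand in for the project's xgb_sklearn.XGBRegressor and
# DigitizedOptimizedOffsetRegressor, which live in modules not shown here.
# ---------------------------------------------------------------------------
import numpy as np
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import StratifiedKFold


def fit_fold_ensemble(X, y, n_folds=6, seed=0):
    """Return one (model, offset) pair per internal fold."""
    kf = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=seed)
    pairs = []
    for itrain, itest in kf.split(X, y):
        model = GradientBoostingRegressor(random_state=seed)
        model.fit(X[itrain], y[itrain])
        # crude stand-in for the digitized offset fit: one global offset
        # chosen on the held-out part of this fold
        offset = float(np.mean(y[itest] - model.predict(X[itest])))
        pairs.append((model, offset))
    return pairs


def predict_fold_ensemble(pairs, X, lo=1, hi=8):
    """Average the per-fold corrected predictions and clip to [lo, hi]."""
    preds = [model.predict(X) + offset for model, offset in pairs]
    return np.clip(np.mean(preds, axis=0), lo, hi)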
mit
-5,448,558,039,769,340,000
50.479864
630
0.600513
false
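The Prudential scripts above repeatedly hand raw regressor outputs to a DigitizedOptimizedOffsetRegressor imported from an OptimizedOffsetRegressor module that is not included in this dump. As a rough sketch of the underlying idea only, bucket the continuous predictions and learn one additive offset per bucket before the final 1..8 rounding. The helper below uses numpy.digitize plus scipy.optimize.minimize; the bucket edges, the Powell minimizer and the loss callable are illustrative stand-ins, not the project's actual implementation.

import numpy as np
from scipy.optimize import minimize


def apply_offsets(raw, offsets, edges):
    """Shift every raw prediction by the offset of the bucket it falls in."""
    buckets = np.digitize(raw, edges)              # indices 0 .. len(edges)
    return raw + np.asarray(offsets)[buckets]


def fit_offsets(raw, y_true, edges, loss):
    """Choose per-bucket offsets that minimise `loss` on held-out data."""
    n_buckets = len(edges) + 1
    result = minimize(
        lambda off: loss(y_true, apply_offsets(raw, off, edges)),
        x0=np.zeros(n_buckets),
        method='Powell')                           # derivative-free, like the script's Powell minimizer option
    return result.x

# usage sketch: edges = np.arange(1.5, 8.0) gives 8 buckets for labels 1..8,
# and `loss` could be a negative quadratic-weighted-kappa scorer.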
manjaro/pacman-mirrors
tests/test_command_line_parse.py
1
7250
#!/usr/bin/env python """ test_pacman-mirrors ---------------------------------- Tests for `pacman-mirrors` module. """ import unittest from unittest.mock import patch from pacman_mirrors.functions import cliFn from pacman_mirrors.functions import configFn from pacman_mirrors.pacman_mirrors import PacmanMirrors from . import mock_configuration as conf test_conf = { "branch": "stable", "branches": conf.BRANCHES, "config_file": conf.CONFIG_FILE, "custom_file": conf.CUSTOM_FILE, "method": "rank", "work_dir": conf.WORK_DIR, "mirror_file": conf.MIRROR_FILE, "mirror_list": conf.MIRROR_LIST, "no_update": False, "country_pool": [], "protocols": [], "repo_arch": conf.REPO_ARCH, "status_file": conf.STATUS_FILE, "ssl_verify": True, "url_mirrors_json": conf.URL_MIRROR_JSON, "url_status_json": conf.URL_STATUS_JSON, "x32": False } class TestCommandLineParse(unittest.TestCase): """Pacman Mirrors Test suite""" def setUp(self): """Setup tests""" pass # @patch("os.getuid") # @patch.object(configFn, "setup_config") # def test_arg_branch_unstable(self, mock_build_config, mock_os_getuid): # """TEST: CLI config[branch] from ARG '-b unstable'""" # mock_os_getuid.return_value = 0 # mock_build_config.return_value = test_conf # with unittest.mock.patch("sys.argv", # ["pacman-mirrors", # "-b", "unstable"]): # app = PacmanMirrors() # app.config["config_file"] = conf.CONFIG_FILE # app.config = configFn.setup_config() # cli.parse_command_line(app, True) # assert app.config["branch"] == "unstable" # # @patch("os.getuid") # @patch.object(configFn, "setup_config") # def test_arg_branch_testing(self, mock_build_config, mock_os_getuid): # """TEST: CLI config[branch] from ARG '-b testing'""" # mock_os_getuid.return_value = 0 # mock_build_config.return_value = test_conf # with unittest.mock.patch("sys.argv", # ["pacman-mirrors", # "-b", "testing"]): # app = PacmanMirrors() # app.config["config_file"] = conf.CONFIG_FILE # app.config = configFn.setup_config() # cli.parse_command_line(app, True) # assert app.config["branch"] == "testing" @patch("os.getuid") @patch.object(configFn, "setup_config") def test_arg_method(self, mock_build_config, mock_os_getuid): """TEST: CLI config[method] from ARG '-m random'""" mock_os_getuid.return_value = 0 mock_build_config.return_value = test_conf with unittest.mock.patch("sys.argv", ["pacman-mirrors", "-m", "random"]): app = PacmanMirrors() app.config["config_file"] = conf.CONFIG_FILE app.config = configFn.setup_config() cliFn.parse_command_line(app, True) assert app.config["method"] == "random" @patch("os.getuid") @patch.object(configFn, "setup_config") def test_arg_onlycountry(self, mock_build_config, mock_os_getuid): """TEST: CLI config[only_country] from ARG '-c France,Germany'""" mock_os_getuid.return_value = 0 mock_build_config.return_value = test_conf with unittest.mock.patch("sys.argv", ["pacman-mirrors", "-c", "France,Germany"]): app = PacmanMirrors() app.config["config_file"] = conf.CONFIG_FILE app.config = configFn.setup_config() cliFn.parse_command_line(app, True) assert app.config["country_pool"] == ["France", "Germany"] @patch("os.getuid") @patch.object(configFn, "setup_config") def test_arg_geoip(self, mock_build_config, mock_os_getuid): """TEST: CLI geoip is True from ARG '--geoip'""" mock_os_getuid.return_value = 0 mock_build_config.return_value = test_conf with unittest.mock.patch("sys.argv", ["pacman-mirrors", "--geoip"]): app = PacmanMirrors() app.config["config_file"] = conf.CONFIG_FILE app.config = configFn.setup_config() cliFn.parse_command_line(app, True) assert 
app.geoip is True @patch("os.getuid") @patch.object(configFn, "setup_config") def test_arg_fasttrack(self, mock_build_config, mock_os_getuid): """TEST: CLI fasttrack is 5 from ARG '-f 5'""" mock_os_getuid.return_value = 0 mock_build_config.return_value = test_conf with unittest.mock.patch("sys.argv", ["pacman-mirrors", "-f5"]): app = PacmanMirrors() app.config["config_file"] = conf.CONFIG_FILE app.config = configFn.setup_config() cliFn.parse_command_line(app, True) assert app.fasttrack == 5 @patch("os.getuid") @patch.object(configFn, "setup_config") def test_arg_interactive(self, mock_build_config, mock_os_getuid): """TEST: CLI interactive is true from ARG '-i'""" mock_os_getuid.return_value = 0 mock_build_config.return_value = test_conf with unittest.mock.patch("sys.argv", ["pacman-mirrors", "-i"]): app = PacmanMirrors() app.config["config_file"] = conf.CONFIG_FILE app.config = configFn.setup_config() cliFn.parse_command_line(app, True) assert app.interactive is True @patch("os.getuid") @patch.object(configFn, "setup_config") def test_arg_max_wait_time(self, mock_build_config, mock_os_getuid): """TEST: CLI max_wait_time is 5 from ARG '-t 5'""" mock_os_getuid.return_value = 0 mock_build_config.return_value = test_conf with unittest.mock.patch("sys.argv", ["pacman-mirrors", "-t5"]): app = PacmanMirrors() app.config["config_file"] = conf.CONFIG_FILE app.config = configFn.setup_config() cliFn.parse_command_line(app, True) assert app.max_wait_time == 5 @patch("os.getuid") @patch.object(configFn, "setup_config") def test_arg_quiet(self, mock_build_config, mock_os_getuid): """TEST: CLI quiet is True from ARG '-q'""" mock_os_getuid.return_value = 0 mock_build_config.return_value = test_conf with unittest.mock.patch("sys.argv", ["pacman-mirrors", "-q"]): app = PacmanMirrors() app.config["config_file"] = conf.CONFIG_FILE app.config = configFn.setup_config() cliFn.parse_command_line(app, True) assert app.quiet is True def tearDown(self): """Tear down""" pass if __name__ == "__main__": unittest.main()
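# ---------------------------------------------------------------------------
# Editor's note (illustrative sketch, not part of the original file): every
# test above follows one pattern -- patch os.getuid and configFn.setup_config,
# replace sys.argv with a fake command line, run the parser, then assert on
# the resulting state.  The miniature below shows the same sys.argv/os.getuid
# patching with plain argparse; parse_args() is an invented stand-in, not
# pacman_mirrors' real cliFn.parse_command_line.
# ---------------------------------------------------------------------------
import argparse
import unittest
from unittest.mock import patch


def parse_args(argv=None):
    """Tiny stand-in parser: only the -b/--branch and -m/--method options."""
    parser = argparse.ArgumentParser(prog="pacman-mirrors")
    parser.add_argument("-b", "--branch", default="stable")
    parser.add_argument("-m", "--method", default="rank")
    # argv=None lets argparse fall back to sys.argv[1:], which is exactly
    # what the sys.argv patching below relies on
    return parser.parse_args(argv)


class TestArgvPatching(unittest.TestCase):
    @patch("os.getuid")
    def test_method_from_patched_argv(self, mock_getuid):
        mock_getuid.return_value = 0                 # pretend to run as root
        with patch("sys.argv", ["pacman-mirrors", "-m", "random"]):
            args = parse_args()
        self.assertEqual(args.method, "random")


if __name__ == "__main__":
    unittest.main()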
gpl-3.0
2,128,816,080,647,277,000
37.978495
76
0.546759
false
otuncelli/Xpert-Screen-Recorder
src/main.py
1
16935
# -*- coding: utf-8 -*- # ============================================================================= # Xpert Screen Recorder # Copyright (C) 2013 OSMAN TUNCELLI # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ============================================================================= import singleton, logging singleton.logger.setLevel(logging.CRITICAL) singleton.SingleInstance() import pygtk pygtk.require('2.0') import gtk, os, sys, subprocess, operator, signal, webbrowser from datetime import datetime from collections import OrderedDict from ConfigParser import ConfigParser DEBUG_MODE = False if not DEBUG_MODE: sys.stderr = open(os.path.devnull, 'w') LANG = 'en' def T(s): dic = {'Xpert Screen Recorder' : u'Xpert Ekran Görüntüsü Kaydedici', 'Start Recording' : u'Kaydı Başlat', 'Stop Recording' : u'Kaydı Durdur', 'Settings' : 'Ayarlar', 'About' : u'Hakkında', 'Exit' : u'Çıkış', 'Resolution' : u'Çözünürlük', 'Frame rate' : u'Çerçeve hızı', 'Language' : u'Arayüz Dili', 'Save To' : u'Kayıt Yeri', 'Xpert Screen Recorder is a multi-platform screencast recorder.' : u'Xpert Ekran Görüntüsü Kaydedici, ekran görüntüsünü çeşitli platformlarda kaydedebilen bir araçtır.', 'All Done! Do you want to watch the recorded video now?' : u'Tamamlandı! Kaydedilen görüntüyü şimdi izlemek ister misiniz?' 
} return (dic[s] if LANG == 'tr' else s) class Settings(object): def __init__(self, screen_size, inifile = 'settings.ini'): self.defaults = { 'framerate' : 30, 'resolution' : screen_size, 'saveto' : os.path.expanduser('~'), 'lang' : 'en' } self.active = self.defaults.copy() self.screen_size = screen_size self.dialog_shown = False self.valid_framerates = (15,25,30) self._set_valid_resolutions() self.valid_languages = OrderedDict((('en', 'English'), ('tr', u'Türkçe'))) self.inifile = inifile self.cp = ConfigParser() if os.path.isfile(inifile): self.cp.read(inifile) self.correct(self.cp._defaults) self.active = self.cp._defaults.copy() else: self.cp._defaults = self.defaults.copy() with open(inifile, 'w') as fp: self.cp.write(fp) def correct(self, d): try: d['framerate'] = int(d['framerate']) assert d['framerate'] in self.valid_framerates except: d['framerate'] = self.defaults['framerate'] try: d['resolution'] = eval(d['resolution']) assert d['resolution'] in self.valid_resolutions except: d['resolution'] = self.defaults['resolution'] try: assert os.path.isdir(d['saveto']) except: d['saveto'] = self.defaults['saveto'] try: assert d['lang'] in ('tr', 'en') except: d['lang'] = 'en' def _set_valid_resolutions(self): width_array = (1920, 1680, 1280, 960) aspect_ratio = operator.truediv(*self.screen_size) self.valid_resolutions = tuple((w, int(w / aspect_ratio)) for w in width_array if w <= self.screen_size[0]) def set_framerate(self, framerate): self.active['framerate'] = int(framerate) def set_resolution(self, res): if isinstance(res, basestring): self.active['resolution'] = tuple(res.split('x')) else: self.active['resolution'] = tuple(res) def set_saveto(self, saveto): self.active['saveto'] = saveto def get_framerate(self): return self.active['framerate'] def get_resolution(self): return self.active['resolution'] def get_saveto(self): return self.active['saveto'] def get_language(self): return self.active['lang'] def show_dialog(self, reload_func): self.dialog_shown = True self.reload_func = reload_func dialog = gtk.Dialog() dialog.set_type_hint(gtk.gdk.WINDOW_TYPE_HINT_UTILITY) dialog.set_size_request(250,250) dialog.set_resizable(False) dialog.set_position(gtk.WIN_POS_CENTER) label_settings = gtk.Label() label_resolution = gtk.Label() label_framerate = gtk.Label() label_language = gtk.Label() def set_settings_texts(): dialog.set_title(T('Settings')) label_settings.set_markup('<span font_family="Verdana" weight="heavy" size="x-large">' + dialog.get_title() + '</span>') label_resolution.set_text(T('Resolution') + ' :') label_framerate.set_text(T('Frame rate') + ' :') label_language.set_text(T('Language') + ' :') set_settings_texts() store_resolution = gtk.ListStore(str) store_framerate = gtk.ListStore(str) store_language = gtk.ListStore(str) for v in self.valid_languages.values(): store_language.append([v]) renderer = gtk.CellRendererText() renderer.set_alignment(1, 0.5) for vr in self.valid_resolutions: store_resolution.append(['x'.join(map(str, vr))]) self.combo_resolution = gtk.ComboBox(store_resolution) self.combo_resolution.pack_start(renderer) self.combo_resolution.add_attribute(renderer, 'text', 0) self.combo_resolution.set_active(self.valid_resolutions.index(self.get_resolution())) for fr in self.valid_framerates: store_framerate.append([fr]) self.combo_framerate = gtk.ComboBox(store_framerate) self.combo_framerate.pack_start(renderer) self.combo_framerate.add_attribute(renderer, 'text', 0) self.combo_framerate.set_active(self.valid_framerates.index(self.get_framerate())) 
self.combo_language = gtk.ComboBox(store_language) self.combo_language.pack_start(renderer) self.combo_language.add_attribute(renderer, 'text', 0) self.combo_language.set_active(self.valid_languages.keys().index(self.get_language())) button_browse = gtk.Button(T('Save To')) button_okay = gtk.Button(stock=gtk.STOCK_OK) button_okay.set_size_request(40, -1) button_cancel = gtk.Button(stock=gtk.STOCK_CANCEL) button_cancel.set_size_request(40, -1) padding = 5 table = gtk.Table(rows=3, columns=2, homogeneous=False) xyoptions = dict(xoptions=0, yoptions=0, xpadding=padding, ypadding=padding) table.attach(label_resolution, 0, 1, 0, 1, **xyoptions) table.attach(self.combo_resolution, 1, 2, 0, 1, xoptions=gtk.FILL|gtk.EXPAND, xpadding=padding, ypadding=padding) table.attach(label_framerate, 0, 1, 1, 2, **xyoptions) table.attach(self.combo_framerate, 1, 2, 1, 2, xoptions=gtk.FILL|gtk.EXPAND, xpadding=padding, ypadding=padding) table.attach(label_language, 0, 1, 2, 3, **xyoptions) table.attach(self.combo_language, 1, 2, 2, 3, xoptions=gtk.FILL|gtk.EXPAND, xpadding=padding, ypadding=padding) table.attach(button_browse, 1, 2, 3, 4, xoptions=gtk.FILL|gtk.EXPAND, xpadding=padding, ypadding=padding) vb = dialog.vbox vb.pack_start(label_settings, 1, 0, padding) vb.pack_start(table, 0, 0, padding) hb = gtk.HBox(homogeneous=False, spacing=0) hb.pack_start(button_okay, 1, 1, padding) hb.pack_start(button_cancel, 1, 1, padding) vb.pack_start(hb, 0, 0, padding) saveto = [self.get_saveto()] def on_browse(widget, saveto): fc = gtk.FileChooserDialog(T('Save To'), dialog, gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER|gtk.FILE_CHOOSER_ACTION_OPEN, (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OK, gtk.RESPONSE_OK)) if os.path.isdir(saveto[0]): fc.set_current_folder(saveto[0]) try: response = fc.run() if response == gtk.RESPONSE_OK: saveto[0] = fc.get_filename() finally: fc.destroy() def on_ok(widget): global LANG LANG = self.active['lang'] = self.valid_languages.keys()[self.combo_language.get_active()] self.active['resolution'] = self.valid_resolutions[self.combo_resolution.get_active()] self.active['framerate'] = self.valid_framerates[self.combo_framerate.get_active()] self.active['saveto'] = saveto[0] self.cp._defaults = self.active.copy() with open(self.inifile, 'w') as fp: self.cp.write(fp) self.reload_func() dialog.destroy() def on_cancel(widget): self.active = self.cp._defaults.copy() dialog.destroy() button_browse.connect('clicked', lambda w : on_browse(w,saveto)) button_okay.connect('clicked', on_ok) button_cancel.connect('clicked', on_cancel) dialog.show_all() dialog.present_with_time(2) dialog.run() self.dialog_shown = False class XpertScreenRecorder(object): def __init__(self, indicator = None): global LANG self.app_version = "1.0" self.app_icon = gtk.StatusIcon() self.app_icon.set_from_stock(gtk.STOCK_MEDIA_PLAY) self.app_icon.connect('popup-menu', self.show_popup) self.app_icon.connect('activate', self.kill_popup) self.settings = Settings(self._get_screen_size()) self.active = self.settings.active LANG = self.active['lang'] self.menu = gtk.Menu() self.mi_rec_start = gtk.MenuItem() self.mi_rec_stop = gtk.MenuItem() self.mi_settings = gtk.MenuItem() self.mi_about = gtk.MenuItem() self.mi_exit = gtk.MenuItem() self._reload_texts() self.mi_rec_start.set_sensitive(True) self.mi_rec_stop.set_sensitive(False) self.mi_rec_start.connect('activate', self.start_recording) self.mi_rec_stop.connect('activate', self.stop_recording) self.mi_settings.connect('activate', lambda _: 
self.settings.show_dialog(self._reload_texts)) self.mi_about.connect('activate', self.show_about) self.mi_exit.connect('activate', self.exit) for mi in (self.mi_rec_start, self.mi_rec_stop, gtk.SeparatorMenuItem(), self.mi_settings, self.mi_about, self.mi_exit): self.menu.append(mi) self.menu.show_all() if indicator: indicator.set_menu(self.menu) self.indicator = indicator self._recording = False def _reload_texts(self): self.app_title = T('Xpert Screen Recorder') self.app_icon.set_tooltip_text('{} v{}'.format(self.app_title, self.app_version)) self.mi_rec_start.set_label(T('Start Recording')) self.mi_rec_stop.set_label(T('Stop Recording')) self.mi_settings.set_label(T('Settings')) self.mi_about.set_label(T('About')) self.mi_exit.set_label(T('Exit')) def _get_screen_size(self): screen = self.app_icon.get_screen() return screen.get_width(), screen.get_height() def is_recording(self): return self._recording def set_recording(self, boolean): self._recording = boolean self.app_icon.set_blinking(self._recording) if self._recording: if self.indicator: self.indicator.set_status(appindicator.STATUS_ATTENTION) self.app_icon.set_from_stock(gtk.STOCK_MEDIA_RECORD) self.mi_rec_start.set_sensitive(False) self.mi_rec_stop.set_sensitive(True) else: if self.indicator: self.indicator.set_status(appindicator.STATUS_ACTIVE) self.app_icon.set_from_stock(gtk.STOCK_MEDIA_PLAY) delattr(self, 'p') self.mi_rec_start.set_sensitive(True) self.mi_rec_stop.set_sensitive(False) def generate_filename(self): return os.path.join(self.active['saveto'], datetime.now().strftime("%Y_%m_%d_%H_%M_%S") + ".mp4") def start_recording(self, widget): framerate = self.active['framerate'] rtbufsize = bufsize = 2147483647 # you can also use smaller buffer sizes self.filename = self.generate_filename() if sys.platform == 'win32': # ffmpeg for windows cmdline = ['ffmpeg', '-r', framerate, '-rtbufsize', rtbufsize, '-f', 'dshow', '-i', 'video=screen-capture-recorder:audio=virtual-audio-capturer', '-threads', 2, '-pix_fmt', 'yuv420p','-bufsize', bufsize, '-c:v', 'libx264', '-preset', 'ultrafast', '-tune', 'zerolatency', '-threads', 2] startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess._subprocess.STARTF_USESHOWWINDOW else: cmdline = ['avconv', '-rtbufsize', rtbufsize, '-loglevel', 'quiet', '-f', 'alsa', '-i', 'pulse', '-f', 'x11grab', '-s:v', 'x'.join(map(str, self._get_screen_size())), '-i', ':0.0', '-ar', '44100', '-bufsize', bufsize, '-pix_fmt', 'yuv420p', '-c:v', 'libx264', '-c:a', 'libvo_aacenc', '-preset', 'ultrafast', '-tune', 'zerolatency', '-threads', 2] startupinfo = None if not DEBUG_MODE: cmdline += ['-loglevel', 'quiet'] if self.settings.screen_size <> self.active["resolution"]: cmdline += ['-vf', 'scale=%d:-1' % self.active["resolution"][0], '-sws_flags', 'lanczos'] cmdline.append(self.filename) cmdline = map(unicode, cmdline) if DEBUG_MODE: print ' '.join(cmdline) self.p = subprocess.Popen(cmdline, stdin=subprocess.PIPE, startupinfo = startupinfo) self.set_recording(True) def stop_recording(self, widget): if not self.is_recording(): return if sys.platform == 'win32': self.p.communicate('q\\n') else: self.p.send_signal(signal.SIGINT) self.p.wait() self.set_recording(False) md = gtk.MessageDialog(None, 0, gtk.MESSAGE_QUESTION, gtk.BUTTONS_YES_NO, T('All Done! 
Do you want to watch the recorded video now?')) md.set_position(gtk.WIN_POS_CENTER) response = md.run() md.destroy() if response == gtk.RESPONSE_YES: webbrowser.open(self.filename) def show_about(self, widget): about = gtk.AboutDialog() about.set_position(gtk.WIN_POS_CENTER) about.set_icon_name (self.app_title) about.set_name(self.app_title) about.set_version('v1.0') about.set_comments(T('Xpert Screen Recorder is a multi-platform screencast recorder.')) about.set_authors([u'Osman Tunçelli <tuncelliosman-at-gmail.com>']) about.run() about.destroy() def exit(self, widget): self.stop_recording(widget) self.app_icon.set_visible(False) gtk.main_quit() def kill_popup(self, widget): if hasattr(self, 'menu'): self.menu.popdown() def show_popup(self, icon, event_button, event_time): if not self.settings.dialog_shown: self.menu.popup(None, None, None if os.name == 'nt' else gtk.status_icon_position_menu, event_button, event_time, self.app_icon) main = gtk.main if __name__ == "__main__": indicator = None if sys.platform == 'linux2': import appindicator indicator = appindicator.Indicator("Xpert", "gtk-media-play-ltr", appindicator.CATEGORY_APPLICATION_STATUS) indicator.set_attention_icon(gtk.STOCK_MEDIA_RECORD) indicator.set_status(appindicator.STATUS_ACTIVE) app = XpertScreenRecorder(indicator) app.main()
gpl-3.0
-6,111,160,903,389,773,000
42.986979
180
0.584192
false
jparal/loopy
loopy/io.py
1
3026
import tables as pt
import numpy as np
import loopy as lpy
import shutil as sh    # move
import os.path as pth  # exists


def readhdf5(fname, path='/'):
    """
    .. py:function:: readhdf5(fname, path='/')

    The function traverses an HDF5 file and creates a structured dictionary.

    :param fname: File name to read.
    :param path: Root path from where to start reading.
    :rtype: loopy.struct (i.e. dictionary) or variable
    """
    def _traverse_tree(h5f, path):
        # Remove double slashes and the last one
        path = '/' + '/'.join(filter(None, path.split('/')))
        gloc = ''.join(path.rpartition('/')[0:2])
        name = path.rpartition('/')[2]

        # We want to read a single variable
        groups = h5f.listNodes(where=gloc, classname='Group')
        nodes = h5f.listNodes(where=gloc)
        leafs = [n for n in nodes if n not in groups]
        leaf = [n for n in leafs if n.name == name]
        if len(leaf) == 1:
            return leaf[0].read()

        dat = lpy.struct()
        for node in h5f.listNodes(where=path):
            name = node._v_name
            dat[name] = _traverse_tree(h5f, path + '/' + name)

        return dat

    h5f = pt.File(fname, 'r')
    dat = _traverse_tree(h5f, path)
    h5f.close()
    return dat


def writehdf5(fname, data, path='/', append=False, backup=False):
    """
    .. py:function:: writehdf5(fname, data, path='/', append=False)

    The function writes an HDF5 file using PyTables and CArray. This is a
    high-level function which should handle the most common scenarios.

    :param fname: name of the HDF5 file
    :param path: location inside of HDF5 file (e.g. /new/Bz)
    :param data: the actual data to be stored
    :type data: dict or ndarray; anything else will be converted into ndarray
    :param append: Should the data be appended to an existing file?
    :param backup: If True, rename an existing file to .bak instead of overwriting it.
    :rtype: none
    """
    if backup and pth.exists(fname):
        sh.move(fname, fname + '.bak')

    mode = 'a' if append else 'w'
    filters = pt.Filters(complevel=6)
    h5f = pt.File(fname, mode)

    # Remove double slashes and the last one
    path = '/' + '/'.join(filter(None, path.split('/')))
    dloc = path.rsplit('/', 1)
    root = dloc[0] if np.size(dloc) > 1 else '/'
    root = root if root.startswith('/') else '/' + root
    name = path if np.size(dloc) == 1 else dloc[1]

    if isinstance(data, dict):
        h5f.close()
        for key in data.keys():
            writehdf5(fname, data[key], path=path + '/' + key, append=True)
        return

    if not isinstance(data, np.ndarray):
        data = np.array(data, ndmin=1)

    atm = pt.Atom.from_dtype(data.dtype)
    arr = h5f.createCArray(root, name, atm, data.shape,
                           createparents=True, filters=filters)
    arr[:] = data
    h5f.close()
    return


from pyhdf.SD import SD, SDC


def loadhdf4(fname, variable):
    data_set = SD(fname, SDC.READ)
    return data_set.select(variable)[:]
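# --- Usage sketch (editor addition, hedged): a minimal round trip through
# --- writehdf5/readhdf5. It assumes the loopy package (lpy.struct) and the
# --- PyTables 2.x API used above (pt.File, listNodes, createCArray) are
# --- available; the file name "demo.h5" and the field names are illustrative.
if __name__ == "__main__":
    demo = {'fields': {'Bz': np.zeros((4, 4)), 'Ex': np.ones(8)}}
    # Nested dicts are written recursively, one CArray per leaf node.
    writehdf5("demo.h5", demo, path='/', backup=True)
    # Reading a group back returns a dict-like struct keyed by node name.
    back = readhdf5("demo.h5", path='/fields')
    print(back['Bz'].shape)
    print(back['Ex'].shape)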
gpl-2.0
-3,930,732,108,166,029,000
30.195876
77
0.61236
false
PeterHenell/performance-dashboard
performance-collector2/query.py
1
1713
import types


class Query:
    """
    Queries are a way for collectors to collect data. They are one way of getting data from the source.

    query_name - the name of the query
    key_column - the name of the key column in the result

    A Query does not produce anything itself but is a field of a source; it only contains metadata
    about the query. A source can add functions to Query for collecting data from some kind of server.

    get_data_fun - the function or callable class to call in order for the query to collect data
    mapping - elasticsearch mapping specific for this query, used when some of the fields from this
              query need to be mapped differently. Used during init of the indexes.
    non_data_fields = [] - fields which should not be part of the delta calculations but instead
              be sent directly to es.
    """
    def __init__(self, get_data, query_name, key_column, mapping, non_data_fields):
        assert isinstance(get_data, types.FunctionType) \
            or callable(get_data), "get_data must be a function or callable class"
        assert len(query_name) > 0, "query_name must be a string"
        assert len(key_column) > 0, "key_column must have some value"
        assert type(mapping) is dict, "mapping must be a dictionary"
        assert type(non_data_fields) is list, "non_data_fields must be a list"
        self.query_name = query_name
        self.key_column = key_column
        self.mapping = mapping
        self.non_data_fields = non_data_fields
        # Store the supplied callable under a private name. In the original code the
        # attribute was also called get_data, which shadowed the get_data() method
        # below, so the validation there could never run (and calling the method
        # directly would have recursed).
        self._get_data = get_data

    def get_data(self):
        result = self._get_data()
        assert type(result) is list, "Result from get_data function must be list of dict"
        return result
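# --- Usage sketch (editor addition, hedged): shows how a collector-style query
# --- might be constructed. The fetch function, field names, and mapping below
# --- are illustrative only and not part of the original project.
if __name__ == "__main__":
    def fetch_rows():
        # A real collector would query a server here; this stub returns one row.
        return [{"db_name": "master", "reads": 10, "collected_at": "2020-01-01"}]

    q = Query(get_data=fetch_rows,
              query_name="db_io",
              key_column="db_name",
              mapping={},
              non_data_fields=["collected_at"])
    # get_data() validates that the callable returned a list of dicts.
    print(q.get_data())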
apache-2.0
7,730,568,456,058,332,000
41.825
117
0.669002
false
rcatwood/Savu
savu/plugins/loaders/multi_modal_loaders/i18_loaders/i18stxm_loader.py
1
1776
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
.. module:: I18stxm_loader
   :platform: Unix
   :synopsis: A class for loading I18's stxm data

.. moduleauthor:: Aaron Parsons <[email protected]>

"""

from savu.plugins.loaders.multi_modal_loaders.base_i18_multi_modal_loader import BaseI18MultiModalLoader
from savu.plugins.utils import register_plugin


@register_plugin
class I18stxmLoader(BaseI18MultiModalLoader):
    """
    A class to load tomography data from an NXstxm file
    :param stxm_detector: path to stxm. Default:'entry1/raster_counterTimer01/It'.
    """

    def __init__(self, name='I18stxmLoader'):
        super(I18stxmLoader, self).__init__(name)

    def setup(self):
        """ Define the input nexus file

        :param path: The full path of the NeXus file to load.
        :type path: str
        """
        data_str = self.parameters['stxm_detector']
        data_obj = self.multi_modal_setup('stxm')
        data_obj.data = data_obj.backing_file[data_str]
        data_obj.set_shape(data_obj.data.shape)
        self.set_motors(data_obj, 'stxm')
        self.add_patterns_based_on_acquisition(data_obj, 'stxm')
        self.set_data_reduction_params(data_obj)
gpl-3.0
2,177,186,371,746,904,600
31.888889
104
0.695383
false
aamlima/discobot
MPUtils.py
1
4400
import array
import os

from disco.voice.playable import (AbstractOpus, BasePlayable, BufferedIO,
                                  OpusEncoder, YoutubeDLInput)
from disco.voice.queue import PlayableQueue
from gevent.fileobject import FileObjectThread


class YoutubeDLFInput(YoutubeDLInput):
    def read(self, sz):
        # A zero-byte read is used to prime the on-disk cache for this video id.
        if sz == 0:
            if not os.path.isfile(os.path.join('data', self.info['id'])):
                f_obj = open(os.path.join('data', self.info['id']), 'wb')
                file = FileObjectThread(f_obj, 'wb')
                super(YoutubeDLFInput, self).read(0)
                file.write(self._buffer.read())
                file.close()
                self.close()
            return b''

        if not self._buffer:
            if os.path.isfile(os.path.join('data', self.info['id'])):
                with open(os.path.join('data', self.info['id']), 'rb') as file:
                    self._buffer = BufferedIO(file.read())
            else:
                f_obj = open(os.path.join('data', self.info['id']), 'wb')
                file = FileObjectThread(f_obj, 'wb')
                super(YoutubeDLFInput, self).read(0)
                file.write(self._buffer.read())
                file.close()
            # Rewind the freshly created buffer before serving data from it.
            self._buffer.seekable() and self._buffer.seek(0)

        return self._buffer.read(sz)

    def close(self):
        if self._buffer:
            self._buffer.close()
            self._buffer = None


class UnbufferedOpusEncoderPlayable(BasePlayable, OpusEncoder, AbstractOpus):
    def __init__(self, source, *args, **kwargs):
        self.source = source
        if hasattr(source, 'info'):
            self.info = source.info
        self.volume = 0.1
        library_path = kwargs.pop('library_path', None)
        AbstractOpus.__init__(self, *args, **kwargs)
        OpusEncoder.__init__(self, self.sampling_rate, self.channels, library_path=library_path)
        self.source.read(0)

    def next_frame(self):
        if self.source:
            raw = self.source.read(self.frame_size)
            if len(raw) < self.frame_size:
                self.source.close()
                return None

            if self.volume == 1.0:
                return self.encode(raw, self.samples_per_frame)

            # Scale each 16-bit sample by the current volume, clamped to the int16 range.
            buffer = array.array('h', raw)
            for pos, byte in enumerate(buffer):
                buffer[pos] = int(min(32767, max(-32767, byte * self.volume)))
            return self.encode(buffer.tobytes(), self.samples_per_frame)
        return None


class CircularQueue(PlayableQueue):
    def get(self):
        # pylint: disable=W0212
        item = self._get()
        if item.source and item.source._buffer and item.source._buffer.seekable():
            item.source._buffer.seek(0)
        self.append(item)
        return item

    def remove(self, index):
        if len(self._data) > index:
            return self._data.pop(index)
        return None

    def prepend(self, item):
        self._data.insert(0, item)
        if self._event:
            self._event.set()
            self._event = None

    def contains(self, item, func):
        for i in self._data:
            if func(i, item):
                return True
        return False


def gen_player_data(player):
    data = {}
    data['paused'] = True if player.paused else False
    data['volume'] = player.volume
    data['duckingVolume'] = player.ducking_volume
    data['autopause'] = player.autopause
    data['autovolume'] = player.autovolume
    data['queue'] = len(player.queue)
    data['items'] = len(player.items)
    data['playlist'] = [{'id': value.info['id'],
                         'title': value.info['title'],
                         'duration': value.info['duration'],
                         'webpageUrl': value.info['webpage_url']} for value in player.queue]
    data['curItem'] = None
    if player.now_playing:
        data['curItem'] = {
            'id': player.now_playing.info['id'],
            'duration': player.now_playing.info['duration'],
            'webpageUrl': player.now_playing.info['webpage_url'],
            'title': player.now_playing.info['title'],
            'thumbnail': player.now_playing.info['thumbnail'],
            'fps': player.now_playing.sampling_rate * player.now_playing.sample_size / player.now_playing.frame_size,
            'frame': player.tell_or_seek() / player.now_playing.frame_size
        }
    return data
gpl-3.0
3,910,258,917,146,183,000
33.645669
126
0.560227
false
degoldschmidt/fly-analysis
src/experiment_stop.py
1
2820
""" Experiment stop (experiment_stop.py) This script takes a video and calculates the frame number of when the experiment was stopped, based on overall pixel changes. D.Goldschmidt - 09/08/16 """ import warnings warnings.filterwarnings("ignore") import numpy as np import cv2 import os import matplotlib.pyplot as plt ## package for plotting __VERBOSE = True def averag(input): sum = 0.*input[0] for vals in input: sum += vals return sum/len(input) # Func to print out only if VERBOSE def vprint(*arg): if __VERBOSE: s= " ".join( map( str, arg ) ) print(s) # Local test #folder = "/Users/degoldschmidt/" #filename = "output.avi" folder = "/Volumes/Elements/raw_data_flies/0727/" filename="VidSave_0726_20-13.avi" profolder = "../tmp/vid/" if not os.path.isfile(profolder + filename): os.system("ffmpeg -i " + folder + filename + " -vf fps=fps=4 -f avi -c:v libx264 -s 50x50 " + profolder + filename) ## maybe in fly logger cap = cv2.VideoCapture(profolder + filename) if not cap.isOpened(): print("Error: Could not open") length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) fps = cap.get(cv2.CAP_PROP_FPS) print("Open video", profolder + filename, "(","#frames:", length, "dims:", (width,height), "fps:", fps,")") delta = [] i=0 filter = int(500/fps) motionthr=50 frames = filter*[None] while(i+1 < length): if i%1000==0: vprint(i) # Capture frame-by-frame ret, gray = cap.read() # Our operations on the frame come here gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY) # center and radius are the results of HoughCircle # mask is a CV_8UC1 image with 0 mask = np.zeros((gray.shape[0], gray.shape[1]), dtype = "uint8") cv2.circle( mask, (int(width/2),int(height/2)), int(width/2-width/20), (255,255,255), -1, 8, 0 ) res = np.zeros((gray.shape[0], gray.shape[1]), dtype = "uint8") np.bitwise_and(gray, mask, res) if i>0: frames[(i-1)%filter] = res-oldpx if i > filter-1: out = averag(frames) if __VERBOSE: cv2.imshow('frame', out) delta.append(sum(sum(out))) oldpx = res i=i+1 if cv2.waitKey(1) & 0xFF == ord('q'): break cap.release() cv2.destroyAllWindows() ddelta = [j-i for i, j in zip(delta[:-1], delta[1:])] plt.plot(delta[:],'k--', label="Sum of lowpass-filtered px changes") plt.plot(ddelta[:],'r-', label= "Temp. difference") plt.legend() if __VERBOSE: plt.show() ddelta = np.asarray(ddelta) stopframes = np.asarray(np.nonzero(ddelta > motionthr)) if stopframes.size > 0: print("Experiment stops at frame", stopframes[0,0]) else: print("No experiment stop detected")
gpl-3.0
2,363,480,963,356,214,000
27.21
142
0.63227
false
Amarandus/xmppsh
plugins/ipdb.py
1
1331
import socket
import sqlite3


class Plugin:
    def __init__(self, parser, sqlitecur):
        self._cursor = sqlitecur
        self._cursor.execute("CREATE TABLE IF NOT EXISTS IPs(Id INT, Name TEXT, IP TEXT, MUC TEXT)")
        parser.registerCommand([(u"ip", ), (u"list", "List all registered IPs", self._list)])
        parser.registerCommand([(u"ip", ), (u"register", "Register your IP", self._register)])

    def _list(self, ignore, fromUser):
        self._cursor.execute("SELECT Name, IP FROM IPs WHERE MUC=?", (fromUser.bare, ))
        rows = self._cursor.fetchall()
        msgtext = ""
        for r in rows:
            msgtext += "%s - %s\n" % (r[1], r[0])
        return (msgtext, 0)

    def _register(self, ip, fromUser):
        try:
            socket.inet_aton(ip[0])
            name = fromUser.resource
            muc = fromUser.bare
            self._cursor.execute("UPDATE OR IGNORE IPs SET IP=? WHERE Name=? AND MUC=?",
                                 (ip[0], name, muc))
            if self._cursor.rowcount == 0:
                self._cursor.execute("INSERT OR IGNORE INTO IPs (IP, Name, MUC) VALUES (?, ?, ?)",
                                     (ip[0], name, muc))
            return ("Your IP %s has been added" % (ip[0]), 1)
        except socket.error:
            return ("Your IP looks malformed", 1)
        except:
            return ("You omitted the IP", 1)
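# --- Usage sketch (editor addition, hedged): exercises the plugin with an
# --- in-memory SQLite cursor and stand-in parser/user objects. The stub
# --- classes and the JID/IP values below are illustrative only and not part
# --- of the original bot.
if __name__ == "__main__":
    class _StubParser(object):
        def registerCommand(self, spec):
            # The real XMPP bot would wire the command handlers here.
            pass

    class _StubUser(object):
        bare = "room@conference.example.org"
        resource = "alice"

    cur = sqlite3.connect(":memory:").cursor()
    plugin = Plugin(_StubParser(), cur)
    print(plugin._register(["192.168.0.2"], _StubUser()))
    print(plugin._list([], _StubUser()))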
mit
-3,799,865,405,480,094,000
40.59375
118
0.557476
false
jpzk/evopy
evopy/examples/problems/TR/ORIDSESSVC.py
1
2168
'''
This file is part of evopy.

Copyright 2012, Jendrik Poloczek

evopy is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.

evopy is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details.

You should have received a copy of the GNU General Public License along
with evopy. If not, see <http://www.gnu.org/licenses/>.
'''

from sys import path
path.append("../../../..")

from numpy import matrix
from sklearn.cross_validation import KFold

from evopy.strategies.ori_dses_svc import ORIDSESSVC
from evopy.problems.tr_problem import TRProblem
from evopy.simulators.simulator import Simulator

from evopy.metamodel.dses_svc_linear_meta_model import DSESSVCLinearMetaModel
from evopy.operators.scaling.scaling_standardscore import ScalingStandardscore
from evopy.metamodel.cv.svc_cv_sklearn_grid_linear import SVCCVSkGridLinear
from evopy.operators.termination.accuracy import Accuracy


def get_method():
    sklearn_cv = SVCCVSkGridLinear(
        C_range=[2 ** i for i in range(-1, 14, 2)],
        cv_method=KFold(20, 5))

    meta_model = DSESSVCLinearMetaModel(
        window_size=10,
        scaling=ScalingStandardscore(),
        crossvalidation=sklearn_cv,
        repair_mode='mirror')

    method = ORIDSESSVC(
        mu=15,
        lambd=100,
        theta=0.3,
        pi=70,
        initial_sigma=matrix([[4.5, 4.5]]),
        delta=4.5,
        tau0=0.5,
        tau1=0.6,
        initial_pos=matrix([[10.0, 10.0]]),
        beta=1.0,
        meta_model=meta_model)

    return method


if __name__ == "__main__":
    problem = TRProblem()
    optimizer = get_method()

    print optimizer.description
    print problem.description

    optfit = problem.optimum_fitness()
    sim = Simulator(optimizer, problem, Accuracy(optfit, 10**(-3)))
    results = sim.simulate()
gpl-3.0
-3,507,452,398,660,205,600
30.42029
79
0.688653
false
dhcrzf/zulip
corporate/tests/test_stripe.py
1
50746
from datetime import datetime from decimal import Decimal from functools import wraps from mock import Mock, patch import operator import os import re import sys from typing import Any, Callable, Dict, List, Optional, TypeVar, Tuple, cast import ujson import json from django.core import signing from django.core.management import call_command from django.core.urlresolvers import get_resolver from django.http import HttpResponse from django.utils.timezone import utc as timezone_utc import stripe from zerver.lib.actions import do_deactivate_user, do_create_user, \ do_activate_user, do_reactivate_user, do_create_realm from zerver.lib.test_classes import ZulipTestCase from zerver.lib.timestamp import timestamp_to_datetime, datetime_to_timestamp from zerver.models import Realm, UserProfile, get_realm, RealmAuditLog from corporate.lib.stripe import catch_stripe_errors, attach_discount_to_realm, \ get_seat_count, sign_string, unsign_string, \ BillingError, StripeCardError, StripeConnectionError, stripe_get_customer, \ DEFAULT_INVOICE_DAYS_UNTIL_DUE, MIN_INVOICED_LICENSES, do_create_customer, \ add_months, next_month, next_renewal_date, renewal_amount, \ compute_plan_parameters, update_or_create_stripe_customer from corporate.models import Customer, CustomerPlan, Plan, Coupon from corporate.views import payment_method_string import corporate.urls CallableT = TypeVar('CallableT', bound=Callable[..., Any]) GENERATE_STRIPE_FIXTURES = False STRIPE_FIXTURES_DIR = "corporate/tests/stripe_fixtures" # TODO: check that this creates a token similar to what is created by our # actual Stripe Checkout flows def stripe_create_token(card_number: str="4242424242424242") -> stripe.Token: return stripe.Token.create( card={ "number": card_number, "exp_month": 3, "exp_year": 2033, "cvc": "333", "name": "Ada Starr", "address_line1": "Under the sea,", "address_city": "Pacific", "address_zip": "33333", "address_country": "United States", }) def stripe_fixture_path(decorated_function_name: str, mocked_function_name: str, call_count: int) -> str: # Make the eventual filename a bit shorter, and also we conventionally # use test_* for the python test files if decorated_function_name[:5] == 'test_': decorated_function_name = decorated_function_name[5:] return "{}/{}:{}.{}.json".format( STRIPE_FIXTURES_DIR, decorated_function_name, mocked_function_name[7:], call_count) def fixture_files_for_function(decorated_function: CallableT) -> List[str]: # nocoverage decorated_function_name = decorated_function.__name__ if decorated_function_name[:5] == 'test_': decorated_function_name = decorated_function_name[5:] return sorted(['{}/{}'.format(STRIPE_FIXTURES_DIR, f) for f in os.listdir(STRIPE_FIXTURES_DIR) if f.startswith(decorated_function_name + ':')]) def generate_and_save_stripe_fixture(decorated_function_name: str, mocked_function_name: str, mocked_function: CallableT) -> Callable[[Any, Any], Any]: # nocoverage def _generate_and_save_stripe_fixture(*args: Any, **kwargs: Any) -> Any: # Note that mock is not the same as mocked_function, even though their # definitions look the same mock = operator.attrgetter(mocked_function_name)(sys.modules[__name__]) fixture_path = stripe_fixture_path(decorated_function_name, mocked_function_name, mock.call_count) try: # Talk to Stripe stripe_object = mocked_function(*args, **kwargs) except stripe.error.StripeError as e: with open(fixture_path, 'w') as f: error_dict = e.__dict__ error_dict["headers"] = dict(error_dict["headers"]) f.write(json.dumps(error_dict, indent=2, separators=(',', ': '), 
sort_keys=True) + "\n") raise e with open(fixture_path, 'w') as f: if stripe_object is not None: f.write(str(stripe_object) + "\n") else: f.write("{}\n") return stripe_object return _generate_and_save_stripe_fixture def read_stripe_fixture(decorated_function_name: str, mocked_function_name: str) -> Callable[[Any, Any], Any]: def _read_stripe_fixture(*args: Any, **kwargs: Any) -> Any: mock = operator.attrgetter(mocked_function_name)(sys.modules[__name__]) fixture_path = stripe_fixture_path(decorated_function_name, mocked_function_name, mock.call_count) fixture = ujson.load(open(fixture_path, 'r')) # Check for StripeError fixtures if "json_body" in fixture: requestor = stripe.api_requestor.APIRequestor() # This function will raise the relevant StripeError according to the fixture requestor.interpret_response(fixture["http_body"], fixture["http_status"], fixture["headers"]) return stripe.util.convert_to_stripe_object(fixture) return _read_stripe_fixture def delete_fixture_data(decorated_function: CallableT) -> None: # nocoverage for fixture_file in fixture_files_for_function(decorated_function): os.remove(fixture_file) def normalize_fixture_data(decorated_function: CallableT, tested_timestamp_fields: List[str]=[]) -> None: # nocoverage # stripe ids are all of the form cus_D7OT2jf5YAtZQ2 id_lengths = [ ('cus', 14), ('sub', 14), ('si', 14), ('sli', 14), ('req', 14), ('tok', 24), ('card', 24), ('txn', 24), ('ch', 24), ('in', 24), ('ii', 24), ('test', 12), ('src_client_secret', 24), ('src', 24), ('invst', 26)] # We'll replace cus_D7OT2jf5YAtZQ2 with something like cus_NORMALIZED0001 pattern_translations = { "%s_[A-Za-z0-9]{%d}" % (prefix, length): "%s_NORMALIZED%%0%dd" % (prefix, length - 10) for prefix, length in id_lengths } # We'll replace "invoice_prefix": "A35BC4Q" with something like "invoice_prefix": "NORMA01" pattern_translations.update({ '"invoice_prefix": "([A-Za-z0-9]{7})"': 'NORMA%02d', '"fingerprint": "([A-Za-z0-9]{16})"': 'NORMALIZED%06d', '"number": "([A-Za-z0-9]{7}-[A-Za-z0-9]{4})"': 'NORMALI-%04d', '"address": "([A-Za-z0-9]{9}-test_[A-Za-z0-9]{12})"': '000000000-test_NORMALIZED%02d', # Don't use (..) notation, since the matched strings may be small integers that will also match # elsewhere in the file '"realm_id": "[0-9]+"': '"realm_id": "%d"', }) # Normalizing across all timestamps still causes a lot of variance run to run, which is # why we're doing something a bit more complicated for i, timestamp_field in enumerate(tested_timestamp_fields): # Don't use (..) 
notation, since the matched timestamp can easily appear in other fields pattern_translations[ '"%s": 1[5-9][0-9]{8}(?![0-9-])' % (timestamp_field,) ] = '"%s": 1%02d%%07d' % (timestamp_field, i+1) normalized_values = {pattern: {} for pattern in pattern_translations.keys()} # type: Dict[str, Dict[str, str]] for fixture_file in fixture_files_for_function(decorated_function): with open(fixture_file, "r") as f: file_content = f.read() for pattern, translation in pattern_translations.items(): for match in re.findall(pattern, file_content): if match not in normalized_values[pattern]: normalized_values[pattern][match] = translation % (len(normalized_values[pattern]) + 1,) file_content = file_content.replace(match, normalized_values[pattern][match]) file_content = re.sub(r'(?<="risk_score": )(\d+)', '00', file_content) file_content = re.sub(r'(?<="times_redeemed": )(\d+)', '00', file_content) # Dates file_content = re.sub(r'(?<="Date": )"(.* GMT)"', '"NORMALIZED DATETIME"', file_content) file_content = re.sub(r'[0-3]\d [A-Z][a-z]{2} 20[1-2]\d', 'NORMALIZED DATE', file_content) # IP addresses file_content = re.sub(r'"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}"', '"0.0.0.0"', file_content) # All timestamps not in tested_timestamp_fields file_content = re.sub(r': (1[5-9][0-9]{8})(?![0-9-])', ': 1000000000', file_content) with open(fixture_file, "w") as f: f.write(file_content) MOCKED_STRIPE_FUNCTION_NAMES = ["stripe.{}".format(name) for name in [ "Charge.create", "Charge.list", "Coupon.create", "Customer.create", "Customer.retrieve", "Customer.save", "Invoice.create", "Invoice.finalize_invoice", "Invoice.list", "Invoice.upcoming", "InvoiceItem.create", "InvoiceItem.list", "Plan.create", "Product.create", "Subscription.create", "Subscription.delete", "Subscription.retrieve", "Subscription.save", "Token.create", ]] def mock_stripe(tested_timestamp_fields: List[str]=[], generate: Optional[bool]=None) -> Callable[[CallableT], CallableT]: def _mock_stripe(decorated_function: CallableT) -> CallableT: generate_fixture = generate if generate_fixture is None: generate_fixture = GENERATE_STRIPE_FIXTURES for mocked_function_name in MOCKED_STRIPE_FUNCTION_NAMES: mocked_function = operator.attrgetter(mocked_function_name)(sys.modules[__name__]) if generate_fixture: side_effect = generate_and_save_stripe_fixture( decorated_function.__name__, mocked_function_name, mocked_function) # nocoverage else: side_effect = read_stripe_fixture(decorated_function.__name__, mocked_function_name) decorated_function = patch(mocked_function_name, side_effect=side_effect)(decorated_function) @wraps(decorated_function) def wrapped(*args: Any, **kwargs: Any) -> Any: if generate_fixture: # nocoverage delete_fixture_data(decorated_function) val = decorated_function(*args, **kwargs) normalize_fixture_data(decorated_function, tested_timestamp_fields) return val else: return decorated_function(*args, **kwargs) return cast(CallableT, wrapped) return _mock_stripe # A Kandra is a fictional character that can become anything. Used as a # wildcard when testing for equality. class Kandra(object): # nocoverage: TODO def __eq__(self, other: Any) -> bool: return True class StripeTest(ZulipTestCase): def setUp(self, *mocks: Mock) -> None: # TODO # Unfortunately this test suite is likely not robust to users being # added in populate_db. A quick hack for now to ensure get_seat_count is 8 # for these tests (8, since that's what it was when the tests were written). 
realm = get_realm('zulip') seat_count = get_seat_count(get_realm('zulip')) assert(seat_count >= 8) if seat_count > 8: # nocoverage for user in UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False) \ .exclude(email__in=[ self.example_email('hamlet'), self.example_email('iago')])[6:]: user.is_active = False user.save(update_fields=['is_active']) self.assertEqual(get_seat_count(get_realm('zulip')), 8) self.seat_count = 8 self.signed_seat_count, self.salt = sign_string(str(self.seat_count)) # Choosing dates with corresponding timestamps below 1500000000 so that they are # not caught by our timestamp normalization regex in normalize_fixture_data self.now = datetime(2012, 1, 2, 3, 4, 5).replace(tzinfo=timezone_utc) self.next_month = datetime(2012, 2, 2, 3, 4, 5).replace(tzinfo=timezone_utc) self.next_year = datetime(2013, 1, 2, 3, 4, 5).replace(tzinfo=timezone_utc) def get_signed_seat_count_from_response(self, response: HttpResponse) -> Optional[str]: match = re.search(r'name=\"signed_seat_count\" value=\"(.+)\"', response.content.decode("utf-8")) return match.group(1) if match else None def get_salt_from_response(self, response: HttpResponse) -> Optional[str]: match = re.search(r'name=\"salt\" value=\"(\w+)\"', response.content.decode("utf-8")) return match.group(1) if match else None def upgrade(self, invoice: bool=False, talk_to_stripe: bool=True, realm: Optional[Realm]=None, del_args: List[str]=[], **kwargs: Any) -> HttpResponse: host_args = {} if realm is not None: # nocoverage: TODO host_args['HTTP_HOST'] = realm.host response = self.client_get("/upgrade/", **host_args) params = { 'schedule': 'annual', 'signed_seat_count': self.get_signed_seat_count_from_response(response), 'salt': self.get_salt_from_response(response)} # type: Dict[str, Any] if invoice: # send_invoice params.update({ 'billing_modality': 'send_invoice', 'licenses': 123}) else: # charge_automatically stripe_token = None if not talk_to_stripe: stripe_token = 'token' stripe_token = kwargs.get('stripe_token', stripe_token) if stripe_token is None: stripe_token = stripe_create_token().id params.update({ 'billing_modality': 'charge_automatically', 'license_management': 'automatic', 'stripe_token': stripe_token, }) params.update(kwargs) for key in del_args: if key in params: del params[key] for key, value in params.items(): params[key] = ujson.dumps(value) return self.client_post("/json/billing/upgrade", params, **host_args) @patch("corporate.lib.stripe.billing_logger.error") def test_catch_stripe_errors(self, mock_billing_logger_error: Mock) -> None: @catch_stripe_errors def raise_invalid_request_error() -> None: raise stripe.error.InvalidRequestError( "message", "param", "code", json_body={}) with self.assertRaises(BillingError) as context: raise_invalid_request_error() self.assertEqual('other stripe error', context.exception.description) mock_billing_logger_error.assert_called() @catch_stripe_errors def raise_card_error() -> None: error_message = "The card number is not a valid credit card number." 
json_body = {"error": {"message": error_message}} raise stripe.error.CardError(error_message, "number", "invalid_number", json_body=json_body) with self.assertRaises(StripeCardError) as context: raise_card_error() self.assertIn('not a valid credit card', context.exception.message) self.assertEqual('card error', context.exception.description) mock_billing_logger_error.assert_called() def test_billing_not_enabled(self) -> None: with self.settings(BILLING_ENABLED=False): self.login(self.example_email("iago")) response = self.client_get("/upgrade/") self.assert_in_success_response(["Page not found (404)"], response) @mock_stripe(tested_timestamp_fields=["created"]) def test_upgrade_by_card(self, *mocks: Mock) -> None: user = self.example_user("hamlet") self.login(user.email) response = self.client_get("/upgrade/") self.assert_in_success_response(['Pay annually'], response) self.assertNotEqual(user.realm.plan_type, Realm.STANDARD) self.assertFalse(Customer.objects.filter(realm=user.realm).exists()) # Click "Make payment" in Stripe Checkout with patch('corporate.lib.stripe.timezone_now', return_value=self.now): self.upgrade() # Check that we correctly created a Customer object in Stripe stripe_customer = stripe_get_customer(Customer.objects.get(realm=user.realm).stripe_customer_id) self.assertEqual(stripe_customer.default_source.id[:5], 'card_') self.assertEqual(stripe_customer.description, "zulip (Zulip Dev)") self.assertEqual(stripe_customer.discount, None) self.assertEqual(stripe_customer.email, user.email) self.assertEqual(dict(stripe_customer.metadata), {'realm_id': str(user.realm.id), 'realm_str': 'zulip'}) # Check Charges in Stripe stripe_charges = [charge for charge in stripe.Charge.list(customer=stripe_customer.id)] self.assertEqual(len(stripe_charges), 1) self.assertEqual(stripe_charges[0].amount, 8000 * self.seat_count) # TODO: fix Decimal self.assertEqual(stripe_charges[0].description, "Upgrade to Zulip Standard, $80.0 x {}".format(self.seat_count)) self.assertEqual(stripe_charges[0].receipt_email, user.email) self.assertEqual(stripe_charges[0].statement_descriptor, "Zulip Standard") # Check Invoices in Stripe stripe_invoices = [invoice for invoice in stripe.Invoice.list(customer=stripe_customer.id)] self.assertEqual(len(stripe_invoices), 1) self.assertIsNotNone(stripe_invoices[0].finalized_at) invoice_params = { # auto_advance is False because the invoice has been paid 'amount_due': 0, 'amount_paid': 0, 'auto_advance': False, 'billing': 'charge_automatically', 'charge': None, 'status': 'paid', 'total': 0} for key, value in invoice_params.items(): self.assertEqual(stripe_invoices[0].get(key), value) # Check Line Items on Stripe Invoice stripe_line_items = [item for item in stripe_invoices[0].lines] self.assertEqual(len(stripe_line_items), 2) line_item_params = { 'amount': 8000 * self.seat_count, 'description': 'Zulip Standard', 'discountable': False, 'period': { 'end': datetime_to_timestamp(self.next_year), 'start': datetime_to_timestamp(self.now)}, # There's no unit_amount on Line Items, probably because it doesn't show up on the # user-facing invoice. We could pull the Invoice Item instead and test unit_amount there, # but testing the amount and quantity seems sufficient. 
'plan': None, 'proration': False, 'quantity': self.seat_count} for key, value in line_item_params.items(): self.assertEqual(stripe_line_items[0].get(key), value) line_item_params = { 'amount': -8000 * self.seat_count, 'description': 'Payment (Card ending in 4242)', 'discountable': False, 'plan': None, 'proration': False, 'quantity': 1} for key, value in line_item_params.items(): self.assertEqual(stripe_line_items[1].get(key), value) # Check that we correctly populated Customer and CustomerPlan in Zulip customer = Customer.objects.filter(stripe_customer_id=stripe_customer.id, realm=user.realm).first() self.assertTrue(CustomerPlan.objects.filter( customer=customer, licenses=self.seat_count, automanage_licenses=True, price_per_license=8000, fixed_price=None, discount=None, billing_cycle_anchor=self.now, billing_schedule=CustomerPlan.ANNUAL, billed_through=self.now, next_billing_date=self.next_month, tier=CustomerPlan.STANDARD, status=CustomerPlan.ACTIVE).exists()) # Check RealmAuditLog audit_log_entries = list(RealmAuditLog.objects.filter(acting_user=user) .values_list('event_type', 'event_time').order_by('id')) self.assertEqual(audit_log_entries, [ (RealmAuditLog.STRIPE_CUSTOMER_CREATED, timestamp_to_datetime(stripe_customer.created)), (RealmAuditLog.STRIPE_CARD_CHANGED, timestamp_to_datetime(stripe_customer.created)), (RealmAuditLog.CUSTOMER_PLAN_CREATED, self.now), # TODO: Check for REALM_PLAN_TYPE_CHANGED # (RealmAuditLog.REALM_PLAN_TYPE_CHANGED, Kandra()), ]) self.assertEqual(ujson.loads(RealmAuditLog.objects.filter( event_type=RealmAuditLog.CUSTOMER_PLAN_CREATED).values_list( 'extra_data', flat=True).first())['licenses'], self.seat_count) # Check that we correctly updated Realm realm = get_realm("zulip") self.assertEqual(realm.plan_type, Realm.STANDARD) self.assertEqual(realm.max_invites, Realm.INVITES_STANDARD_REALM_DAILY_MAX) # Check that we can no longer access /upgrade response = self.client_get("/upgrade/") self.assertEqual(response.status_code, 302) self.assertEqual('/billing/', response.url) # Check /billing has the correct information response = self.client_get("/billing/") self.assert_not_in_success_response(['Pay annually'], response) for substring in [ 'Zulip Standard', str(self.seat_count), 'Your plan will renew on', 'January 2, 2013', '$%s.00' % (80 * self.seat_count,), 'Visa ending in 4242', 'Update card']: self.assert_in_response(substring, response) @mock_stripe(tested_timestamp_fields=["created"]) def test_upgrade_by_invoice(self, *mocks: Mock) -> None: user = self.example_user("hamlet") self.login(user.email) # Click "Make payment" in Stripe Checkout with patch('corporate.lib.stripe.timezone_now', return_value=self.now): self.upgrade(invoice=True) # Check that we correctly created a Customer in Stripe stripe_customer = stripe_get_customer(Customer.objects.get(realm=user.realm).stripe_customer_id) # It can take a second for Stripe to attach the source to the customer, and in # particular it may not be attached at the time stripe_get_customer is called above, # causing test flakes. 
# So commenting the next line out, but leaving it here so future readers know what # is supposed to happen here # self.assertEqual(stripe_customer.default_source.type, 'ach_credit_transfer') # Check Charges in Stripe self.assertFalse(stripe.Charge.list(customer=stripe_customer.id)) # Check Invoices in Stripe stripe_invoices = [invoice for invoice in stripe.Invoice.list(customer=stripe_customer.id)] self.assertEqual(len(stripe_invoices), 1) self.assertIsNotNone(stripe_invoices[0].due_date) self.assertIsNotNone(stripe_invoices[0].finalized_at) invoice_params = { 'amount_due': 8000 * 123, 'amount_paid': 0, 'attempt_count': 0, 'auto_advance': True, 'billing': 'send_invoice', 'statement_descriptor': 'Zulip Standard', 'status': 'open', 'total': 8000 * 123} for key, value in invoice_params.items(): self.assertEqual(stripe_invoices[0].get(key), value) # Check Line Items on Stripe Invoice stripe_line_items = [item for item in stripe_invoices[0].lines] self.assertEqual(len(stripe_line_items), 1) line_item_params = { 'amount': 8000 * 123, 'description': 'Zulip Standard', 'discountable': False, 'period': { 'end': datetime_to_timestamp(self.next_year), 'start': datetime_to_timestamp(self.now)}, 'plan': None, 'proration': False, 'quantity': 123} for key, value in line_item_params.items(): self.assertEqual(stripe_line_items[0].get(key), value) # Check that we correctly populated Customer and CustomerPlan in Zulip customer = Customer.objects.filter(stripe_customer_id=stripe_customer.id, realm=user.realm).first() self.assertTrue(CustomerPlan.objects.filter( customer=customer, licenses=123, automanage_licenses=False, charge_automatically=False, price_per_license=8000, fixed_price=None, discount=None, billing_cycle_anchor=self.now, billing_schedule=CustomerPlan.ANNUAL, billed_through=self.now, next_billing_date=self.next_year, tier=CustomerPlan.STANDARD, status=CustomerPlan.ACTIVE).exists()) # Check RealmAuditLog audit_log_entries = list(RealmAuditLog.objects.filter(acting_user=user) .values_list('event_type', 'event_time').order_by('id')) self.assertEqual(audit_log_entries, [ (RealmAuditLog.STRIPE_CUSTOMER_CREATED, timestamp_to_datetime(stripe_customer.created)), (RealmAuditLog.CUSTOMER_PLAN_CREATED, self.now), # TODO: Check for REALM_PLAN_TYPE_CHANGED # (RealmAuditLog.REALM_PLAN_TYPE_CHANGED, Kandra()), ]) self.assertEqual(ujson.loads(RealmAuditLog.objects.filter( event_type=RealmAuditLog.CUSTOMER_PLAN_CREATED).values_list( 'extra_data', flat=True).first())['licenses'], 123) # Check that we correctly updated Realm realm = get_realm("zulip") self.assertEqual(realm.plan_type, Realm.STANDARD) self.assertEqual(realm.max_invites, Realm.INVITES_STANDARD_REALM_DAILY_MAX) # Check that we can no longer access /upgrade response = self.client_get("/upgrade/") self.assertEqual(response.status_code, 302) self.assertEqual('/billing/', response.url) # Check /billing has the correct information response = self.client_get("/billing/") self.assert_not_in_success_response(['Pay annually', 'Update card'], response) for substring in [ 'Zulip Standard', str(123), 'Your plan will renew on', 'January 2, 2013', '$9,840.00', # 9840 = 80 * 123 'Billed by invoice']: self.assert_in_response(substring, response) @mock_stripe() def test_billing_page_permissions(self, *mocks: Mock) -> None: # Check that non-admins can access /upgrade via /billing, when there is no Customer object self.login(self.example_email('hamlet')) response = self.client_get("/billing/") self.assertEqual(response.status_code, 302) self.assertEqual('/upgrade/', 
response.url) # Check that non-admins can sign up and pay self.upgrade() # Check that the non-admin hamlet can still access /billing response = self.client_get("/billing/") self.assert_in_success_response(["for billing history or to make changes"], response) # Check admins can access billing, even though they are not a billing admin self.login(self.example_email('iago')) response = self.client_get("/billing/") self.assert_in_success_response(["for billing history or to make changes"], response) # Check that a non-admin, non-billing admin user does not have access self.login(self.example_email("cordelia")) response = self.client_get("/billing/") self.assert_in_success_response(["You must be an organization administrator"], response) @mock_stripe(tested_timestamp_fields=["created"]) def test_upgrade_by_card_with_outdated_seat_count(self, *mocks: Mock) -> None: self.login(self.example_email("hamlet")) new_seat_count = 23 # Change the seat count while the user is going through the upgrade flow with patch('corporate.lib.stripe.get_seat_count', return_value=new_seat_count): self.upgrade() stripe_customer_id = Customer.objects.first().stripe_customer_id # Check that the Charge used the old quantity, not new_seat_count self.assertEqual(8000 * self.seat_count, [charge for charge in stripe.Charge.list(customer=stripe_customer_id)][0].amount) # Check that the invoice has a credit for the old amount and a charge for the new one stripe_invoice = [invoice for invoice in stripe.Invoice.list(customer=stripe_customer_id)][0] self.assertEqual([8000 * new_seat_count, -8000 * self.seat_count], [item.amount for item in stripe_invoice.lines]) # Check CustomerPlan and RealmAuditLog have the new amount self.assertEqual(CustomerPlan.objects.first().licenses, new_seat_count) self.assertEqual(ujson.loads(RealmAuditLog.objects.filter( event_type=RealmAuditLog.CUSTOMER_PLAN_CREATED).values_list( 'extra_data', flat=True).first())['licenses'], new_seat_count) @mock_stripe() def test_upgrade_where_first_card_fails(self, *mocks: Mock) -> None: user = self.example_user("hamlet") self.login(user.email) # From https://stripe.com/docs/testing#cards: Attaching this card to # a Customer object succeeds, but attempts to charge the customer fail. 
with patch("corporate.lib.stripe.billing_logger.error") as mock_billing_logger: self.upgrade(stripe_token=stripe_create_token('4000000000000341').id) mock_billing_logger.assert_called() # Check that we created a Customer object but no CustomerPlan stripe_customer_id = Customer.objects.get(realm=get_realm('zulip')).stripe_customer_id self.assertFalse(CustomerPlan.objects.exists()) # Check that we created a Customer in stripe, a failed Charge, and no Invoices or Invoice Items self.assertTrue(stripe_get_customer(stripe_customer_id)) stripe_charges = [charge for charge in stripe.Charge.list(customer=stripe_customer_id)] self.assertEqual(len(stripe_charges), 1) self.assertEqual(stripe_charges[0].failure_code, 'card_declined') # TODO: figure out what these actually are self.assertFalse(stripe.Invoice.list(customer=stripe_customer_id)) self.assertFalse(stripe.InvoiceItem.list(customer=stripe_customer_id)) # Check that we correctly populated RealmAuditLog audit_log_entries = list(RealmAuditLog.objects.filter(acting_user=user) .values_list('event_type', flat=True).order_by('id')) self.assertEqual(audit_log_entries, [RealmAuditLog.STRIPE_CUSTOMER_CREATED, RealmAuditLog.STRIPE_CARD_CHANGED]) # Check that we did not update Realm realm = get_realm("zulip") self.assertNotEqual(realm.plan_type, Realm.STANDARD) # Check that we still get redirected to /upgrade response = self.client_get("/billing/") self.assertEqual(response.status_code, 302) self.assertEqual('/upgrade/', response.url) # Try again, with a valid card, after they added a few users with patch('corporate.lib.stripe.get_seat_count', return_value=23): with patch('corporate.views.get_seat_count', return_value=23): self.upgrade() customer = Customer.objects.get(realm=get_realm('zulip')) # It's impossible to create two Customers, but check that we didn't # change stripe_customer_id self.assertEqual(customer.stripe_customer_id, stripe_customer_id) # Check that we successfully added a CustomerPlan self.assertTrue(CustomerPlan.objects.filter(customer=customer, licenses=23).exists()) # Check the Charges and Invoices in Stripe self.assertEqual(8000 * 23, [charge for charge in stripe.Charge.list(customer=stripe_customer_id)][0].amount) stripe_invoice = [invoice for invoice in stripe.Invoice.list(customer=stripe_customer_id)][0] self.assertEqual([8000 * 23, -8000 * 23], [item.amount for item in stripe_invoice.lines]) # Check that we correctly populated RealmAuditLog audit_log_entries = list(RealmAuditLog.objects.filter(acting_user=user) .values_list('event_type', flat=True).order_by('id')) # TODO: Test for REALM_PLAN_TYPE_CHANGED as the last entry self.assertEqual(audit_log_entries, [RealmAuditLog.STRIPE_CUSTOMER_CREATED, RealmAuditLog.STRIPE_CARD_CHANGED, RealmAuditLog.STRIPE_CARD_CHANGED, RealmAuditLog.CUSTOMER_PLAN_CREATED]) # Check that we correctly updated Realm realm = get_realm("zulip") self.assertEqual(realm.plan_type, Realm.STANDARD) # Check that we can no longer access /upgrade response = self.client_get("/upgrade/") self.assertEqual(response.status_code, 302) self.assertEqual('/billing/', response.url) def test_upgrade_with_tampered_seat_count(self) -> None: self.login(self.example_email("hamlet")) response = self.upgrade(talk_to_stripe=False, salt='badsalt') self.assert_json_error_contains(response, "Something went wrong. 
Please contact") self.assertEqual(ujson.loads(response.content)['error_description'], 'tampered seat count') def test_check_upgrade_parameters(self) -> None: # Tests all the error paths except 'not enough licenses' def check_error(error_description: str, upgrade_params: Dict[str, Any], del_args: List[str]=[]) -> None: response = self.upgrade(talk_to_stripe=False, del_args=del_args, **upgrade_params) self.assert_json_error_contains(response, "Something went wrong. Please contact") self.assertEqual(ujson.loads(response.content)['error_description'], error_description) self.login(self.example_email("hamlet")) check_error('unknown billing_modality', {'billing_modality': 'invalid'}) check_error('unknown schedule', {'schedule': 'invalid'}) check_error('unknown license_management', {'license_management': 'invalid'}) check_error('autopay with no card', {}, del_args=['stripe_token']) def test_upgrade_license_counts(self) -> None: def check_error(invoice: bool, licenses: Optional[int], min_licenses_in_response: int, upgrade_params: Dict[str, Any]={}) -> None: if licenses is None: del_args = ['licenses'] else: del_args = [] upgrade_params['licenses'] = licenses response = self.upgrade(invoice=invoice, talk_to_stripe=False, del_args=del_args, **upgrade_params) self.assert_json_error_contains(response, "at least {} users".format(min_licenses_in_response)) self.assertEqual(ujson.loads(response.content)['error_description'], 'not enough licenses') def check_success(invoice: bool, licenses: Optional[int], upgrade_params: Dict[str, Any]={}) -> None: if licenses is None: del_args = ['licenses'] else: del_args = [] upgrade_params['licenses'] = licenses with patch('corporate.views.process_initial_upgrade'): response = self.upgrade(invoice=invoice, talk_to_stripe=False, del_args=del_args, **upgrade_params) self.assert_json_success(response) self.login(self.example_email("hamlet")) # Autopay with licenses < seat count check_error(False, self.seat_count - 1, self.seat_count, {'license_management': 'manual'}) # Autopay with not setting licenses check_error(False, None, self.seat_count, {'license_management': 'manual'}) # Invoice with licenses < MIN_INVOICED_LICENSES check_error(True, MIN_INVOICED_LICENSES - 1, MIN_INVOICED_LICENSES) # Invoice with licenses < seat count with patch("corporate.views.MIN_INVOICED_LICENSES", 3): check_error(True, 4, self.seat_count) # Invoice with not setting licenses check_error(True, None, MIN_INVOICED_LICENSES) # Autopay with automatic license_management check_success(False, None) # Autopay with automatic license_management, should just ignore the licenses entry check_success(False, self.seat_count) # Autopay check_success(False, self.seat_count, {'license_management': 'manual'}) check_success(False, self.seat_count + 10, {'license_management': 'mix'}) # Invoice check_success(True, self.seat_count + MIN_INVOICED_LICENSES) @patch("corporate.lib.stripe.billing_logger.error") def test_upgrade_with_uncaught_exception(self, mock_: Mock) -> None: self.login(self.example_email("hamlet")) with patch("corporate.views.process_initial_upgrade", side_effect=Exception): response = self.upgrade(talk_to_stripe=False) self.assert_json_error_contains(response, "Something went wrong. 
Please contact [email protected].") self.assertEqual(ujson.loads(response.content)['error_description'], 'uncaught exception during upgrade') def test_redirect_for_billing_home(self) -> None: user = self.example_user("iago") self.login(user.email) # No Customer yet; check that we are redirected to /upgrade response = self.client_get("/billing/") self.assertEqual(response.status_code, 302) self.assertEqual('/upgrade/', response.url) # Customer, but no billing relationship; check that we are still redirected to /upgrade Customer.objects.create( realm=user.realm, stripe_customer_id='cus_123', has_billing_relationship=False) response = self.client_get("/billing/") self.assertEqual(response.status_code, 302) self.assertEqual('/upgrade/', response.url) def test_get_seat_count(self) -> None: realm = get_realm("zulip") initial_count = get_seat_count(realm) user1 = UserProfile.objects.create(realm=realm, email='[email protected]', pointer=-1) user2 = UserProfile.objects.create(realm=realm, email='[email protected]', pointer=-1) self.assertEqual(get_seat_count(realm), initial_count + 2) # Test that bots aren't counted user1.is_bot = True user1.save(update_fields=['is_bot']) self.assertEqual(get_seat_count(realm), initial_count + 1) # Test that inactive users aren't counted do_deactivate_user(user2) self.assertEqual(get_seat_count(realm), initial_count) def test_sign_string(self) -> None: string = "abc" signed_string, salt = sign_string(string) self.assertEqual(string, unsign_string(signed_string, salt)) with self.assertRaises(signing.BadSignature): unsign_string(signed_string, "randomsalt") # This tests both the payment method string, and also is a very basic # test that the various upgrade paths involving non-standard payment # histories don't throw errors @mock_stripe() def test_payment_method_string(self, *mocks: Mock) -> None: pass # If you signup with a card, we should show your card as the payment method # Already tested in test_initial_upgrade # If you pay by invoice, your payment method should be # "Billed by invoice", even if you have a card on file # user = self.example_user("hamlet") # do_create_customer(user, stripe_create_token().id) # self.login(user.email) # self.upgrade(invoice=True) # stripe_customer = stripe_get_customer(Customer.objects.get(realm=user.realm).stripe_customer_id) # self.assertEqual('Billed by invoice', payment_method_string(stripe_customer)) # If you signup with a card and then downgrade, we still have your # card on file, and should show it # TODO @mock_stripe() def test_attach_discount_to_realm(self, *mocks: Mock) -> None: # Attach discount before Stripe customer exists user = self.example_user('hamlet') attach_discount_to_realm(user, Decimal(85)) self.login(user.email) # Check that the discount appears in page_params self.assert_in_success_response(['85'], self.client_get("/upgrade/")) # Check that the customer was charged the discounted amount # TODO # Check upcoming invoice reflects the discount # TODO # Attach discount to existing Stripe customer attach_discount_to_realm(user, Decimal(25)) # Check upcoming invoice reflects the new discount # TODO @mock_stripe() def test_replace_payment_source(self, *mocks: Mock) -> None: user = self.example_user("hamlet") self.login(user.email) self.upgrade() # Try replacing with a valid card stripe_token = stripe_create_token(card_number='5555555555554444').id response = self.client_post("/json/billing/sources/change", {'stripe_token': ujson.dumps(stripe_token)}) self.assert_json_success(response) number_of_sources = 0 for 
stripe_source in stripe_get_customer(Customer.objects.first().stripe_customer_id).sources: self.assertEqual(cast(stripe.Card, stripe_source).last4, '4444') number_of_sources += 1 self.assertEqual(number_of_sources, 1) audit_log_entry = RealmAuditLog.objects.order_by('-id') \ .values_list('acting_user', 'event_type').first() self.assertEqual(audit_log_entry, (user.id, RealmAuditLog.STRIPE_CARD_CHANGED)) RealmAuditLog.objects.filter(acting_user=user).delete() # Try replacing with an invalid card stripe_token = stripe_create_token(card_number='4000000000009987').id with patch("corporate.lib.stripe.billing_logger.error") as mock_billing_logger: response = self.client_post("/json/billing/sources/change", {'stripe_token': ujson.dumps(stripe_token)}) mock_billing_logger.assert_called() self.assertEqual(ujson.loads(response.content)['error_description'], 'card error') self.assert_json_error_contains(response, 'Your card was declined') number_of_sources = 0 for stripe_source in stripe_get_customer(Customer.objects.first().stripe_customer_id).sources: self.assertEqual(cast(stripe.Card, stripe_source).last4, '4444') number_of_sources += 1 self.assertEqual(number_of_sources, 1) self.assertFalse(RealmAuditLog.objects.filter(event_type=RealmAuditLog.STRIPE_CARD_CHANGED).exists()) class RequiresBillingAccessTest(ZulipTestCase): def setUp(self) -> None: hamlet = self.example_user("hamlet") hamlet.is_billing_admin = True hamlet.save(update_fields=["is_billing_admin"]) # mocked_function_name will typically be something imported from # stripe.py. In theory we could have endpoints that need to mock # multiple functions, but we'll cross that bridge when we get there. def _test_endpoint(self, url: str, mocked_function_name: str, request_data: Optional[Dict[str, Any]]={}) -> None: # Normal users do not have access self.login(self.example_email('cordelia')) response = self.client_post(url, request_data) self.assert_json_error_contains(response, "Must be a billing administrator or an organization") # Billing admins have access self.login(self.example_email('hamlet')) with patch("corporate.views.{}".format(mocked_function_name)) as mocked1: response = self.client_post(url, request_data) self.assert_json_success(response) mocked1.assert_called() # Realm admins have access, even if they are not billing admins self.login(self.example_email('iago')) with patch("corporate.views.{}".format(mocked_function_name)) as mocked2: response = self.client_post(url, request_data) self.assert_json_success(response) mocked2.assert_called() def test_json_endpoints(self) -> None: params = [ ("/json/billing/sources/change", "do_replace_payment_source", {'stripe_token': ujson.dumps('token')}), # TODO: second argument should be something like "process_downgrade" ("/json/billing/downgrade", "process_downgrade", {}), ] # type: List[Tuple[str, str, Dict[str, Any]]] for (url, mocked_function_name, data) in params: self._test_endpoint(url, mocked_function_name, data) # Make sure that we are testing all the JSON endpoints # Quite a hack, but probably fine for now string_with_all_endpoints = str(get_resolver('corporate.urls').reverse_dict) json_endpoints = set([word.strip("\"'()[],$") for word in string_with_all_endpoints.split() if 'json' in word]) # No need to test upgrade endpoint as it only requires user to be logged in. 
json_endpoints.remove("json/billing/upgrade") self.assertEqual(len(json_endpoints), len(params)) class BillingHelpersTest(ZulipTestCase): def test_next_month(self) -> None: anchor = datetime(2019, 12, 31, 1, 2, 3).replace(tzinfo=timezone_utc) period_boundaries = [ anchor, datetime(2020, 1, 31, 1, 2, 3).replace(tzinfo=timezone_utc), # Test that this is the 28th even during leap years datetime(2020, 2, 28, 1, 2, 3).replace(tzinfo=timezone_utc), datetime(2020, 3, 31, 1, 2, 3).replace(tzinfo=timezone_utc), datetime(2020, 4, 30, 1, 2, 3).replace(tzinfo=timezone_utc), datetime(2020, 5, 31, 1, 2, 3).replace(tzinfo=timezone_utc), datetime(2020, 6, 30, 1, 2, 3).replace(tzinfo=timezone_utc), datetime(2020, 7, 31, 1, 2, 3).replace(tzinfo=timezone_utc), datetime(2020, 8, 31, 1, 2, 3).replace(tzinfo=timezone_utc), datetime(2020, 9, 30, 1, 2, 3).replace(tzinfo=timezone_utc), datetime(2020, 10, 31, 1, 2, 3).replace(tzinfo=timezone_utc), datetime(2020, 11, 30, 1, 2, 3).replace(tzinfo=timezone_utc), datetime(2020, 12, 31, 1, 2, 3).replace(tzinfo=timezone_utc), datetime(2021, 1, 31, 1, 2, 3).replace(tzinfo=timezone_utc), datetime(2021, 2, 28, 1, 2, 3).replace(tzinfo=timezone_utc)] with self.assertRaises(AssertionError): add_months(anchor, -1) # Explictly test add_months for each value of MAX_DAY_FOR_MONTH and # for crossing a year boundary for i, boundary in enumerate(period_boundaries): self.assertEqual(add_months(anchor, i), boundary) # Test next_month for small values for last, next_ in zip(period_boundaries[:-1], period_boundaries[1:]): self.assertEqual(next_month(anchor, last), next_) # Test next_month for large values period_boundaries = [dt.replace(year=dt.year+100) for dt in period_boundaries] for last, next_ in zip(period_boundaries[:-1], period_boundaries[1:]): self.assertEqual(next_month(anchor, last), next_) def test_compute_plan_parameters(self) -> None: # TODO: test rounding down microseconds anchor = datetime(2019, 12, 31, 1, 2, 3).replace(tzinfo=timezone_utc) month_later = datetime(2020, 1, 31, 1, 2, 3).replace(tzinfo=timezone_utc) year_later = datetime(2020, 12, 31, 1, 2, 3).replace(tzinfo=timezone_utc) test_cases = [ # TODO test with Decimal(85), not 85 # TODO fix the mypy error by specifying the exact type # test all possibilities, since there aren't that many [(True, CustomerPlan.ANNUAL, None), (anchor, month_later, year_later, 8000)], # lint:ignore [(True, CustomerPlan.ANNUAL, 85), (anchor, month_later, year_later, 1200)], # lint:ignore [(True, CustomerPlan.MONTHLY, None), (anchor, month_later, month_later, 800)], # lint:ignore [(True, CustomerPlan.MONTHLY, 85), (anchor, month_later, month_later, 120)], # lint:ignore [(False, CustomerPlan.ANNUAL, None), (anchor, year_later, year_later, 8000)], # lint:ignore [(False, CustomerPlan.ANNUAL, 85), (anchor, year_later, year_later, 1200)], # lint:ignore [(False, CustomerPlan.MONTHLY, None), (anchor, month_later, month_later, 800)], # lint:ignore [(False, CustomerPlan.MONTHLY, 85), (anchor, month_later, month_later, 120)], # lint:ignore # test exact math of Decimals; 800 * (1 - 87.25) = 101.9999999.. 
[(False, CustomerPlan.MONTHLY, 87.25), (anchor, month_later, month_later, 102)], # test dropping of fractional cents; without the int it's 102.8 [(False, CustomerPlan.MONTHLY, 87.15), (anchor, month_later, month_later, 102)]] with patch('corporate.lib.stripe.timezone_now', return_value=anchor): for input_, output in test_cases: output_ = compute_plan_parameters(*input_) # type: ignore # TODO self.assertEqual(output_, output) def test_update_or_create_stripe_customer_logic(self) -> None: user = self.example_user('hamlet') # No existing Customer object with patch('corporate.lib.stripe.do_create_customer', return_value='returned') as mocked1: returned = update_or_create_stripe_customer(user, stripe_token='token') mocked1.assert_called() self.assertEqual(returned, 'returned') # Customer exists, replace payment source Customer.objects.create(realm=get_realm('zulip'), stripe_customer_id='cus_12345') with patch('corporate.lib.stripe.do_replace_payment_source') as mocked2: customer = update_or_create_stripe_customer(self.example_user('hamlet'), 'token') mocked2.assert_called() self.assertTrue(isinstance(customer, Customer)) # Customer exists, do nothing with patch('corporate.lib.stripe.do_replace_payment_source') as mocked3: customer = update_or_create_stripe_customer(self.example_user('hamlet'), None) mocked3.assert_not_called() self.assertTrue(isinstance(customer, Customer))
apache-2.0
-1,860,914,461,190,684,000
54.520788
114
0.632661
false
hyperwd/hwcram
vpc/migrations/0023_auto_20170926_0016.py
1
1677
# -*- coding: utf-8 -*- # Generated by Django 1.11.5 on 2017-09-25 16:16 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('vpc', '0022_auto_20170926_0005'), ] operations = [ migrations.AlterField( model_name='createip', name='bandwidth_charge_mode', field=models.CharField(blank=True, choices=[('traffic', '按流量计费'), ('bandwidth', '按带宽计费')], help_text="<font color='blue'>独享带宽填写</font>,<font color='red'>共享带宽留空</font>", max_length=10, null=True, verbose_name='带宽计费方式'), ), migrations.AlterField( model_name='createip', name='bandwidth_name', field=models.CharField(blank=True, help_text="<font color='blue'>独享带宽填写</font>,<font color='red'>共享带宽留空</font>", max_length=128, null=True, verbose_name='带宽名称'), ), migrations.AlterField( model_name='createip', name='bandwidth_share_id', field=models.CharField(blank=True, help_text="<font color='blue'>独享带宽留空</font>,<font color='red'>共享带宽填写</font>", max_length=40, null=True, verbose_name='共享带宽ID'), ), migrations.AlterField( model_name='createip', name='bandwidth_size', field=models.IntegerField(blank=True, help_text="<font color='blue'>独享带宽,填写数字,范围1~300M</font>,<font color='red'>共享带宽留空</font>", null=True, verbose_name='带宽大小'), ), ]
mit
-2,558,818,778,893,020,000
42
230
0.608638
false
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_followtest.py
1
5446
#!/usr/bin/env python ''' test follow-me options in ArduPilot Andrew Tridgell September 2016 ''' import sys, os, time, math from MAVProxy.modules.lib import mp_module from MAVProxy.modules.lib import mp_util from MAVProxy.modules.lib import mp_settings from MAVProxy.modules.mavproxy_map import mp_slipmap from pymavlink import mavutil if mp_util.has_wxpython: from MAVProxy.modules.lib.mp_menu import * class FollowTestModule(mp_module.MPModule): def __init__(self, mpstate): super(FollowTestModule, self).__init__(mpstate, "followtest", "followtest module") self.add_command('followtest', self.cmd_followtest, "followtest control", ['set (FOLLOWSETTING)']) self.follow_settings = mp_settings.MPSettings([("radius", float, 100.0), ("altitude", float, 50.0), ("speed", float, 10.0), ("type", str, 'guided'), ("vehicle_throttle", float, 0.5), ("disable_msg", bool, False)]) self.add_completion_function('(FOLLOWSETTING)', self.follow_settings.completion) self.target_pos = None self.last_update = 0 self.circle_dist = 0 def cmd_followtest(self, args): '''followtest command parser''' usage = "usage: followtest <set>" if len(args) == 0: print(usage) return if args[0] == "set": self.follow_settings.command(args[1:]) else: print(usage) def update_target(self, time_boot_ms): '''update target on map''' if not self.mpstate.map: # don't draw if no map return if not 'HOME_POSITION' in self.master.messages: return home_position = self.master.messages['HOME_POSITION'] now = time_boot_ms * 1.0e-3 dt = now - self.last_update if dt < 0: dt = 0 self.last_update = now self.circle_dist += dt * self.follow_settings.speed # assume a circle for now circumference = math.pi * self.follow_settings.radius * 2 rotations = math.fmod(self.circle_dist, circumference) / circumference angle = math.pi * 2 * rotations self.target_pos = mp_util.gps_newpos(home_position.latitude*1.0e-7, home_position.longitude*1.0e-7, math.degrees(angle), self.follow_settings.radius) icon = self.mpstate.map.icon('camera-small-red.png') (lat, lon) = (self.target_pos[0], self.target_pos[1]) self.mpstate.map.add_object(mp_slipmap.SlipIcon('followtest', (lat, lon), icon, layer='FollowTest', rotation=0, follow=False)) def idle_task(self): '''update vehicle position''' pass def mavlink_packet(self, m): '''handle an incoming mavlink packet''' if not self.mpstate.map: # don't draw if no map return if m.get_type() != 'GLOBAL_POSITION_INT': return self.update_target(m.time_boot_ms) if self.target_pos is None: return if self.follow_settings.disable_msg: return if self.follow_settings.type == 'guided': # send normal guided mode packet self.master.mav.mission_item_int_send(self.settings.target_system, self.settings.target_component, 0, self.module('wp').get_default_frame(), mavutil.mavlink.MAV_CMD_NAV_WAYPOINT, 2, 0, 0, 0, 0, 0, int(self.target_pos[0]*1.0e7), int(self.target_pos[1]*1.0e7), self.follow_settings.altitude) elif self.follow_settings.type == 'yaw': # display yaw from vehicle to target vehicle = (m.lat*1.0e-7, m.lon*1.0e-7) vehicle_yaw = math.degrees(self.master.field('ATTITUDE', 'yaw', 0)) target_bearing = mp_util.gps_bearing(vehicle[0], vehicle[1], self.target_pos[0], self.target_pos[1]) # wrap the angle from -180 to 180 thus commanding the vehicle to turn left or right # note its in centi-degrees so *100 relyaw = mp_util.wrap_180(target_bearing - vehicle_yaw) * 100 self.master.mav.command_long_send(self.settings.target_system, self.settings.target_component, mavutil.mavlink.MAV_CMD_NAV_SET_YAW_SPEED, 0, relyaw, self.follow_settings.vehicle_throttle, 0, 0, 
0, 0, 0) def init(mpstate): '''initialise module''' return FollowTestModule(mpstate)
gpl-3.0
609,618,725,096,284,900
41.546875
112
0.493757
false
GrahamDennis/xpdeint
xpdeint/waf/waflib/extras/msvs.py
1
29952
#! /usr/bin/env python # encoding: utf-8 # Avalanche Studios 2009-2011 # Thomas Nagy 2011 """ Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ """ To add this tool to your project: def options(conf): opt.load('msvs') It can be a good idea to add the sync_exec tool too. To generate solution files: $ waf configure msvs To customize the outputs, provide subclasses in your wscript files: from waflib.extras import msvs class vsnode_target(msvs.vsnode_target): def get_build_command(self, props): # likely to be required return "waf.bat build" def collect_source(self): # likely to be required ... class msvs_bar(msvs.msvs_generator): def init(self): msvs.msvs_generator.init(self) self.vsnode_target = vsnode_target The msvs class re-uses the same build() function for reading the targets (task generators), you may therefore specify msvs settings on the context object: def build(bld): bld.solution_name = 'foo.sln' bld.waf_command = 'waf.bat' bld.projects_dir = bld.srcnode.make_node('.depproj') bld.projects_dir.mkdir() For visual studio 2008, the command is called 'msvs2008', and the classes such as vsnode_target are wrapped by a decorator class 'wrap_2008' to provide special functionality. 
ASSUMPTIONS: * a project can be either a directory or a target, vcxproj files are written only for targets that have source files * each project is a vcxproj file, therefore the project uuid needs only to be a hash of the absolute path """ import os, re, sys import uuid # requires python 2.5 from waflib.Build import BuildContext from waflib import Utils, TaskGen, Logs, Task, Context, Node, Options HEADERS_GLOB = '**/(*.h|*.hpp|*.H|*.inl)' PROJECT_TEMPLATE = r'''<?xml version="1.0" encoding="UTF-8"?> <Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> <ItemGroup Label="ProjectConfigurations"> ${for b in project.build_properties} <ProjectConfiguration Include="${b.configuration}|${b.platform}"> <Configuration>${b.configuration}</Configuration> <Platform>${b.platform}</Platform> </ProjectConfiguration> ${endfor} </ItemGroup> <PropertyGroup Label="Globals"> <ProjectGuid>{${project.uuid}}</ProjectGuid> <Keyword>MakeFileProj</Keyword> <ProjectName>${project.name}</ProjectName> </PropertyGroup> <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" /> ${for b in project.build_properties} <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='${b.configuration}|${b.platform}'" Label="Configuration"> <ConfigurationType>Makefile</ConfigurationType> <OutDir>${b.outdir}</OutDir> </PropertyGroup> ${endfor} <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" /> <ImportGroup Label="ExtensionSettings"> </ImportGroup> ${for b in project.build_properties} <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='${b.configuration}|${b.platform}'"> <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> </ImportGroup> ${endfor} ${for b in project.build_properties} <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='${b.configuration}|${b.platform}'"> <NMakeBuildCommandLine>${xml:project.get_build_command(b)}</NMakeBuildCommandLine> <NMakeReBuildCommandLine>${xml:project.get_rebuild_command(b)}</NMakeReBuildCommandLine> <NMakeCleanCommandLine>${xml:project.get_clean_command(b)}</NMakeCleanCommandLine> <NMakeIncludeSearchPath>${xml:b.includes_search_path}</NMakeIncludeSearchPath> <NMakePreprocessorDefinitions>${xml:b.preprocessor_definitions};$(NMakePreprocessorDefinitions)</NMakePreprocessorDefinitions> <IncludePath>${xml:b.includes_search_path}</IncludePath> <ExecutablePath>$(ExecutablePath)</ExecutablePath> ${if getattr(b, 'output_file', None)} <NMakeOutput>${xml:b.output_file}</NMakeOutput> ${endif} ${if getattr(b, 'deploy_dir', None)} <RemoteRoot>${xml:b.deploy_dir}</RemoteRoot> ${endif} </PropertyGroup> ${endfor} ${for b in project.build_properties} ${if getattr(b, 'deploy_dir', None)} <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='${b.configuration}|${b.platform}'"> <Deploy> <DeploymentType>CopyToHardDrive</DeploymentType> </Deploy> </ItemDefinitionGroup> ${endif} ${endfor} <ItemGroup> ${for x in project.source} <${project.get_key(x)} Include='${x.abspath()}' /> ${endfor} </ItemGroup> <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" /> <ImportGroup Label="ExtensionTargets"> </ImportGroup> </Project> ''' FILTER_TEMPLATE = '''<?xml version="1.0" encoding="UTF-8"?> <Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> <ItemGroup> ${for x in project.source} <${project.get_key(x)} Include="${x.abspath()}"> 
<Filter>${project.get_filter_name(x.parent)}</Filter> </${project.get_key(x)}> ${endfor} </ItemGroup> <ItemGroup> ${for x in project.dirs()} <Filter Include="${project.get_filter_name(x)}"> <UniqueIdentifier>{${project.make_uuid(x.abspath())}}</UniqueIdentifier> </Filter> ${endfor} </ItemGroup> </Project> ''' PROJECT_2008_TEMPLATE = r'''<?xml version="1.0" encoding="UTF-8"?> <VisualStudioProject ProjectType="Visual C++" Version="9,00" Name="${xml: project.name}" ProjectGUID="{${project.uuid}}" Keyword="MakeFileProj" TargetFrameworkVersion="196613"> <Platforms> ${if project.build_properties} ${for b in project.build_properties} <Platform Name="${xml: b.platform}" /> ${endfor} ${else} <Platform Name="Win32" /> ${endif} </Platforms> <ToolFiles> </ToolFiles> <Configurations> ${if project.build_properties} ${for b in project.build_properties} <Configuration Name="${xml: b.configuration}|${xml: b.platform}" IntermediateDirectory="$ConfigurationName" OutputDirectory="${xml: b.outdir}" ConfigurationType="0"> <Tool Name="VCNMakeTool" BuildCommandLine="${xml: project.get_build_command(b)}" ReBuildCommandLine="${xml: project.get_rebuild_command(b)}" CleanCommandLine="${xml: project.get_clean_command(b)}" ${if getattr(b, 'output_file', None)} Output="${xml: b.output_file}" ${endif} PreprocessorDefinitions="${xml: b.preprocessor_definitions}" IncludeSearchPath="${xml: b.includes_search_path}" ForcedIncludes="" ForcedUsingAssemblies="" AssemblySearchPath="" CompileAsManaged="" /> </Configuration> ${endfor} ${else} <Configuration Name="Release|Win32" > </Configuration> ${endif} </Configurations> <References> </References> <Files> ${project.display_filter()} </Files> </VisualStudioProject> ''' SOLUTION_TEMPLATE = '''Microsoft Visual Studio Solution File, Format Version ${project.numver} # Visual Studio ${project.vsver} ${for p in project.all_projects} Project("{${p.ptype()}}") = "${p.name}", "${p.title}", "{${p.uuid}}" EndProject${endfor} Global GlobalSection(SolutionConfigurationPlatforms) = preSolution ${if project.all_projects} ${for (configuration, platform) in project.all_projects[0].ctx.project_configurations()} ${configuration}|${platform} = ${configuration}|${platform} ${endfor} ${endif} EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution ${for p in project.all_projects} ${if hasattr(p, 'source')} ${for b in p.build_properties} {${p.uuid}}.${b.configuration}|${b.platform}.ActiveCfg = ${b.configuration}|${b.platform} ${if getattr(p, 'is_active', None)} {${p.uuid}}.${b.configuration}|${b.platform}.Build.0 = ${b.configuration}|${b.platform} ${endif} ${endfor} ${endif} ${endfor} EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE EndGlobalSection GlobalSection(NestedProjects) = preSolution ${for p in project.all_projects} ${if p.parent} {${p.uuid}} = {${p.parent.uuid}} ${endif} ${endfor} EndGlobalSection EndGlobal ''' COMPILE_TEMPLATE = '''def f(project): lst = [] def xml_escape(value): return value.replace("&", "&amp;").replace('"', "&quot;").replace("'", "&apos;").replace("<", "&lt;").replace(">", "&gt;") %s #f = open('cmd.txt', 'w') #f.write(str(lst)) #f.close() return ''.join(lst) ''' reg_act = re.compile(r"(?P<backslash>\\)|(?P<dollar>\$\$)|(?P<subst>\$\{(?P<code>[^}]*?)\})", re.M) def compile_template(line): """ Compile a template expression into a python function (like jsps, but way shorter) """ extr = [] def repl(match): g = match.group if g('dollar'): return "$" elif g('backslash'): return "\\" elif g('subst'): 
extr.append(g('code')) return "<<|@|>>" return None line2 = reg_act.sub(repl, line) params = line2.split('<<|@|>>') assert(extr) indent = 0 buf = [] dvars = [] app = buf.append def app(txt): buf.append(indent * '\t' + txt) for x in range(len(extr)): if params[x]: app("lst.append(%r)" % params[x]) f = extr[x] if f.startswith('if') or f.startswith('for'): app(f + ':') indent += 1 elif f.startswith('py:'): app(f[3:]) elif f.startswith('endif') or f.startswith('endfor'): indent -= 1 elif f.startswith('else') or f.startswith('elif'): indent -= 1 app(f + ':') indent += 1 elif f.startswith('xml:'): app('lst.append(xml_escape(%s))' % f[4:]) else: #app('lst.append((%s) or "cannot find %s")' % (f, f)) app('lst.append(%s)' % f) if extr: if params[-1]: app("lst.append(%r)" % params[-1]) fun = COMPILE_TEMPLATE % "\n\t".join(buf) #print(fun) return Task.funex(fun) re_blank = re.compile('(\n|\r|\\s)*\n', re.M) def rm_blank_lines(txt): txt = re_blank.sub('\r\n', txt) return txt BOM = '\xef\xbb\xbf' try: BOM = bytes(BOM, 'iso8859-1') # python 3 except: pass def stealth_write(self, data, flags='wb'): try: x = unicode except: data = data.encode('utf-8') # python 3 else: data = data.decode(sys.getfilesystemencoding(), 'replace') data = data.encode('utf-8') if self.name.endswith('.vcproj') or self.name.endswith('.vcxproj'): data = BOM + data try: txt = self.read(flags='rb') if txt != data: raise ValueError('must write') except (IOError, ValueError): self.write(data, flags=flags) else: Logs.debug('msvs: skipping %s' % self.abspath()) Node.Node.stealth_write = stealth_write re_quote = re.compile("[^a-zA-Z0-9-]") def quote(s): return re_quote.sub("_", s) def xml_escape(value): return value.replace("&", "&amp;").replace('"', "&quot;").replace("'", "&apos;").replace("<", "&lt;").replace(">", "&gt;") def make_uuid(v, prefix = None): """ simple utility function """ if isinstance(v, dict): keys = list(v.keys()) keys.sort() tmp = str([(k, v[k]) for k in keys]) else: tmp = str(v) d = Utils.md5(tmp.encode()).hexdigest().upper() if prefix: d = '%s%s' % (prefix, d[8:]) gid = uuid.UUID(d, version = 4) return str(gid).upper() def diff(node, fromnode): # difference between two nodes, but with "(..)" instead of ".." c1 = node c2 = fromnode c1h = c1.height() c2h = c2.height() lst = [] up = 0 while c1h > c2h: lst.append(c1.name) c1 = c1.parent c1h -= 1 while c2h > c1h: up += 1 c2 = c2.parent c2h -= 1 while id(c1) != id(c2): lst.append(c1.name) up += 1 c1 = c1.parent c2 = c2.parent for i in range(up): lst.append('(..)') lst.reverse() return tuple(lst) class build_property(object): pass class vsnode(object): """ Abstract class representing visual studio elements We assume that all visual studio nodes have a uuid and a parent """ def __init__(self, ctx): self.ctx = ctx # msvs context self.name = '' # string, mandatory self.vspath = '' # path in visual studio (name for dirs, absolute path for projects) self.uuid = '' # string, mandatory self.parent = None # parent node for visual studio nesting def get_waf(self): """ Override in subclasses... 
""" return 'cd /d "%s" & %s' % (self.ctx.srcnode.abspath(), getattr(self.ctx, 'waf_command', 'waf.bat')) def ptype(self): """ Return a special uuid for projects written in the solution file """ pass def write(self): """ Write the project file, by default, do nothing """ pass def make_uuid(self, val): """ Alias for creating uuid values easily (the templates cannot access global variables) """ return make_uuid(val) class vsnode_vsdir(vsnode): """ Nodes representing visual studio folders (which do not match the filesystem tree!) """ VS_GUID_SOLUTIONFOLDER = "2150E333-8FDC-42A3-9474-1A3956D46DE8" def __init__(self, ctx, uuid, name, vspath=''): vsnode.__init__(self, ctx) self.title = self.name = name self.uuid = uuid self.vspath = vspath or name def ptype(self): return self.VS_GUID_SOLUTIONFOLDER class vsnode_project(vsnode): """ Abstract class representing visual studio project elements A project is assumed to be writable, and has a node representing the file to write to """ VS_GUID_VCPROJ = "8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942" def ptype(self): return self.VS_GUID_VCPROJ def __init__(self, ctx, node): vsnode.__init__(self, ctx) self.path = node self.uuid = make_uuid(node.abspath()) self.name = node.name self.title = self.path.abspath() self.source = [] # list of node objects self.build_properties = [] # list of properties (nmake commands, output dir, etc) def dirs(self): """ Get the list of parent folders of the source files (header files included) for writing the filters """ lst = [] def add(x): if x.height() > self.tg.path.height() and x not in lst: lst.append(x) add(x.parent) for x in self.source: add(x.parent) return lst def write(self): Logs.debug('msvs: creating %r' % self.path) # first write the project file template1 = compile_template(PROJECT_TEMPLATE) proj_str = template1(self) proj_str = rm_blank_lines(proj_str) self.path.stealth_write(proj_str) # then write the filter template2 = compile_template(FILTER_TEMPLATE) filter_str = template2(self) filter_str = rm_blank_lines(filter_str) tmp = self.path.parent.make_node(self.path.name + '.filters') tmp.stealth_write(filter_str) def get_key(self, node): """ required for writing the source files """ name = node.name if name.endswith('.cpp') or name.endswith('.c'): return 'ClCompile' return 'ClInclude' def collect_properties(self): """ Returns a list of triplet (configuration, platform, output_directory) """ ret = [] for c in self.ctx.configurations: for p in self.ctx.platforms: x = build_property() x.outdir = '' x.configuration = c x.platform = p x.preprocessor_definitions = '' x.includes_search_path = '' # can specify "deploy_dir" too ret.append(x) self.build_properties = ret def get_build_params(self, props): opt = '--execsolution=%s' % self.ctx.get_solution_node().abspath() return (self.get_waf(), opt) def get_build_command(self, props): return "%s build %s" % self.get_build_params(props) def get_clean_command(self, props): return "%s clean %s" % self.get_build_params(props) def get_rebuild_command(self, props): return "%s clean build %s" % self.get_build_params(props) def get_filter_name(self, node): lst = diff(node, self.tg.path) return '\\'.join(lst) or '.' 
class vsnode_alias(vsnode_project): def __init__(self, ctx, node, name): vsnode_project.__init__(self, ctx, node) self.name = name self.output_file = '' class vsnode_build_all(vsnode_alias): """ Fake target used to emulate the behaviour of "make all" (starting one process by target is slow) This is the only alias enabled by default """ def __init__(self, ctx, node, name='build_all_projects'): vsnode_alias.__init__(self, ctx, node, name) self.is_active = True class vsnode_install_all(vsnode_alias): """ Fake target used to emulate the behaviour of "make install" """ def __init__(self, ctx, node, name='install_all_projects'): vsnode_alias.__init__(self, ctx, node, name) def get_build_command(self, props): return "%s build install %s" % self.get_build_params(props) def get_clean_command(self, props): return "%s clean %s" % self.get_build_params(props) def get_rebuild_command(self, props): return "%s clean build install %s" % self.get_build_params(props) class vsnode_project_view(vsnode_alias): """ Fake target used to emulate a file system view """ def __init__(self, ctx, node, name='project_view'): vsnode_alias.__init__(self, ctx, node, name) self.tg = self.ctx() # fake one, cannot remove self.exclude_files = Node.exclude_regs + ''' waf-1.6.* waf3-1.6.*/** .waf-1.6.* .waf3-1.6.*/** **/*.sdf **/*.suo **/*.ncb **/%s ''' % Options.lockfile def collect_source(self): # this is likely to be slow self.source = self.ctx.srcnode.ant_glob('**', excl=self.exclude_files) def get_build_command(self, props): params = self.get_build_params(props) + (self.ctx.cmd,) return "%s %s %s" % params def get_clean_command(self, props): return "" def get_rebuild_command(self, props): return self.get_build_command(props) class vsnode_target(vsnode_project): """ Visual studio project representing a targets (programs, libraries, etc) and bound to a task generator """ def __init__(self, ctx, tg): """ A project is more or less equivalent to a file/folder """ base = getattr(ctx, 'projects_dir', None) or tg.path node = base.make_node(quote(tg.name) + ctx.project_extension) # the project file as a Node vsnode_project.__init__(self, ctx, node) self.name = quote(tg.name) self.tg = tg # task generator def get_build_params(self, props): """ Override the default to add the target name """ opt = '--execsolution=%s' % self.ctx.get_solution_node().abspath() if getattr(self, 'tg', None): opt += " --targets=%s" % self.tg.name return (self.get_waf(), opt) def collect_source(self): tg = self.tg source_files = tg.to_nodes(getattr(tg, 'source', [])) include_dirs = Utils.to_list(getattr(tg, 'msvs_includes', [])) include_files = [] for x in include_dirs: if isinstance(x, str): x = tg.path.find_node(x) if x: lst = [y for y in x.ant_glob(HEADERS_GLOB, flat=False)] include_files.extend(lst) # remove duplicates self.source.extend(list(set(source_files + include_files))) self.source.sort(key=lambda x: x.abspath()) def collect_properties(self): """ Visual studio projects are associated with platforms and configurations (for building especially) """ super(vsnode_target, self).collect_properties() for x in self.build_properties: x.outdir = self.path.parent.abspath() x.preprocessor_definitions = '' x.includes_search_path = '' try: tsk = self.tg.link_task except AttributeError: pass else: x.output_file = tsk.outputs[0].abspath() x.preprocessor_definitions = ';'.join(tsk.env.DEFINES) x.includes_search_path = ';'.join(self.tg.env.INCPATHS) class msvs_generator(BuildContext): '''generates a visual studio 2010 solution''' cmd = 'msvs' fun = 'build' def 
init(self): """ Some data that needs to be present """ if not getattr(self, 'configurations', None): self.configurations = ['Release'] # LocalRelease, RemoteDebug, etc if not getattr(self, 'platforms', None): self.platforms = ['Win32'] if not getattr(self, 'all_projects', None): self.all_projects = [] if not getattr(self, 'project_extension', None): self.project_extension = '.vcxproj' if not getattr(self, 'projects_dir', None): self.projects_dir = self.srcnode.make_node('.depproj') self.projects_dir.mkdir() # bind the classes to the object, so that subclass can provide custom generators if not getattr(self, 'vsnode_vsdir', None): self.vsnode_vsdir = vsnode_vsdir if not getattr(self, 'vsnode_target', None): self.vsnode_target = vsnode_target if not getattr(self, 'vsnode_build_all', None): self.vsnode_build_all = vsnode_build_all if not getattr(self, 'vsnode_install_all', None): self.vsnode_install_all = vsnode_install_all if not getattr(self, 'vsnode_project_view', None): self.vsnode_project_view = vsnode_project_view self.numver = '11.00' self.vsver = '2010' def execute(self): """ Entry point """ self.restore() if not self.all_envs: self.load_envs() self.recurse([self.run_dir]) # user initialization self.init() # two phases for creating the solution self.collect_projects() # add project objects into "self.all_projects" self.write_files() # write the corresponding project and solution files def collect_projects(self): """ Fill the list self.all_projects with project objects Fill the list of build targets """ self.collect_targets() self.add_aliases() self.collect_dirs() self.all_projects.sort(key=lambda x: getattr(x, 'path', None) and x.path.abspath() or x.name) def write_files(self): """ Write the project and solution files from the data collected so far. 
It is unlikely that you will want to change this """ for p in self.all_projects: p.write() # and finally write the solution file node = self.get_solution_node() node.parent.mkdir() Logs.warn('Creating %r' % node) template1 = compile_template(SOLUTION_TEMPLATE) sln_str = template1(self) sln_str = rm_blank_lines(sln_str) node.stealth_write(sln_str) def get_solution_node(self): """ The solution filename is required when writing the .vcproj files return self.solution_node and if it does not exist, make one """ try: return self.solution_node except: pass solution_name = getattr(self, 'solution_name', None) if not solution_name: solution_name = getattr(Context.g_module, Context.APPNAME, 'project') + '.sln' if os.path.isabs(solution_name): self.solution_node = self.root.make_node(solution_name) else: self.solution_node = self.srcnode.make_node(solution_name) return self.solution_node def project_configurations(self): """ Helper that returns all the pairs (config,platform) """ ret = [] for c in self.configurations: for p in self.platforms: ret.append((c, p)) return ret def collect_targets(self): """ Process the list of task generators """ for g in self.groups: for tg in g: if not isinstance(tg, TaskGen.task_gen): continue if not hasattr(tg, 'msvs_includes'): tg.msvs_includes = tg.to_list(getattr(tg, 'includes', [])) + tg.to_list(getattr(tg, 'export_includes', [])) tg.post() if not getattr(tg, 'link_task', None): continue p = self.vsnode_target(self, tg) p.collect_source() # delegate this processing p.collect_properties() self.all_projects.append(p) def add_aliases(self): """ Add a specific target that emulates the "make all" necessary for Visual studio when pressing F7 We also add an alias for "make install" (disabled by default) """ base = getattr(self, 'projects_dir', None) or self.tg.path node_project = base.make_node('build_all_projects' + self.project_extension) # Node p_build = self.vsnode_build_all(self, node_project) p_build.collect_properties() self.all_projects.append(p_build) node_project = base.make_node('install_all_projects' + self.project_extension) # Node p_install = self.vsnode_install_all(self, node_project) p_install.collect_properties() self.all_projects.append(p_install) node_project = base.make_node('project_view' + self.project_extension) # Node p_view = self.vsnode_project_view(self, node_project) p_view.collect_source() p_view.collect_properties() self.all_projects.append(p_view) n = self.vsnode_vsdir(self, make_uuid(self.srcnode.abspath() + 'build_aliases'), "build_aliases") p_build.parent = p_install.parent = p_view.parent = n self.all_projects.append(n) def collect_dirs(self): """ Create the folder structure in the Visual studio project view """ seen = {} def make_parents(proj): # look at a project, try to make a parent if getattr(proj, 'parent', None): # aliases already have parents return x = proj.iter_path if x in seen: proj.parent = seen[x] return # There is not vsnode_vsdir for x. 
# So create a project representing the folder "x" n = proj.parent = seen[x] = self.vsnode_vsdir(self, make_uuid(x.abspath()), x.name) n.iter_path = x.parent self.all_projects.append(n) # recurse up to the project directory if x.height() > self.srcnode.height() + 1: make_parents(n) for p in self.all_projects[:]: # iterate over a copy of all projects if not getattr(p, 'tg', None): # but only projects that have a task generator continue # make a folder for each task generator p.iter_path = p.tg.path make_parents(p) def wrap_2008(cls): class dec(cls): def __init__(self, *k, **kw): cls.__init__(self, *k, **kw) self.project_template = PROJECT_2008_TEMPLATE def display_filter(self): root = build_property() root.subfilters = [] root.sourcefiles = [] root.source = [] root.name = '' @Utils.run_once def add_path(lst): if not lst: return root child = build_property() child.subfilters = [] child.sourcefiles = [] child.source = [] child.name = lst[-1] par = add_path(lst[:-1]) par.subfilters.append(child) return child for x in self.source: # this crap is for enabling subclasses to override get_filter_name tmp = self.get_filter_name(x.parent) tmp = tmp != '.' and tuple(tmp.split('\\')) or () par = add_path(tmp) par.source.append(x) def display(n): buf = [] for x in n.source: buf.append('<File RelativePath="%s" FileType="%s"/>\n' % (xml_escape(x.abspath()), self.get_key(x))) for x in n.subfilters: buf.append('<Filter Name="%s">' % xml_escape(x.name)) buf.append(display(x)) buf.append('</Filter>') return '\n'.join(buf) return display(root) def get_key(self, node): """ If you do not want to let visual studio use the default file extensions, override this method to return a value: 0: C/C++ Code, 1: C++ Class, 2: C++ Header File, 3: C++ Form, 4: C++ Control, 5: Text File, 6: DEF File, 7: IDL File, 8: Makefile, 9: RGS File, 10: RC File, 11: RES File, 12: XSD File, 13: XML File, 14: HTML File, 15: CSS File, 16: Bitmap, 17: Icon, 18: Resx File, 19: BSC File, 20: XSX File, 21: C++ Web Service, 22: ASAX File, 23: Asp Page, 24: Document, 25: Discovery File, 26: C# File, 27: eFileTypeClassDiagram, 28: MHTML Document, 29: Property Sheet, 30: Cursor, 31: Manifest, 32: eFileTypeRDLC """ return '' def write(self): Logs.debug('msvs: creating %r' % self.path) template1 = compile_template(self.project_template) proj_str = template1(self) proj_str = rm_blank_lines(proj_str) self.path.stealth_write(proj_str) return dec class msvs_2008_generator(msvs_generator): '''generates a visual studio 2008 solution''' cmd = 'msvs2008' fun = msvs_generator.fun def init(self): if not getattr(self, 'project_extension', None): self.project_extension = '_2008.vcproj' if not getattr(self, 'solution_name', None): self.solution_name = getattr(Context.g_module, Context.APPNAME, 'project') + '_2008.sln' if not getattr(self, 'vsnode_target', None): self.vsnode_target = wrap_2008(vsnode_target) if not getattr(self, 'vsnode_build_all', None): self.vsnode_build_all = wrap_2008(vsnode_build_all) if not getattr(self, 'vsnode_install_all', None): self.vsnode_install_all = wrap_2008(vsnode_install_all) if not getattr(self, 'vsnode_project_view', None): self.vsnode_project_view = wrap_2008(vsnode_project_view) msvs_generator.init(self) self.numver = '10.00' self.vsver = '2008' def options(ctx): """ If the msvs option is used, try to detect if the build is made from visual studio """ ctx.add_option('--execsolution', action='store', help='when building with visual studio, use a build state file') old = BuildContext.execute def override_build_state(ctx): def 
lock(rm, add): uns = ctx.options.execsolution.replace('.sln', rm) uns = ctx.root.make_node(uns) try: uns.delete() except: pass uns = ctx.options.execsolution.replace('.sln', add) uns = ctx.root.make_node(uns) try: uns.write('') except: pass if ctx.options.execsolution: ctx.launch_dir = Context.top_dir # force a build for the whole project (invalid cwd when called by visual studio) lock('.lastbuildstate', '.unsuccessfulbuild') old(ctx) lock('.unsuccessfulbuild', '.lastbuildstate') else: old(ctx) BuildContext.execute = override_build_state
gpl-2.0
-3,358,031,452,174,001,700
28.451327
177
0.677217
false
Kalenai/url-shortener
tests/test_api.py
1
2727
from urllib.parse import urlparse from datetime import datetime, timedelta from flask import request from tests.base import BaseTestCase from app import db from app.models import UrlLink class TestApiService(BaseTestCase): """ Test the API for the URL Shortener Service. """ def test_should_return_original_and_new_url_when_passed_valid_url(self): url = "https://github.com/Kalenai/url-shortener" response = self.client.get("/new/" + url) self.assertEqual(response.status_code, 200) self.assertEqual(response.json, dict(original_url=url, short_url=request.url_root + UrlLink.query.filter_by(url=url).first().url_key)) def test_should_return_error_400_when_passed_invalid_url(self): url = "hello world!" response = self.client.get("/new/" + url) self.assertEqual(response.status_code, 400) def test_should_redirect_to_original_url_when_passed_valid_url_link(self): url = "https://github.com/Kalenai/url-shortener" url_key = "pretty-bird" url_link = UrlLink(url_key, url) db.session.add(url_link) db.session.commit() response = self.client.get("/" + url_key, follow_redirects=False) self.assertEqual(response.status_code, 302) self.assertEqual(urlparse(response.location).geturl(), url) def test_should_return_error_404_when_passed_invalid_url_link(self): url_key = "ugly-bird" url_link = UrlLink("pretty-bird", "https://github.com/Kalenai/url-shortener") db.session.add(url_link) db.session.commit() response = self.client.get("/" + url_key) self.assertEqual(response.status_code, 404) def test_should_initialize_created_date_to_today_when_url_link_is_created(self): url_key = "pretty-bird" url_link, created_time = UrlLink(url_key, "https://github.com/Kalenai/url-shortener"), datetime.utcnow() db.session.add(url_link) db.session.commit() self.assertTrue(UrlLink.query.filter_by(url_key=url_key).first().created - created_time < timedelta(seconds=10)) def test_should_update_last_used_date_in_db_when_url_link_is_used(self): url_key = "pretty-bird" url_link = UrlLink(url_key, "https://github.com/Kalenai/url-shortener") db.session.add(url_link) db.session.commit() self.assertTrue(UrlLink.query.filter_by(url_key=url_key).first().last_used is None) response = self.client.get("/" + url_key) self.assertTrue(UrlLink.query.filter_by(url_key=url_key).first().last_used - UrlLink.query.filter_by(url_key=url_key).first().created > timedelta(seconds=0))
mit
8,340,487,435,875,837,000
46.017241
124
0.654932
false
Lemma1/MAC-POSTS
src/setup.py
1
2581
import os import re import sys import platform import subprocess from setuptools import setup, Extension from setuptools.command.build_ext import build_ext from distutils.version import LooseVersion class CMakeExtension(Extension): def __init__(self, name, sourcedir=''): Extension.__init__(self, name, sources=[]) self.sourcedir = os.path.abspath(sourcedir) class CMakeBuild(build_ext): def run(self): try: out = subprocess.check_output(['cmake', '--version']) except OSError: raise RuntimeError("CMake must be installed to build the following extensions: " + ", ".join(e.name for e in self.extensions)) if platform.system() == "Windows": cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)', out.decode()).group(1)) if cmake_version < '3.1.0': raise RuntimeError("CMake >= 3.1.0 is required on Windows") for ext in self.extensions: self.build_extension(ext) def build_extension(self, ext): extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name))) # print "DEBUG", os.listdir(extdir) cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir, '-DPYTHON_EXECUTABLE=' + sys.executable] cfg = 'Debug' if self.debug else 'Release' build_args = ['--config', cfg] if platform.system() == "Windows": cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)] if sys.maxsize > 2**32: cmake_args += ['-A', 'x64'] build_args += ['--', '/m'] else: cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg] build_args += ['--', '-j2'] env = os.environ.copy() env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(env.get('CXXFLAGS', ''), self.distribution.get_version()) if not os.path.exists(self.build_temp): os.makedirs(self.build_temp) subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env) subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp) setup( name='MNMAPI', version='0.0.1', author='Wei Ma', author_email='[email protected]', description='A API library for MAC-POSTS (MNM)', long_description='', ext_modules=[CMakeExtension('MNMAPI')], cmdclass=dict(build_ext=CMakeBuild), zip_safe=False, )
mit
-3,688,370,340,519,691,000
35.871429
98
0.573809
false
chromium/chromium
tools/usb_gadget/hid_gadget_test.py
6
9450
#!/usr/bin/env python # Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import unittest import mock import hid_constants import hid_descriptors import hid_gadget import usb_constants report_desc = hid_descriptors.ReportDescriptor( hid_descriptors.UsagePage(0xFF00), # Vendor Defined hid_descriptors.Usage(0x00), hid_descriptors.Collection( hid_constants.CollectionType.APPLICATION, hid_descriptors.LogicalMinimum(0, force_length=1), hid_descriptors.LogicalMaximum(255, force_length=2), hid_descriptors.ReportSize(8), hid_descriptors.ReportCount(8), hid_descriptors.Input(hid_descriptors.Data, hid_descriptors.Variable, hid_descriptors.Absolute, hid_descriptors.BufferedBytes), hid_descriptors.Output(hid_descriptors.Data, hid_descriptors.Variable, hid_descriptors.Absolute, hid_descriptors.BufferedBytes), hid_descriptors.Feature(hid_descriptors.Data, hid_descriptors.Variable, hid_descriptors.Absolute, hid_descriptors.BufferedBytes) ) ) combo_report_desc = hid_descriptors.ReportDescriptor( hid_descriptors.ReportID(1), report_desc, hid_descriptors.ReportID(2), report_desc ) class HidGadgetTest(unittest.TestCase): def test_bad_intervals(self): with self.assertRaisesRegexp(ValueError, 'Full speed'): hid_gadget.HidGadget(report_desc, features={}, interval_ms=50000, vendor_id=0, product_id=0) with self.assertRaisesRegexp(ValueError, 'High speed'): hid_gadget.HidGadget(report_desc, features={}, interval_ms=5000, vendor_id=0, product_id=0) def test_get_string_descriptor(self): g = hid_gadget.HidGadget(report_desc=report_desc, features={}, vendor_id=0, product_id=0) chip = mock.Mock() g.Connected(chip, usb_constants.Speed.HIGH) g.AddStringDescriptor(2, 'HID Gadget') desc = g.ControlRead(0x80, 6, 0x0302, 0x0409, 255) self.assertEquals(desc, '\x16\x03H\0I\0D\0 \0G\0a\0d\0g\0e\0t\0') def test_get_report_descriptor(self): g = hid_gadget.HidGadget(report_desc=report_desc, features={}, vendor_id=0, product_id=0) chip = mock.Mock() g.Connected(chip, usb_constants.Speed.HIGH) desc = g.ControlRead(0x81, 6, 0x2200, 0, 63) self.assertEquals(desc, report_desc) def test_set_idle(self): g = hid_gadget.HidGadget(report_desc=report_desc, features={}, vendor_id=0, product_id=0) chip = mock.Mock() g.Connected(chip, usb_constants.Speed.HIGH) self.assertTrue(g.ControlWrite(0x21, 0x0A, 0, 0, '')) def test_class_wrong_target(self): g = hid_gadget.HidGadget(report_desc=report_desc, features={}, vendor_id=0, product_id=0) chip = mock.Mock() g.Connected(chip, usb_constants.Speed.HIGH) self.assertIsNone(g.ControlRead(0xA0, 0, 0, 0, 0)) # Device self.assertIsNone(g.ControlRead(0xA1, 0, 0, 1, 0)) # Interface 1 self.assertIsNone(g.ControlWrite(0x20, 0, 0, 0, '')) # Device self.assertIsNone(g.ControlWrite(0x21, 0, 0, 1, '')) # Interface 1 def test_send_report_zero(self): g = hid_gadget.HidGadget(report_desc=report_desc, features={}, vendor_id=0, product_id=0) chip = mock.Mock() g.Connected(chip, usb_constants.Speed.HIGH) g.SendReport(0, 'Hello world!') chip.SendPacket.assert_called_once_with(0x81, 'Hello world!') def test_send_multiple_reports(self): g = hid_gadget.HidGadget(report_desc=report_desc, features={}, vendor_id=0, product_id=0) chip = mock.Mock() g.Connected(chip, usb_constants.Speed.HIGH) g.SendReport(1, 'Hello!') g.SendReport(2, 'World!') chip.SendPacket.assert_has_calls([ mock.call(0x81, '\x01Hello!'), mock.call(0x81, '\x02World!'), ]) class TestFeature(hid_gadget.HidFeature): def SetInputReport(self, data): 
self.input_report = data return True def SetOutputReport(self, data): self.output_report = data return True def SetFeatureReport(self, data): self.feature_report = data return True def GetInputReport(self): return 'Input report.' def GetOutputReport(self): return 'Output report.' def GetFeatureReport(self): return 'Feature report.' class HidFeatureTest(unittest.TestCase): def test_disconnected(self): feature = TestFeature() with self.assertRaisesRegexp(RuntimeError, 'not connected'): feature.SendReport('Hello world!') def test_send_report(self): feature = TestFeature() g = hid_gadget.HidGadget(report_desc, features={1: feature}, vendor_id=0, product_id=0) chip = mock.Mock() g.Connected(chip, usb_constants.Speed.HIGH) feature.SendReport('Hello world!') chip.SendPacket.assert_called_once_with(0x81, '\x01Hello world!') g.Disconnected() def test_get_bad_report(self): feature = TestFeature() g = hid_gadget.HidGadget(report_desc, features={1: feature}, vendor_id=0, product_id=0) chip = mock.Mock() g.Connected(chip, usb_constants.Speed.HIGH) self.assertIsNone(g.ControlRead(0xA1, 1, 0x0102, 0, 8)) def test_set_bad_report(self): feature = TestFeature() g = hid_gadget.HidGadget(report_desc, features={1: feature}, vendor_id=0, product_id=0) chip = mock.Mock() g.Connected(chip, usb_constants.Speed.HIGH) self.assertIsNone(g.ControlWrite(0x21, 0x09, 0x0102, 0, 'Hello!')) def test_get_input_report(self): feature = TestFeature() g = hid_gadget.HidGadget(report_desc, features={1: feature}, vendor_id=0, product_id=0) chip = mock.Mock() g.Connected(chip, usb_constants.Speed.HIGH) report = g.ControlRead(0xA1, 1, 0x0101, 0, 8) self.assertEquals(report, 'Input re') def test_set_input_report(self): feature = TestFeature() g = hid_gadget.HidGadget(report_desc, features={1: feature}, vendor_id=0, product_id=0) chip = mock.Mock() g.Connected(chip, usb_constants.Speed.HIGH) self.assertTrue(g.ControlWrite(0x21, 0x09, 0x0101, 0, 'Hello!')) self.assertEquals(feature.input_report, 'Hello!') def test_get_output_report(self): feature = TestFeature() g = hid_gadget.HidGadget(report_desc, features={1: feature}, vendor_id=0, product_id=0) chip = mock.Mock() g.Connected(chip, usb_constants.Speed.HIGH) report = g.ControlRead(0xA1, 1, 0x0201, 0, 8) self.assertEquals(report, 'Output r') def test_set_output_report(self): feature = TestFeature() g = hid_gadget.HidGadget(report_desc, features={1: feature}, vendor_id=0, product_id=0) chip = mock.Mock() g.Connected(chip, usb_constants.Speed.HIGH) self.assertTrue(g.ControlWrite(0x21, 0x09, 0x0201, 0, 'Hello!')) self.assertEquals(feature.output_report, 'Hello!') def test_receive_interrupt(self): feature = TestFeature() g = hid_gadget.HidGadget(report_desc, features={1: feature}, vendor_id=0, product_id=0) chip = mock.Mock() g.Connected(chip, usb_constants.Speed.HIGH) g.SetConfiguration(1) g.ReceivePacket(0x01, '\x01Hello!') self.assertFalse(chip.HaltEndpoint.called) self.assertEquals(feature.output_report, 'Hello!') def test_receive_interrupt_report_zero(self): feature = TestFeature() g = hid_gadget.HidGadget(report_desc, features={0: feature}, vendor_id=0, product_id=0) chip = mock.Mock() g.Connected(chip, usb_constants.Speed.HIGH) g.SetConfiguration(1) g.ReceivePacket(0x01, 'Hello!') self.assertFalse(chip.HaltEndpoint.called) self.assertEquals(feature.output_report, 'Hello!') def test_receive_bad_interrupt(self): feature = TestFeature() g = hid_gadget.HidGadget(report_desc, features={1: feature}, vendor_id=0, product_id=0) chip = mock.Mock() g.Connected(chip, 
usb_constants.Speed.HIGH) g.SetConfiguration(1) g.ReceivePacket(0x01, '\x00Hello!') chip.HaltEndpoint.assert_called_once_with(0x01) def test_get_feature_report(self): feature = TestFeature() g = hid_gadget.HidGadget(report_desc, features={1: feature}, vendor_id=0, product_id=0) chip = mock.Mock() g.Connected(chip, usb_constants.Speed.HIGH) report = g.ControlRead(0xA1, 1, 0x0301, 0, 8) self.assertEquals(report, 'Feature ') def test_set_feature_report(self): feature = TestFeature() g = hid_gadget.HidGadget(report_desc, features={1: feature}, vendor_id=0, product_id=0) chip = mock.Mock() g.Connected(chip, usb_constants.Speed.HIGH) self.assertTrue(g.ControlWrite(0x21, 0x09, 0x0301, 0, 'Hello!')) self.assertEquals(feature.feature_report, 'Hello!') if __name__ == '__main__': unittest.main()
bsd-3-clause
2,623,275,854,839,513,600
35.627907
72
0.630899
false
SebastianDeiss/paramiko
paramiko/__init__.py
1
3759
# Copyright (C) 2003-2011 Robey Pointer <[email protected]> # # This file is part of paramiko. # # Paramiko is free software; you can redistribute it and/or modify it under the # terms of the GNU Lesser General Public License as published by the Free # Software Foundation; either version 2.1 of the License, or (at your option) # any later version. # # Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more # details. # # You should have received a copy of the GNU Lesser General Public License # along with Paramiko; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. # flake8: noqa import sys from paramiko._version import __version__, __version_info__ if sys.version_info < (2, 6): raise RuntimeError('You need Python 2.6+ for this module.') __author__ = "Jeff Forcier <[email protected]>" __license__ = "GNU Lesser General Public License (LGPL)" from paramiko.transport import SecurityOptions, Transport from paramiko.client import ( SSHClient, MissingHostKeyPolicy, AutoAddPolicy, RejectPolicy, WarningPolicy, ) from paramiko.auth_handler import AuthHandler from paramiko.ssh_gss import GSSAuth, GSS_AUTH_AVAILABLE, GSS_EXCEPTIONS from paramiko.channel import Channel, ChannelFile from paramiko.ssh_exception import ( SSHException, PasswordRequiredException, BadAuthenticationType, ChannelException, BadHostKeyException, AuthenticationException, ProxyCommandFailure, ) from paramiko.server import ServerInterface, SubsystemHandler, InteractiveQuery from paramiko.rsakey import RSAKey from paramiko.dsskey import DSSKey from paramiko.ecdsakey import ECDSAKey from paramiko.ed25519key import Ed25519Key from paramiko.sftp import SFTPError, BaseSFTP from paramiko.sftp_client import SFTP, SFTPClient from paramiko.sftp_server import SFTPServer from paramiko.sftp_attr import SFTPAttributes from paramiko.sftp_handle import SFTPHandle from paramiko.sftp_si import SFTPServerInterface from paramiko.sftp_file import SFTPFile from paramiko.message import Message from paramiko.packet import Packetizer from paramiko.file import BufferedFile from paramiko.agent import Agent, AgentKey from paramiko.pkey import PKey, PublicBlob from paramiko.hostkeys import HostKeys from paramiko.config import SSHConfig from paramiko.proxy import ProxyCommand from paramiko.common import ( AUTH_SUCCESSFUL, AUTH_PARTIALLY_SUCCESSFUL, AUTH_FAILED, OPEN_SUCCEEDED, OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED, OPEN_FAILED_CONNECT_FAILED, OPEN_FAILED_UNKNOWN_CHANNEL_TYPE, OPEN_FAILED_RESOURCE_SHORTAGE, ) from paramiko.sftp import ( SFTP_OK, SFTP_EOF, SFTP_NO_SUCH_FILE, SFTP_PERMISSION_DENIED, SFTP_FAILURE, SFTP_BAD_MESSAGE, SFTP_NO_CONNECTION, SFTP_CONNECTION_LOST, SFTP_OP_UNSUPPORTED, ) from paramiko.common import io_sleep __all__ = [ 'Transport', 'SSHClient', 'MissingHostKeyPolicy', 'AutoAddPolicy', 'RejectPolicy', 'WarningPolicy', 'SecurityOptions', 'SubsystemHandler', 'Channel', 'PKey', 'RSAKey', 'DSSKey', 'Message', 'SSHException', 'AuthenticationException', 'PasswordRequiredException', 'BadAuthenticationType', 'ChannelException', 'BadHostKeyException', 'ProxyCommand', 'ProxyCommandFailure', 'SFTP', 'SFTPFile', 'SFTPHandle', 'SFTPClient', 'SFTPServer', 'SFTPError', 'SFTPAttributes', 'SFTPServerInterface', 'ServerInterface', 'BufferedFile', 'Agent', 'AgentKey', 'HostKeys', 'SSHConfig', 'util', 'io_sleep', ]
lgpl-2.1
-8,009,111,072,588,806,000
31.128205
79
0.753924
false
mishudark/indie
mongoforms/forms.py
1
4752
from mongoforms.forms import *
from .fields import MongoFormFieldGeneratorCustom
import types
from django import forms
from django.utils.datastructures import SortedDict
from mongoengine.base import BaseDocument
from mongoforms.fields import MongoFormFieldGenerator
from mongoforms.utils import mongoengine_validate_wrapper, iter_valid_fields
from mongoengine.fields import ReferenceField


class MongoFormMetaClassCustom(type):
    """Metaclass to create a new MongoForm."""

    def __new__(cls, name, bases, attrs):
        # get all valid existing Fields and sort them
        fields = [(field_name, attrs.pop(field_name)) for field_name, obj in \
            attrs.items() if isinstance(obj, forms.Field)]
        fields.sort(lambda x, y: cmp(x[1].creation_counter, y[1].creation_counter))

        # get all Fields from base classes
        for base in bases[::-1]:
            if hasattr(base, 'base_fields'):
                fields = base.base_fields.items() + fields

        # add the fields as "our" base fields
        attrs['base_fields'] = SortedDict(fields)

        # Meta class available?
        if 'Meta' in attrs and hasattr(attrs['Meta'], 'document') and \
           issubclass(attrs['Meta'].document, BaseDocument):
            doc_fields = SortedDict()
            formfield_generator = getattr(attrs['Meta'], 'formfield_generator', \
                MongoFormFieldGeneratorCustom)()

            # walk through the document fields
            for field_name, field in iter_valid_fields(attrs['Meta']):
                # add field and override clean method to respect mongoengine-validator
                doc_fields[field_name] = formfield_generator.generate(field_name, field)
                doc_fields[field_name].clean = mongoengine_validate_wrapper(
                    doc_fields[field_name].clean, field._validate)

            # write the new document fields to base_fields
            doc_fields.update(attrs['base_fields'])
            attrs['base_fields'] = doc_fields

        # maybe we need the Meta class later
        attrs['_meta'] = attrs.get('Meta', object())

        return super(MongoFormMetaClassCustom, cls).__new__(cls, name, bases, attrs)


class MongoFormIndie(forms.BaseForm):
    """Base MongoForm class. Used to create new MongoForms"""
    __metaclass__ = MongoFormMetaClassCustom

    def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
                 initial=None, error_class=forms.util.ErrorList,
                 label_suffix=':', empty_permitted=False, instance=None):
        """ initialize the form"""
        assert isinstance(instance, (types.NoneType, BaseDocument)), \
            'instance must be a mongoengine document, not %s' % \
            type(instance).__name__

        assert hasattr(self, 'Meta'), 'Meta class is needed to use MongoForm'

        # new instance or updating an existing one?
        if instance is None:
            if self._meta.document is None:
                raise ValueError('MongoForm has no document class specified.')
            self.instance = self._meta.document()
            object_data = {}
            self.instance._adding = True
        else:
            self.instance = instance
            self.instance._adding = False
            object_data = {}

            # walk through the document fields
            for field_name, field in iter_valid_fields(self._meta):
                # add field data if needed
                field_data = getattr(instance, field_name)
                if isinstance(self._meta.document._fields[field_name], ReferenceField):
                    # field data could be None for not populated refs
                    field_data = field_data and str(field_data.id)
                object_data[field_name] = field_data

        # additional initial data available?
        if initial is not None:
            object_data.update(initial)

        for field_name, field in iter_valid_fields(self._meta):
            if not data.get(field_name, None) and field.default:
                try:
                    default = field.default()
                except Exception, e:
                    default = field.default
                data[field_name] = default

        self._validate_unique = False

        super(MongoFormIndie, self).__init__(data, files, auto_id, prefix,
            object_data, error_class, label_suffix, empty_permitted)

    def save(self, commit=True):
        """save the instance or create a new one.."""

        # walk through the document fields
        for field_name, field in iter_valid_fields(self._meta):
            setattr(self.instance, field_name, self.cleaned_data.get(field_name))

        if commit:
            self.instance.save()

        return self.instance
mit
4,198,350,166,523,423,000
40.684211
88
0.616162
false
TGDiamond/Diamond
qa/rpc-tests/getblocktemplate.py
1
3681
#!/usr/bin/env python
# Copyright (c) 2014 The Diamond Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

# Exercise the listtransactions API

from test_framework import DiamondTestFramework
from diamondrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *


def check_array_result(object_array, to_match, expected):
    """
    Pass in array of JSON objects, a dictionary with key/value pairs
    to match against, and another dictionary with expected key/value
    pairs.
    """
    num_matched = 0
    for item in object_array:
        all_match = True
        for key,value in to_match.items():
            if item[key] != value:
                all_match = False
        if not all_match:
            continue
        for key,value in expected.items():
            if item[key] != value:
                raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
            num_matched = num_matched+1
    if num_matched == 0:
        raise AssertionError("No objects matched %s"%(str(to_match)))

import threading

class LongpollThread(threading.Thread):
    def __init__(self, node):
        threading.Thread.__init__(self)
        # query current longpollid
        templat = node.getblocktemplate()
        self.longpollid = templat['longpollid']
        # create a new connection to the node, we can't use the same
        # connection from two threads
        self.node = AuthServiceProxy(node.url, timeout=600)

    def run(self):
        self.node.getblocktemplate({'longpollid':self.longpollid})

class GetBlockTemplateTest(DiamondTestFramework):
    '''
    Test longpolling with getblocktemplate.
    '''

    def run_test(self, nodes):
        print "Warning: this test will take about 70 seconds in the best case. Be patient."
        nodes[0].setgenerate(True, 10)
        templat = nodes[0].getblocktemplate()
        longpollid = templat['longpollid']
        # longpollid should not change between successive invocations if nothing else happens
        templat2 = nodes[0].getblocktemplate()
        assert(templat2['longpollid'] == longpollid)

        # Test 1: test that the longpolling wait if we do nothing
        thr = LongpollThread(nodes[0])
        thr.start()
        # check that thread still lives
        thr.join(5)  # wait 5 seconds or until thread exits
        assert(thr.is_alive())

        # Test 2: test that longpoll will terminate if another node generates a block
        nodes[1].setgenerate(True, 1)  # generate a block on another node
        # check that thread will exit now that new transaction entered mempool
        thr.join(5)  # wait 5 seconds or until thread exits
        assert(not thr.is_alive())

        # Test 3: test that longpoll will terminate if we generate a block ourselves
        thr = LongpollThread(nodes[0])
        thr.start()
        nodes[0].setgenerate(True, 1)  # generate a block on another node
        thr.join(5)  # wait 5 seconds or until thread exits
        assert(not thr.is_alive())

        # Test 4: test that introducing a new transaction into the mempool will terminate the longpoll
        thr = LongpollThread(nodes[0])
        thr.start()
        # generate a random transaction and submit it
        (txid, txhex, fee) = random_transaction(nodes, Decimal("1.1"), Decimal("0.0"), Decimal("0.001"), 20)
        # after one minute, every 10 seconds the mempool is probed, so in 80 seconds it should have returned
        thr.join(60 + 20)
        assert(not thr.is_alive())

if __name__ == '__main__':
    GetBlockTemplateTest().main()
mit
-8,546,662,378,669,306,000
38.159574
108
0.652268
false
danianr/NINJa
joblist.py
1
1541
from collections import deque


class JobList(object):

    def __init__(self, jobMap=None, initial=None):
        self.jobs = dict()
        self.merged = deque()
        if type(jobMap) is dict:
            for (user, prev) in jobMap.iteritems():
                assert type(prev) is list
                self.jobs[user] = prev
                if initial is None:
                    self.merged.extendleft(prev)
        if type(initial) is deque:
            self.merged.extend(initial)

    def add(self, username, jobId):
        if username in self.jobs:
            for n in filter(lambda x: x in self.merged, self.jobs[username]):
                self.merged.remove(n)
            self.jobs[username].append(jobId)
        else:
            self.jobs[username] = [jobId]
        self.merged.extendleft(self.jobs[username])

    def remove(self, removedJobs):
        for n in filter(lambda x: x in self.merged, removedJobs):
            self.merged.remove(n)
        for jobseq in self.jobs.values():
            map(jobseq.remove, filter(lambda x: x in jobseq, removedJobs))

    def __iter__(self):
        return iter(self.merged)

    def __getitem__(self, n):
        return self.merged[n]

    def __getslice__(self, i, j):
        return self.merged[i:j]

    def __delitem__(self, n):
        self.remove([n])

    def __delslice__(self, i, j):
        self.remove(self.merged[i:j])

    def __repr__(self):
        return "JobList( jobMap=%s, initial=%s )" % \
               (repr(self.jobs), repr(self.merged))

    def __str__(self):
        return "%s" % list(self.merged)
mit
-6,834,064,029,022,998,000
23.078125
75
0.573005
false
exercism/xpython
exercises/diffie-hellman/diffie_hellman_test.py
1
1626
import unittest

from diffie_hellman import private_key, public_key, secret

# Tests adapted from `problem-specifications//canonical-data.json`


class DiffieHellmanTest(unittest.TestCase):
    def test_private_key_is_greater_than_1_and_less_than_p(self):
        primes = [5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]
        for p in primes:
            self.assertTrue(1 < private_key(p) < p)

    def test_private_key_is_random(self):
        """
        Can fail due to randomness, but most likely will not,
        due to pseudo-randomness and the large number chosen
        """
        p = 2147483647
        private_keys = [private_key(p) for _ in range(5)]
        self.assertEqual(len(set(private_keys)), len(private_keys))

    def test_can_calculate_public_key_using_private_key(self):
        p = 23
        g = 5
        private_key = 6
        self.assertEqual(8, public_key(p, g, private_key))

    def test_can_calculate_secret_using_other_party_s_public_key(self):
        p = 23
        their_public_key = 19
        my_private_key = 6
        self.assertEqual(2, secret(p, their_public_key, my_private_key))

    def test_key_exchange(self):
        p = 23
        g = 5
        alice_private_key = private_key(p)
        bob_private_key = private_key(p)
        alice_public_key = public_key(p, g, alice_private_key)
        bob_public_key = public_key(p, g, bob_private_key)
        secret_a = secret(p, bob_public_key, alice_private_key)
        secret_b = secret(p, alice_public_key, bob_private_key)
        self.assertTrue(secret_a == secret_b)


if __name__ == "__main__":
    unittest.main()
mit
3,912,888,463,024,547,000
32.875
72
0.611931
false
jarhill0/ABot
memetext.py
1
6729
spork = 'hi every1 im new!!!!!!! holds up spork my name is katy but u can call me t3h PeNgU1N oF d00m!!!!!!!! lol…as ' \ 'u can see im very random!!!! thats why i came here, 2 meet random ppl like me _… im 13 years old (im mature ' \ '4 my age tho!!) i like 2 watch invader zim w/ my girlfreind (im bi if u dont like it deal w/it) its our ' \ 'favorite tv show!!! bcuz its SOOOO random!!!! shes random 2 of course but i want 2 meet more random ppl =) ' \ 'like they say the more the merrier!!!! lol…neways i hope 2 make alot of freinds here so give me lots of ' \ 'commentses!!!!\nDOOOOOMMMM!!!!!!!!!!!!!!!! <--- me bein random again _^ hehe…toodles!!!!!\n\nlove and ' \ 'waffles,\n\nt3h PeNgU1N oF d00m' settings = 'Current settings:\n/redditlimit followed by a number to set limit of reddit posts displayed by ' \ '/redditposts (example usage: `/redditlimit 5`)\n/subscribe or /unsubscribe followed by a topic (' \ '`xkcd`, `launches`, etc.) to subscribe or unsubscribe the current chat from notifications about ' \ 'that topic\n/timezone followed by a number between -24 and 24 to set your offset from UTC' marines = 'What the fuck did you just fucking say about me, you little bitch? I’ll have you know I graduated top of ' \ 'my class in the Navy Seals, and I’ve been involved in numerous secret raids on Al-Quaeda, and I have over ' \ '300 confirmed kills. I am trained in gorilla warfare and I’m the top sniper in the entire US armed forces.' \ ' You are nothing to me but just another target. I will wipe you the fuck out with precision the likes of ' \ 'which has never been seen before on this Earth, mark my fucking words. You think you can get away with ' \ 'saying that shit to me over the Internet? Think again, fucker. As we speak I am contacting my secret ' \ 'network of spies across the USA and your IP is being traced right now so you better prepare for the ' \ 'storm, maggot. The storm that wipes out the pathetic little thing you call your life. You’re fucking dead,' \ ' kid. I can be anywhere, anytime, and I can kill you in over seven hundred ways, and that’s just with my' \ ' bare hands. Not only am I extensively trained in unarmed combat, but I have access to the entire arsenal' \ ' of the United States Marine Corps and I will use it to its full extent to wipe your miserable ass off the' \ ' face of the continent, you little shit. If only you could have known what unholy retribution your little ' \ '“clever” comment was about to bring down upon you, maybe you would have held your fucking tongue. But you ' \ 'couldn’t, you didn’t, and now you’re paying the price, you goddamn idiot. I will shit fury all over you ' \ 'and you will drown in it. You’re fucking dead, kiddo.' myrynys = 'Whyt thy fyck dyd yyy yyst fyckyng syy ybyyt my, yyy lyttly bytch? y’ll hyvy yyy knyw Y ' \ 'grydyytyd typ yf my clyss yn thy Nyvy Syyls, ynd Y’ve byyn ynvylvyd yn nymyryys sycryt ryyds yn ' \ 'Yl-Qyyydy, ynd Y hyvy yvyr 300 cynfyrmyd kylls. Y ym tryynyd yn gyrylly wyrfyry ynd Y’m thy typ ' \ 'snypyr yn thy yntyry YS yrmyd fyrcys. Yyy yry nythyng ty my byt jyst ynythyr tyrgyt. Y wyll wypy ' \ 'yyy thy fyck yyt wyth prycysyyn thy lykys yf whych hys nyvyr byyn syyn byfyry yn thys Yyrth, ' \ 'myrk my fyckyng wyrds. Yyy thynk yyy cyn gyt ywyy wyth syyyng thyt shyt ty my yvyr thy Yntyrnyt?' \ 'Thynk ygyyn, fyckyr. Ys wy spyyk Y ym cyntyctyng my sycryt nytwyrk yf spyys ycryss thy YSY ynd ' \ 'yyyr YP ys byyng trycyd ryght nyw sy yyy byttyr prypyry fyr thy styrm, myggyt. 
Thy styrm thyt ' \ 'wypys yyt thy pythytyc lyttly thyng yyy cyll yyyr lyfy. Yyy’ry fyckyng dyyd, kyd. Y cyn by ' \ 'ynywhyry, ynytymy, ynd Y cyn kyll yyy yn yvyr syvyn hyndryd wyys, ynd thyt’s jyst wyth my byry ' \ 'hynds. Nyt ynly ym Y yxtynsyvyly tryynyd yn ynyrmyd cymbyt, byt y hyvy yccyss ty thy yntyry ' \ 'yrsynyl yf thy Ynytyd Stytys Myryny Cyrps ynd Y wyll ysy yt ty yts fyll yxtynt ty wypy yyyr ' \ 'mysyrybly yss yff thy fycy yf thy cyntynynt, yyy lyttly shyt. Yf ynly yyy cyyld hyvy knywn whyt ' \ 'ynhyly rytrybytyyn yyyr lyttly “clyvyr” cymmynt wys abyyt ty bryng dywn ypyn yyy, ' \ 'myyby yyy wyyld hyvy hyld yyyr fyckyng tyngyy. Byt yyy cyyldn’t, yyy dydn’t, ynd nyw yyy’ry ' \ 'pyyyng thy prycy, yyy gyddymn ydyyt. Y wyll shyt fyry yll yvyr yyy ynd yyy wyll drywn yn yt. ' \ 'Yyy’ry fyckyng dyyd, kyddy.' xD = """ 😂😂😂😂😂😂😂😂😂😂😂😂😂😂😂 😂🆒🆒🆒🆒🆒🆒🆒🆒🆒🆒🆒🆒🆒😂 😂🆒💯🆒🆒🆒💯🆒💯💯💯🆒🆒🆒😂 😂🆒💯💯🆒💯💯🆒💯🆒💯💯🆒🆒😂 😂🆒🆒💯🆒💯🆒🆒💯🆒🆒💯💯🆒😂 😂🆒🆒💯💯💯🆒🆒💯🆒🆒🆒💯🆒😂 😂🆒🆒🆒💯🆒🆒🆒💯🆒🆒🆒💯🆒😂 😂🆒🆒💯💯💯🆒🆒💯🆒🆒🆒💯🆒😂 😂🆒🆒💯🆒💯🆒🆒💯🆒🆒💯💯🆒😂 😂🆒💯💯🆒💯💯🆒💯🆒💯💯🆒🆒😂 😂🆒💯🆒🆒🆒💯🆒💯💯💯🆒🆒🆒😂 😂🆒🆒🆒🆒🆒🆒🆒🆒🆒🆒🆒🆒🆒😂 😂😂😂😂😂😂😂😂😂😂😂😂😂😂😂 """ pede = """ ╚═( ͡° ͜ʖ ͡°)═╝ ╚═(███)═╝ ╚═(███)═╝ .╚═(███)═╝ ..╚═(███)═╝ …╚═(███)═╝ …╚═(███)═╝ ..╚═(███)═╝ .╚═(███)═╝ ╚═(███)═╝ .╚═(███)═╝ ..╚═(███)═╝ …╚═(███)═╝ …╚═(███)═╝ ..╚═(███)═╝ .╚═(███)═╝ ╚═(███)═╝ .╚═(███)═╝ ..╚═(███)═╝ …╚═(███)═╝ …╚═(███)═╝ ..╚═(███)═╝ .╚═(███)═╝ ╚═(███)═╝ .╚═(███)═╝ ..╚═(███)═╝ …╚═(███)═╝ …╚═(███)═╝ ..╚═(███)═╝ .╚═(███)═╝ ╚═(███)═╝ .╚═(███)═╝ ..╚═(███)═╝ …╚═(███)═╝ …╚═(███)═╝ ..╚═(███)═╝ .╚═(███)═╝ ╚═(███)═╝ .╚═(███)═╝ ..╚═(███)═╝ …╚═(███)═╝ …╚═(███)═╝ …..╚(███)╝ ……╚(██)╝ ………(█) ……….* """
gpl-3.0
1,667,627,879,406,881,800
48.036036
120
0.580669
false
hit9/skylark
examples/messageboard/messageboard/views.py
1
1358
# coding=utf8

from datetime import datetime

from messageboard import app
from messageboard.models import Message
from flask import flash, render_template, request, redirect, url_for


@app.route('/', methods=['GET'])
def index():
    query = Message.orderby(
        Message.create_at, desc=True).select()  # sort by created time
    results = query.execute()
    messages = results.all()
    return render_template('template.html', messages=messages)


@app.route('/create', methods=['POST'])
def create():
    title = request.form['title']
    content = request.form['content']

    if title and content:
        message = Message.create(
            title=title, content=content, create_at=datetime.now())
        if message is not None:  # ok
            flash(dict(type='success', content='New message created'))
        else:  # create failed
            flash(dict(type='error', content='Failed to create new message'))
    else:  # invalid input
        flash(dict(type='warning', content='Empty input'))
    return redirect(url_for('index'))


@app.route('/delete/<int:id>')
def delete(id):
    query = Message.at(id).delete()

    if query.execute():
        flash(dict(type='success', content='Message %d dropped' % id))
    else:
        flash(dict(type='error', content='Failed to drop message %d' % id))
    return redirect(url_for('index'))
bsd-2-clause
4,533,618,793,132,142,000
29.863636
77
0.648012
false
rwl/muntjac
muntjac/demo/sampler/features/panels/PanelBasicExample.py
1
1214
from muntjac.api import VerticalLayout, Panel, Label, Button
from muntjac.ui.button import IClickListener


class PanelBasicExample(VerticalLayout, IClickListener):

    def __init__(self):
        super(PanelBasicExample, self).__init__()

        self.setSpacing(True)

        # Panel 1 - with caption
        self._panel = Panel('This is a standard Panel')
        self._panel.setHeight('200px')  # we want scrollbars

        # let's adjust the panels default layout (a VerticalLayout)
        layout = self._panel.getContent()
        layout.setMargin(True)  # we want a margin
        layout.setSpacing(True)  # and spacing between components
        self.addComponent(self._panel)

        # Let's add a few rows to provoke scrollbars:
        for _ in range(20):
            l = Label('The quick brown fox jumps over the lazy dog.')
            self._panel.addComponent(l)

        # Caption toggle:
        b = Button('Toggle caption')
        b.addListener(self, IClickListener)
        self.addComponent(b)

    def buttonClick(self, event):
        if self._panel.getCaption() == '':
            self._panel.setCaption('This is a standard Panel')
        else:
            self._panel.setCaption('')
apache-2.0
2,618,713,346,185,340,000
31.810811
69
0.625206
false
intip/aldryn-bootstrap3
aldryn_bootstrap3/model_fields.py
1
8060
# -*- coding: utf-8 -*- from __future__ import unicode_literals, absolute_import from six import with_metaclass import django.core.exceptions import django.db.models import django.forms from django.utils.encoding import smart_text from . import fields class SouthMixinBase(object): south_field_class = '' def south_field_triple(self): """Returns a suitable description of this field for South.""" if not self.south_field_class: raise NotImplementedError('please set south_field_class when using the south field mixin') # We'll just introspect ourselves, since we inherit. from south.modelsinspector import introspector field_class = self.south_field_class args, kwargs = introspector(self) # That's our definition! return field_class, args, kwargs class SouthCharFieldMixin(SouthMixinBase): south_field_class = "django.db.models.fields.CharField" class SouthTextFieldMixin(SouthMixinBase): south_field_class = "django.db.models.fields.TextField" class SouthIntegerFieldMixin(SouthMixinBase): south_field_class = "django.db.models.fields.IntegerField" class Classes(django.db.models.TextField, SouthTextFieldMixin): # TODO: validate default_field_class = fields.Classes def __init__(self, *args, **kwargs): if 'blank' not in kwargs: kwargs['blank'] = True if 'default' not in kwargs: kwargs['default'] = '' if 'help_text' not in kwargs: kwargs['help_text'] = 'space separated classes that are added to the class. see <a href="http://getbootstrap.com/css/" target="_blank">bootstrap docs</a>' super(Classes, self).__init__(*args, **kwargs) def formfield(self, **kwargs): defaults = { 'form_class': self.default_field_class, } defaults.update(kwargs) return super(Classes, self).formfield(**defaults) class Context(django.db.models.fields.CharField, SouthCharFieldMixin): default_field_class = fields.Context def __init__(self, *args, **kwargs): if 'max_length' not in kwargs: kwargs['max_length'] = 255 if 'blank' not in kwargs: kwargs['blank'] = False if 'default' not in kwargs: kwargs['default'] = self.default_field_class.DEFAULT super(Context, self).__init__(*args, **kwargs) def formfield(self, **kwargs): defaults = { 'form_class': self.default_field_class, 'choices_form_class': self.default_field_class, } defaults.update(kwargs) return super(Context, self).formfield(**defaults) def get_choices(self, **kwargs): # if there already is a "blank" choice, don't add another # default blank choice if '' in dict(self.choices).keys(): kwargs['include_blank'] = False return super(Context, self).get_choices(**kwargs) class Size(django.db.models.CharField, SouthCharFieldMixin): default_field_class = fields.Size def __init__(self, *args, **kwargs): if 'max_length' not in kwargs: kwargs['max_length'] = 255 if 'blank' not in kwargs: kwargs['blank'] = True if 'default' not in kwargs: kwargs['default'] = self.default_field_class.DEFAULT super(Size, self).__init__(*args, **kwargs) def formfield(self, **kwargs): defaults = { 'form_class': self.default_field_class, 'choices_form_class': self.default_field_class, } defaults.update(kwargs) return super(Size, self).formfield(**defaults) def get_choices(self, **kwargs): # if there already is a "blank" choice, don't add another # default blank choice if '' in dict(self.choices).keys(): kwargs['include_blank'] = False return super(Size, self).get_choices(**kwargs) class Icon(django.db.models.CharField, SouthCharFieldMixin): default_field_class = fields.Icon def __init__(self, *args, **kwargs): if 'max_length' not in kwargs: kwargs['max_length'] = 255 if 'blank' not in kwargs: kwargs['blank'] = 
True if 'default' not in kwargs: kwargs['default'] = self.default_field_class.DEFAULT super(Icon, self).__init__(*args, **kwargs) def formfield(self, **kwargs): defaults = { 'form_class': self.default_field_class, } defaults.update(kwargs) return super(Icon, self).formfield(**defaults) class IntegerField(django.db.models.IntegerField, SouthIntegerFieldMixin): default_field_class = fields.Integer def __init__(self, verbose_name=None, name=None, min_value=None, max_value=None, **kwargs): self.min_value, self.max_value = min_value, max_value django.db.models.IntegerField.__init__(self, verbose_name, name, **kwargs) def formfield(self, **kwargs): defaults = { 'form_class': self.default_field_class, 'min_value': self.min_value, 'max_value': self.max_value, } defaults.update(kwargs) return super(IntegerField, self).formfield(**defaults) class MiniText(django.db.models.TextField, SouthTextFieldMixin): default_field_class = fields.MiniText def __init__(self, *args, **kwargs): if 'blank' not in kwargs: kwargs['blank'] = True if 'default' not in kwargs: kwargs['default'] = '' super(MiniText, self).__init__(*args, **kwargs) def formfield(self, **kwargs): defaults = { 'form_class': self.default_field_class, } defaults.update(kwargs) return super(MiniText, self).formfield(**defaults) class LinkOrButton(django.db.models.fields.CharField, SouthCharFieldMixin): default_field_class = fields.LinkOrButton def __init__(self, *args, **kwargs): if 'max_length' not in kwargs: kwargs['max_length'] = 10 if 'blank' not in kwargs: kwargs['blank'] = False if 'default' not in kwargs: kwargs['default'] = self.default_field_class.DEFAULT super(LinkOrButton, self).__init__(*args, **kwargs) def formfield(self, **kwargs): defaults = { 'form_class': self.default_field_class, 'choices_form_class': self.default_field_class, } defaults.update(kwargs) return super(LinkOrButton, self).formfield(**defaults) def get_choices(self, **kwargs): # if there already is a "blank" choice, don't add another # default blank choice if '' in dict(self.choices).keys(): kwargs['include_blank'] = False return super(LinkOrButton, self).get_choices(**kwargs) # class JSONField(json_field.JSONField, SouthTextFieldMixin): # pass class Responsive(MiniText): default_field_class = fields.Responsive def __init__(self, *args, **kwargs): if 'blank' not in kwargs: kwargs['blank'] = True if 'default' not in kwargs: kwargs['default'] = '' super(Responsive, self).__init__(*args, **kwargs) def formfield(self, **kwargs): defaults = { 'form_class': self.default_field_class, } defaults.update(kwargs) return super(Responsive, self).formfield(**defaults) class ResponsivePrint(MiniText): default_field_class = fields.ResponsivePrint def __init__(self, *args, **kwargs): if 'blank' not in kwargs: kwargs['blank'] = True if 'default' not in kwargs: kwargs['default'] = '' super(ResponsivePrint, self).__init__(*args, **kwargs) def formfield(self, **kwargs): defaults = { 'form_class': self.default_field_class, } defaults.update(kwargs) return super(ResponsivePrint, self).formfield(**defaults) #TODO: # * btn-block, disabled # * pull-left, pull-right # * margins/padding
bsd-3-clause
6,422,208,715,115,952,000
32.443983
166
0.6134
false
armenzg/build-mozharness
configs/b2g/taskcluster-lightsaber-nightly.py
1
1263
#!/usr/bin/env python
import os.path

config = {
    "default_vcs": "tc-vcs",
    "default_actions": [
        'checkout-sources',
        'checkout-lightsaber',
        'build',
        'build-symbols',
        'make-updates',
        'prep-upload',
        'submit-to-balrog'
    ],
    "balrog_credentials_file": "balrog_credentials",
    "nightly_build": True,
    "env": {
        "GAIA_OPTIMIZE": "1",
        "B2G_UPDATER": "1",
        "LIGHTSABER": "1",
        "BOWER_FLAGS": "--allow-root",
        "B2G_PATH": "%(work_dir)s",
        "WGET_OPTS": "-c -q"
    },
    "is_automation": True,
    "repo_remote_mappings": {
        'https://android.googlesource.com/': 'https://git.mozilla.org/external/aosp',
        'git://codeaurora.org/': 'https://git.mozilla.org/external/caf',
        'https://git.mozilla.org/b2g': 'https://git.mozilla.org/b2g',
        'git://github.com/mozilla-b2g/': 'https://git.mozilla.org/b2g',
        'git://github.com/mozilla/': 'https://git.mozilla.org/b2g',
        'https://git.mozilla.org/releases': 'https://git.mozilla.org/releases',
        'http://android.git.linaro.org/git-ro/': 'https://git.mozilla.org/external/linaro',
        'git://github.com/apitrace/': 'https://git.mozilla.org/external/apitrace',
    },
}
mpl-2.0
6,169,796,958,951,442,000
35.085714
91
0.562154
false
alubbock/pysb-legacy
pysb/tools/render_species.py
1
4636
#!/usr/bin/env python import sys import os import re import pygraphviz import pysb.bng def run(model): pysb.bng.generate_equations(model) graph = pygraphviz.AGraph(name="%s species" % model.name, rankdir="LR", fontname='Arial') graph.edge_attr.update(fontname='Arial', fontsize=8) for si, cp in enumerate(model.species): sgraph_name = 'cluster_s%d' % si cp_label = re.sub(r'% ', '%<br align="left"/>', str(cp)) + '<br align="left"/>' sgraph_label = '<<font point-size="10" color="blue">s%d</font><br align="left"/><font face="Consolas" point-size="6">%s</font>>' % (si, cp_label) sgraph = graph.add_subgraph(name=sgraph_name, label=sgraph_label, color="gray75", sortv=sgraph_name) bonds = {} for mi, mp in enumerate(cp.monomer_patterns): monomer_node = '%s_%d' % (sgraph_name, mi) monomer_label = '<<table border="0" cellborder="1" cellspacing="0">' monomer_label += '<tr><td bgcolor="#a0ffa0"><b>%s</b></td></tr>' % mp.monomer.name for site in mp.monomer.sites: site_state = None cond = mp.site_conditions[site] if isinstance(cond, str): site_state = cond elif isinstance(cond, tuple): site_state = cond[0] site_label = site if site_state is not None: site_label += '=<font color="purple">%s</font>' % site_state monomer_label += '<tr><td port="%s">%s</td></tr>' % (site, site_label) for site, value in mp.site_conditions.items(): site_bonds = [] # list of bond numbers if isinstance(value, int): site_bonds.append(value) elif isinstance(value, tuple): site_bonds.append(value[1]) elif isinstance(value, list): site_bonds += value for b in site_bonds: bonds.setdefault(b, []).append((monomer_node, site)) monomer_label += '</table>>' sgraph.add_node(monomer_node, label=monomer_label, shape="none", fontname="Arial", fontsize=8) for bi, sites in bonds.items(): node_names, port_names = zip(*sites) sgraph.add_edge(node_names, tailport=port_names[0], headport=port_names[1], label=str(bi)) return graph.string() usage = """ Usage: python -m pysb.tools.render_species mymodel.py > mymodel.dot Renders the species from a model into the "dot" graph format which can be visualized with Graphviz. To create a PDF from the .dot file, use the Graphviz tools in the following command pipeline: ccomps -x mymodel.dot | dot | gvpack -m0 | neato -n2 -T pdf -o mymodel.pdf You can also change the "dot" command to "circo" or "sfdp" for a different type of layout. Note that you can pipe the output of render_species straight into a Graphviz command pipeline without creating an intermediate .dot file, which is especially helpful if you are making continuous changes to the model and need to visualize your changes repeatedly: python -m pysb.tools.render_species mymodel.py | ccomps -x | dot | gvpack -m0 | neato -n2 -T pdf -o mymodel.pdf Note that some PDF viewers will auto-reload a changed PDF, so you may not even need to manually reopen it every time you rerun the tool. """ usage = usage[1:] # strip leading newline if __name__ == '__main__': # sanity checks on filename if len(sys.argv) <= 1: print usage, exit() model_filename = sys.argv[1] if not os.path.exists(model_filename): raise Exception("File '%s' doesn't exist" % model_filename) if not re.search(r'\.py$', model_filename): raise Exception("File '%s' is not a .py file" % model_filename) sys.path.insert(0, os.path.dirname(model_filename)) model_name = re.sub(r'\.py$', '', os.path.basename(model_filename)) # import it try: # FIXME if the model has the same name as some other "real" module which we use, # there will be trouble (use the imp package and import as some safe name?) 
model_module = __import__(model_name) except StandardError as e: print "Error in model script:\n" raise # grab the 'model' variable from the module try: model = model_module.__dict__['model'] except KeyError: raise Exception("File '%s' isn't a model file" % model_filename) print run(model)
bsd-2-clause
2,940,613,078,015,021,600
42.735849
153
0.59189
false
xiangarpm/arpym_template
arpym_template/estimation/flexible_probabilities.py
1
4668
# -*- coding: utf-8 -*- """ For details, see `Section 3.1 <https://www.arpm.co/lab/redirect.php?permalink=setting-flexible-probabilities>`_. """ from collections import namedtuple import numpy as np class FlexibleProbabilities(object): """Flexible Probabilities """ def __init__(self, data): self.x = data self.p = np.ones(len(data))/len(data) def shape(self): """Shape of the data """ return self.x.shape def mean(self): """Sample mean with flexible probabilities """ return np.dot(self.p, self.x) def cov(self): """Sample covariance with flexible probabilities """ x_ = self.x - np.mean(self.x, axis=0) return np.dot(np.multiply(np.transpose(x_), self.p), x_) def equal_weight(self): """Equally weighted probabilities """ self.p = np.ones(len(self.x))/len(self.x) def exponential_decay(self, tau): """Exponentail decay probabilities """ t_ = len(self.x) self.p = np.exp(-np.log(2)/tau*(t_-np.arange(0, t_))) self.p = self.p / np.sum(self.p) def smooth_kernel(self, z=None, z_star=None, h=None, gamma=2): """Smooth kernel probabilities """ if z is None: z = self.x[:, 0] if z_star is None: z_star = np.mean(z) if h is None: h = np.std(z) self.p = np.exp(-(np.abs(z - z_star)/h)**gamma) self.p = self.p / np.sum(self.p) def effective_scenarios(self, Type=None): """This def computes the Effective Number of Scenarios of Flexible Probabilities via different types of defs For details on the function, please see |ex_effective_scenarios| |code_effective_scenarios| Note: The exponential of the entropy is set as default, otherwise specify ``Type.ExpEntropy.on = true`` to use the exponential of the entropy or specify ``Type.GenExpEntropy.on = true`` and supply the scalar ``Type.ExpEntropy.g`` to use the generalized exponential of the entropy. Args: Type (tuple): type of def: ``ExpEntropy``, ``GenExpEntropy`` Returns: ens (double): Effective Number of Scenarios .. |ex_effective_scenarios| image:: icon_ex_inline.png :scale: 20 % :target: https://www.arpm.co/lab/redirect.php?permalink=EBEffectNbScenFun .. |code_effective_scenarios| image:: icon-code-1.png :scale: 20 % :target: https://www.arpm.co/lab/redirect.php?code=EffectiveScenarios """ if Type is None: Type = namedtuple('type', ['Entropy']) Type.Entropy = 'Exp' if Type.Entropy != 'Exp': Type.Entropy = 'GenExp' # Code p_ = self.p if Type.Entropy == 'Exp': p_[p_ == 0] = 10 ** (-250) # avoid log(0) in ens computation ens = np.exp([email protected](p_.T)) else: ens = np.sum(p_ ** Type.g) ** (-1 / (Type.g - 1)) return ens def diff_length_mlfp(fp, nu, threshold, smartinverse=0, maxiter=10**5): """Maximum-likelihood with flexible probabilities for different-length series For details on the function, please see |ex_diff_length_mlfp| |code_diff_length_mlfp| Note: We suppose the missing values, if any, are at the beginning. (the farthest observations in the past could be missing). We reshuffle the series in a nested pattern, such that the series with the longer history comes first and the one with the shorter history comes last. Args: fp (FlexibleProbabilities): obsrevations with flexible probabilities nu (double): degrees of freedom for the multivariate Student t-distribution threshold (double): convergence thresholds smartinverse (double, optional): additional parameter: set it to 1 to use LRD smart inverse in the regression process maxiter (int, optional): maximum number of iterations inside ``MaxLikFPTReg`` Returns: mu (numpy.ndarray): DLFP estimate of the location parameter sig2 (numpy.ndarray): DLFP estimate of the dispersion parameter .. 
|ex_diff_length_mlfp| image:: icon_ex_inline.png :scale: 20 % :target: https://www.arpm.co/lab/redirect.php?permalink=DiffLengthRout .. |code_diff_length_mlfp| image:: icon-code-1.png :scale: 20 % :target: https://www.arpm.co/lab/redirect.php?codeplay=DiffLengthMLFP """ return None
bsd-2-clause
3,235,306,941,653,997,600
32.106383
95
0.592759
false
DigitalCampus/django-oppia
tests/av/models/test_models.py
1
3099
from oppia.test import OppiaTestCase

from av.models import UploadedMedia, UploadedMediaImage, image_file_name


class AVModelsTest(OppiaTestCase):

    fixtures = ['tests/test_user.json',
                'tests/test_oppia.json',
                'tests/test_permissions.json',
                'tests/test_av_uploadedmedia.json',
                'tests/test_course_permissions.json']

    def test_uploadedmedia_str(self):
        um = UploadedMedia.objects.get(pk=1)
        self.assertEqual(
            'uploaded/2020/11/sample_video.m4v', str(um))

    def test_uploadedmediaimage_str(self):
        um = UploadedMediaImage.objects.get(pk=1)
        self.assertEqual(
            'uploaded/images/fr/am/frame-001_71mRFnT.png', str(um))

    def test_media_filename(self):
        um = UploadedMedia.objects.get(pk=1)
        self.assertEqual('sample_video.m4v', um.filename())

    def test_media_default_image_set(self):
        um = UploadedMedia.objects.get(pk=1)
        self.assertEqual('uploaded/images/fr/am/frame-001_71mRFnT.png',
                         um.get_default_image().image.name)

    def test_media_default_image_not_set(self):
        um = UploadedMedia.objects.get(pk=2)
        self.assertEqual('uploaded/images/fr/am/frame-004_DjG9Zk4.png',
                         um.get_default_image().image.name)

    def test_media_default_image_none(self):
        um = UploadedMedia.objects.get(pk=3)
        self.assertRaises(UploadedMediaImage.DoesNotExist)
        self.assertEqual(None, um.get_default_image())

    def test_uploadedmedia_file_missing_embed(self):
        um = UploadedMedia.objects.create(create_user=self.admin_user,
                                          update_user=self.admin_user,
                                          file="my_media_file.m4v")
        um.get_embed_code("http://mydomain.com/")
        self.assertRaises(FileNotFoundError)

    def test_uploadedmedia_file_missing_new_delete(self):
        um = UploadedMedia.objects.create(create_user=self.admin_user,
                                          update_user=self.admin_user,
                                          file="my_media_file2.m4v")
        um.delete()
        self.assertRaises(OSError)

    def test_uploadedmedia_image_filename(self):
        um = UploadedMedia.objects.get(pk=1)
        umi = UploadedMediaImage.objects.create(create_user=self.admin_user,
                                                image="my_media_file3.m4v",
                                                uploaded_media=um)
        name = image_file_name(umi, "my_media_file2.m4v")
        self.assertEqual("uploaded/images/my/_m/my_media_file2.m4v", name)

    def test_uploadedmedia_file_missing_existing_delete(self):
        um = UploadedMedia.objects.get(pk=1)
        umi = UploadedMediaImage.objects.create(create_user=self.admin_user,
                                                image="my_media_file3.m4v",
                                                uploaded_media=um)
        umi.delete()
        self.assertRaises(OSError)
gpl-3.0
-6,459,909,960,788,631,000
40.878378
76
0.576638
false
Azulinho/sunflower-file-manager-with-tmsu-tagging-support
application/plugins/tmsu_column/plugin.py
1
1904
import gtk

from plugins.file_list.plugin import Column, FileList
from plugin_base.column_extension import ColumnExtension
from subprocess import check_output


def register_plugin(application):
    """Register plugin class with application"""
    application.register_column_extension(FileList, TagsColumn)


class BaseColumn(ColumnExtension):
    """Base class for extending owner and group for item list"""

    def __init__(self, parent, store):
        ColumnExtension.__init__(self, parent, store)
        self._parent = parent

        # create column object
        self._create_column()

    def _create_column(self):
        """Create column"""
        self._cell_renderer = gtk.CellRendererText()
        self._parent.set_default_font_size(self._get_column_name(), 8)

        self._column = gtk.TreeViewColumn(self._get_column_title())
        self._column.pack_start(self._cell_renderer, True)
        self._column.set_data('name', self._get_column_name())

    def _get_column_name(self):
        """Returns column name"""
        return None

    def _get_column_title(self):
        """Returns column title"""
        return None

    def __set_cell_data(self, column, cell, store, selected_iter, data=None):
        """Set column value"""
        pass


class TagsColumn(BaseColumn):
    """Adds support for displaying tags in item list"""

    def __set_cell_data(self, column, cell, store, selected_iter, data=None):
        """Set column value"""
        is_parent = store.get_value(selected_iter, Column.IS_PARENT_DIR)
        value = (store.get_value(selected_iter, Column.TAGS), '')[is_parent]
        cell.set_property('text', value)

    def _create_column(self):
        """Configure column"""
        BaseColumn._create_column(self)
        self._column.set_cell_data_func(self._cell_renderer, self.__set_cell_data)

    def _get_column_name(self):
        """Returns column name"""
        return 'tags'

    def _get_column_title(self):
        """Returns column title"""
        return _('Tags')

    def get_sort_column(self):
        """Return sort column"""
        return Column.TAGS
gpl-3.0
-4,224,020,129,806,103,600
25.816901
76
0.707458
false
miquelcampos/GEAR_mc
gear/xsi/rig/component/eyelid_01/guide.py
1
7407
''' This file is part of GEAR_mc. GEAR_mc is a fork of Jeremie Passerin's GEAR project. GEAR is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this program. If not, see <http://www.gnu.org/licenses/lgpl.html>. Author: Jeremie Passerin [email protected] www.jeremiepasserin.com Fork Author: Miquel Campos [email protected] www.miqueltd.com Date: 2013 / 08 / 16 ''' ## @package gear.xsi.rig.component.eyelid_01.guide # @author Miquel Campos # ########################################################## # GLOBAL ########################################################## # gear from gear.xsi import xsi, c, XSIMath from gear.xsi.rig.component.guide import ComponentGuide import gear.xsi.applyop as aop # guide info AUTHOR = "Miquel Campos " URL = "http://www.miqueltd.com" EMAIL = "[email protected]" VERSION = [1,0,0] TYPE = "eyelid_01" NAME = "eyelid" DESCRIPTION = "eyelids rig" ########################################################## # CLASS ########################################################## class Guide(ComponentGuide): compType = TYPE compName = NAME description = DESCRIPTION author = AUTHOR url = URL email = EMAIL version = VERSION # ===================================================== ## # @param self def postInit(self): self.pick_transform = ["root", "#_loc"] self.save_transform = ["root", "upVector", "direction", "#_loc"] self.save_blade = ["blade"] self.addMinMax("#_loc", 1, -1) # ===================================================== ## Add more object to the object definition list. # @param self def addObjects(self): self.root = self.addRoot() self.locs = self.addLocMulti("#_loc", self.root, False) vTemp = XSIMath.CreateVector3(self.root.Kinematics.Global.PosX.Value , self.root.Kinematics.Global.PosY.Value +2, self.root.Kinematics.Global.PosZ.Value ) self.upVector = self.addLoc("upVector", self.root, vTemp ) vTemp = XSIMath.CreateVector3(self.root.Kinematics.Global.PosX.Value , self.root.Kinematics.Global.PosY.Value , self.root.Kinematics.Global.PosZ.Value +2 ) self.direction = self.addLoc("direction", self.root, vTemp ) centers = [self.direction, self.root, self.upVector] self.dispcrv = self.addDispCurve("crvUp", centers) self.blade = self.addBlade("blade", self.root, self.upVector) centers = [] centers.extend(self.locs) self.dispcrv = self.addDispCurve("crv", centers) # ===================================================== ## Add more parameter to the parameter definition list. # @param self def addParameters(self): # eye corners controlers self.pCornerA = self.addParam("cornerARef", c.siInt4, None, 0, None) self.pCornerAArray = self.addParam("cornerARefArray", c.siString, "") self.pCornerB = self.addParam("cornerBRef", c.siInt4, None, 0, None) self.pCornerBArray = self.addParam("cornerBRefArray", c.siString, "") # ===================================================== ## Add layout for new parameters. 
# @param self def addLayout(self): # -------------------------------------------------- # Items cornerAItemsCode = "cornerARefItems = []" +"\r\n"+\ "if PPG."+self.pCornerAArray.scriptName+".Value:" +"\r\n"+\ " a = PPG."+self.pCornerAArray.scriptName+".Value.split(',')" +"\r\n"+\ " for i, v in enumerate(a):" +"\r\n"+\ " cornerARefItems.append(a[i])" +"\r\n"+\ " cornerARefItems.append(i)" +"\r\n"+\ "item.UIItems = cornerARefItems" +"\r\n" cornerBItemsCode = "cornerBRefItems = []" +"\r\n"+\ "if PPG."+self.pCornerBArray.scriptName+".Value:" +"\r\n"+\ " a = PPG."+self.pCornerBArray.scriptName+".Value.split(',')" +"\r\n"+\ " for i, v in enumerate(a):" +"\r\n"+\ " cornerBRefItems.append(a[i])" +"\r\n"+\ " cornerBRefItems.append(i)" +"\r\n"+\ "item.UIItems = cornerBRefItems" +"\r\n" # -------------------------------------------------- # Layout tab = self.layout.addTab("Options") # IK/Upv References group = tab.addGroup("Eyelids controls") row = group.addRow() item = row.addEnumControl(self.pCornerA.scriptName, [], "Corner control A", c.siControlCombo) item.setCodeAfter(cornerAItemsCode) row.addButton("PickCornerARef", "Pick New") row.addButton("DeleteCornerARef", "Delete") row = group.addRow() item = row.addEnumControl(self.pCornerB.scriptName, [], "Corner control B", c.siControlCombo) item.setCodeAfter(cornerBItemsCode) row.addButton("PickCornerBRef", "Pick New") row.addButton("DeleteCornerBRef", "Delete") # ===================================================== ## Add logic for new layout. # @param self def addLogic(self): self.logic.addGlobalCode("from gear.xsi.rig.component import logic\r\nreload(logic)") self.logic.addOnClicked("PickCornerARef", "prop = PPG.Inspected(0)\r\n" + "logic.pickReferences(prop, '"+self.pCornerAArray.scriptName+"', '"+self.pCornerA.scriptName+"')\r\n" + "PPG.Refresh() \r\n") self.logic.addOnClicked("DeleteCornerARef", "prop = PPG.Inspected(0)\r\n" + "logic.deleteReference(prop, '"+self.pCornerAArray.scriptName+"', '"+self.pCornerA.scriptName+"')\r\n" + "PPG.Refresh() \r\n") self.logic.addOnClicked("PickCornerBRef", "prop = PPG.Inspected(0)\r\n" + "logic.pickReferences(prop, '"+self.pCornerBArray.scriptName+"', '"+self.pCornerB.scriptName+"')\r\n" + "PPG.Refresh() \r\n") self.logic.addOnClicked("DeleteCornerBRef", "prop = PPG.Inspected(0)\r\n" + "logic.deleteReference(prop, '"+self.pCornerBArray.scriptName+"', '"+self.pCornerB.scriptName+"')\r\n" + "PPG.Refresh() \r\n")
lgpl-3.0
-5,199,398,780,847,984,000
39.26087
164
0.509788
false
esrille/replace-with-kanji-by-tutcode
mazegaki/kigou.py
1
1585
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Copyright 2017 Esrille Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# 記号やギリシア文字をつかっている語をリストアップします。

import re
import sys

re_kigou = re.compile(r"[〇〻\u0370-\u03FF¬°∃∧◇∨≪∪∩〓△▲▽▼■∀≒◆◇≫※□⇔≡⇒∈⊆⊇⊂⊃○●◎〒∵√]")
re_kana = re.compile(r"[ぁ-んァ-ヶー]")
re_non_regular_yomi = re.compile(r"[^ぁ-んァ-ヶー]")


def is_inflectable(kana):
    return l[0][-1] == "―";


#
# main
#
if __name__ == "__main__":
    for line in sys.stdin:
        l = line.split(" ", 1)
        kana = l[0]
        if re_non_regular_yomi.search(kana):
            continue;
        kanji = l[1].strip(" \n/").split("/")
        for cand in kanji[:]:
            if not re_kigou.search(cand):
                kanji.remove(cand)
                continue
            if re_kana.search(cand):
                kanji.remove(cand)
                continue
        if kanji:
            print(kana, " /", '/'.join(kanji), "/", sep='')
apache-2.0
5,985,953,077,179,379,000
27.66
79
0.59037
false
mitsuhiko/django
tests/regressiontests/signing/tests.py
1
4351
from django.core import signing from django.test import TestCase from django.utils.encoding import force_unicode class TestSigner(TestCase): def test_signature(self): "signature() method should generate a signature" signer = signing.Signer('predictable-secret') signer2 = signing.Signer('predictable-secret2') for s in ( 'hello', '3098247:529:087:', u'\u2019'.encode('utf8'), ): self.assertEqual( signer.signature(s), signing.base64_hmac(signer.salt + 'signer', s, 'predictable-secret') ) self.assertNotEqual(signer.signature(s), signer2.signature(s)) def test_signature_with_salt(self): "signature(value, salt=...) should work" signer = signing.Signer('predictable-secret', salt='extra-salt') self.assertEqual( signer.signature('hello'), signing.base64_hmac('extra-salt' + 'signer', 'hello', 'predictable-secret')) self.assertNotEqual( signing.Signer('predictable-secret', salt='one').signature('hello'), signing.Signer('predictable-secret', salt='two').signature('hello')) def test_sign_unsign(self): "sign/unsign should be reversible" signer = signing.Signer('predictable-secret') examples = ( 'q;wjmbk;wkmb', '3098247529087', '3098247:529:087:', 'jkw osanteuh ,rcuh nthu aou oauh ,ud du', u'\u2019', ) for example in examples: self.assertNotEqual( force_unicode(example), force_unicode(signer.sign(example))) self.assertEqual(example, signer.unsign(signer.sign(example))) def unsign_detects_tampering(self): "unsign should raise an exception if the value has been tampered with" signer = signing.Signer('predictable-secret') value = 'Another string' signed_value = signer.sign(value) transforms = ( lambda s: s.upper(), lambda s: s + 'a', lambda s: 'a' + s[1:], lambda s: s.replace(':', ''), ) self.assertEqual(value, signer.unsign(signed_value)) for transform in transforms: self.assertRaises( signing.BadSignature, signer.unsign, transform(signed_value)) def test_dumps_loads(self): "dumps and loads be reversible for any JSON serializable object" objects = ( ['a', 'list'], 'a string', u'a unicode string \u2019', {'a': 'dictionary'}, ) for o in objects: self.assertNotEqual(o, signing.dumps(o)) self.assertEqual(o, signing.loads(signing.dumps(o))) def test_decode_detects_tampering(self): "loads should raise exception for tampered objects" transforms = ( lambda s: s.upper(), lambda s: s + 'a', lambda s: 'a' + s[1:], lambda s: s.replace(':', ''), ) value = { 'foo': 'bar', 'baz': 1, } encoded = signing.dumps(value) self.assertEqual(value, signing.loads(encoded)) for transform in transforms: self.assertRaises( signing.BadSignature, signing.loads, transform(encoded)) class TestTimestampSigner(TestCase): def test_timestamp_signer(self): value = u'hello' signer = signing.TimestampSigner('predictable-key', time=lambda: 123456789) ts = signer.sign(value) self.assertNotEqual(ts, signing.Signer('predictable-key').sign(value)) self.assertEqual(signer.unsign(ts), value) signer = signing.TimestampSigner('predictable-key', time=lambda: 123456800) self.assertEqual(signer.unsign(ts, max_age=12), value) self.assertEqual(signer.unsign(ts, max_age=11), value) self.assertRaises( signing.SignatureExpired, signer.unsign, ts, max_age=10) def test_timestamp_precision(self): one = signing.TimestampSigner('key', time=lambda: 123.4567).sign('v') two = signing.TimestampSigner('key', time=lambda: 123.4568).sign('v') self.assertNotEqual(one, two)
bsd-3-clause
-3,956,434,302,167,085,000
36.508621
80
0.57504
false
peastman/cbang
config/rpm/__init__.py
1
4971
import os import shutil from SCons.Script import * from SCons.Action import CommandAction def replace_dash(s): return s.replace('-', '_') def write_spec_text_section(f, env, name, var): if var in env: f.write('%%%s\n%s\n\n' % (name, env.get(var).strip())) def write_spec_script(f, env, name, var): if var in env: script = env.get(var) input = None try: input = open(script, 'r') contents = input.read().strip() finally: if input is not None: input.close() f.write('%%%s\n%s\n\n' % (name, contents)) def install_files(f, env, key, build_dir, path, prefix = None, perms = None, dperms = 0755): if perms is None: perms = 0644 if key in env: target = build_dir + path # Copy env.CopyToPackage(env.get(key), target, perms, dperms) # Write files list for src, dst, mode in env.ResolvePackageFileMap(env.get(key), target): if prefix is not None: f.write(prefix + ' ') f.write(dst[len(build_dir):] + '\n') def build_function(target, source, env): name = env.get('package_name_lower') # Create package build dir build_dir = 'build/%s-RPM' % name if os.path.exists(build_dir): shutil.rmtree(build_dir) os.makedirs(build_dir) # Create the SPEC file spec_file = 'build/%s.spec' % name f = None try: f = open(spec_file, 'w') # Create the preamble write_var = env.WriteVariable write_var(env, f, 'Summary', 'summary') write_var(env, f, 'Name', 'package_name_lower', None, replace_dash) write_var(env, f, 'Version', 'version', None, replace_dash) write_var(env, f, 'Release', 'package_build', '1', replace_dash) write_var(env, f, 'License', 'rpm_license') write_var(env, f, 'Group', 'rpm_group') write_var(env, f, 'URL', 'url') write_var(env, f, 'Vendor', 'vendor') write_var(env, f, 'Packager', 'maintainer') write_var(env, f, 'Icon', 'icon') write_var(env, f, 'Prefix', 'prefix') #write_var(env, f, 'BuildArch', 'package_arch', env.GetPackageArch()) write_var(env, f, 'Provides', 'rpm_provides', multi = True) write_var(env, f, 'Conflicts', 'rpm_conflicts', multi = True) write_var(env, f, 'Obsoletes', 'rpm_obsoletes', multi = True) write_var(env, f, 'BuildRequires', 'rpm_build_requires', multi = True) write_var(env, f, 'Requires(pre)', 'rpm_pre_requires', multi = True) write_var(env, f, 'Requires', 'rpm_requires', multi = True) write_var(env, f, 'Requires(postun)', 'rpm_postun_requires', multi = True) # Description write_spec_text_section(f, env, 'description', 'description') # Scripts for script in ['prep', 'build', 'install', 'clean', 'pre', 'post', 'preun', 'postun', 'verifyscript']: write_spec_script(f, env, script, 'rpm_' + script) # Files if 'rpm_filelist' in env: f.write('%%files -f %s\n' % env.get('rpm_filelist')) else: f.write('%files\n') f.write('%defattr(- root root)\n') for files in [ ['documents', '/usr/share/doc/' + name, '%doc', None], ['programs', '/usr/bin', '%attr(0775 root root)', 0755], ['scripts', '/usr/bin', '%attr(0775 root root)', 0755], ['desktop_menu', '/usr/share/applications', None, None], ['init_d', '/etc/init.d', '%config %attr(0775 root root)', None], ['config', '/etc/' + name, '%config', None], ['icons', '/usr/share/pixmaps', None, None], ['platform_independent', '/usr/share/' + name, None, None], ]: install_files(f, env, files[0], build_dir, files[1], files[2], files[3]) # ChangeLog write_spec_text_section(f, env, 'changelog', 'rpm_changelog') finally: if f is not None: f.close() # Create directories needed by rpmbuild for dir in ['BUILD', 'BUILDROOT', 'RPMS', 'SOURCES', 'SPECS', 'SRPMS']: dir = 'build/' + dir if not os.path.exists(dir): os.makedirs(dir) # Build the package build_dir = 
os.path.realpath(build_dir) cmd = 'rpmbuild -bb --buildroot %s --define "_topdir %s/build" ' \ '--target %s %s' % ( build_dir, os.getcwd(), env.GetPackageArch(), spec_file) CommandAction(cmd).execute(target, [build_dir], env) # Move the package target = str(target[0]) path = 'build/RPMS/' + env.GetPackageArch() + '/' + target shutil.move(path, target) def generate(env): bld = Builder(action = build_function, source_factory = SCons.Node.FS.Entry, source_scanner = SCons.Defaults.DirScanner) env.Append(BUILDERS = {'RPM' : bld}) return True def exists(): return 1
lgpl-2.1
-2,002,260,641,959,615,500
32.816327
78
0.554416
false
kaos-addict/weborf
python_cgi_weborf/cgi.py
1
8232
#!/usr/bin/python # -*- coding: utf-8 -*- ''' Weborf Copyright (C) 2009 Salvo "LtWorf" Tomaselli Weborf is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. @author Salvo "LtWorf" Tomaselli <[email protected]> This package provides useful functions for cgi scripts ''' import sys import os def pyinfo(): '''Shows information page''' print "<h1>Weborf Python CGI Module</h1>" print "<p>Version 0.2</p>" print "<p>Written by Salvo 'LtWorf' Tomaselli <[email protected]></p>" i_vars=("GET","POST","SERVER","SESSION","COOKIE","FILES") for var in i_vars: v=eval(var) if isinstance(v,list): l=True else: #Dict l=False print "<H2>%s</H2>" % var print "<table border=1>" for j in v: if l: print "<tr><td>%s</td></tr>" % (j) else: print "<tr><td>%s</td><td><code>%s</code></td></tr>" % (j,v[j]) print "</table>" print "<p><h2>Weborf</h2></p><p>This program comes with ABSOLUTELY NO WARRANTY.<br>This is free software, and you are welcome to redistribute it<br>under certain conditions.<br>For details see the GPLv3 Licese.</p>" def __post_escape(val): '''Post fields use certains escapes. This function returns the original string. This function is for internal use, not meant for use by others''' val=val.replace("+"," ") #Replaces all + with a space i=val.find("%") #% is the char for an exadecimal escape while i!=-1: #If there is a % in the non parsed part of the string s=val[i+1] + val[i+2] #Extract the exadecimal code if s!="37": #Replaces all the escapes in the string val=val.replace("%"+s,chr(int(s,16))) else: '''Replaces only once because this char is a % so there would be % that aren't escapes in the non parsed part of the string''' val=val.replace("%"+s,chr(int(s,16)),1) i=val.find("%",i+1) return val def __read_post(): '''Reads POST data. This function is for internal use.''' #Reading POST Data if 'CONTENT_LENGTH' not in os.environ: return None RAW=sys.stdin.read(int(os.getenv('CONTENT_LENGTH'))) if os.getenv('CONTENT_TYPE')=='application/x-www-form-urlencoded': for i in RAW.split("&"): v=i.split("=") POST[__post_escape(v[0])]=__post_escape(v[1]) elif os.getenv('CONTENT_TYPE').startswith('multipart/form-data'): #Finding boundary for i in os.getenv('CONTENT_TYPE').split("; "): if i.strip().startswith("boundary"): boundary=i.split("=")[1] files=RAW.split(boundary) for i in files: j=i.split("\r\n\r\n") if len(j)==1: continue dic={} dic['content']=j[1][:-2] fields=j[0].split("\r\n") for k in fields: a=k.split(": ",1) if len(a)==2: dic[a[0]]=a[1] elif len(a[0])!=0: dic[a[0]]=None for k in dic['Content-Disposition'].split("; "): d=k.split("=",1) if len(d)>1: dic[d[0]]=d[1].replace("\"","") else: dic[d[0]]=None FILES.append(dic) return RAW def redirect(location): '''Sends to the client the request to redirect to another page. It will work only if headers aren't sent yet. 
It will make the script terminate immediately and redirect.''' os.write(1,"Status: 303\r\nLocation: "+location+"\r\n\r\n") #Writes location header sys.exit(0) #Redirects def savesession(): '''Saves the session to the file. Before terminating the script, this function has to be executed to ensure that the session is saved ''' import csv if 'PHPSESSID' not in COOKIE==None: return #No session to save #Opens the file with the session fp=file(TMPDIR+"/"+COOKIE['PHPSESSID'],"w") writer=csv.writer(fp) #Converting dictionary into 2 level array for csv module a=[] for i in SESSION: a.append((i,SESSION[i])) writer.writerows(a) fp.close() def session_start(): '''Inits the session vars''' if 'PHPSESSID' not in COOKIE or COOKIE['PHPSESSID']==None: #No session, creating a new one import random import md5 #Creating session's id with random numbers and multiple hashes r=random.Random() a=md5.md5(os.getenv("SCRIPT_FILENAME")).hexdigest()+md5.md5(str(r.random())).hexdigest() for i in range(10): a=md5.md5(a).hexdigest()+md5.md5(str(r.random())).hexdigest() s_id= "weborf-%s-%s" % (str(os.getpid()), a) setcookie('PHPSESSID',s_id) COOKIE['PHPSESSID']=s_id else:#Session exists, loading data import time try: #If session expired after inactivity if (os.stat(TMPDIR+"/"+COOKIE['PHPSESSID'])[7] + SESSIONEXPIRE) < time.time(): #Deletes old session file, just to try to avoid to fill the disk os.unlink(TMPDIR+"/"+COOKIE['PHPSESSID']) #Creating an empty session COOKIE['PHPSESSID']=None session_start() return import csv fp=file(TMPDIR+"/"+COOKIE['PHPSESSID']) reader=csv.reader(fp) #Creating a csv reader for i in reader.__iter__(): #Iterating rows SESSION[i[0]]=i[1] except: #Start sessions with a new session id COOKIE['PHPSESSID']=None session_start() def setcookie(name,value,expires=None): '''Sets a cookie, by default it will be a session cookie. Expires is the time in seconds to wait to make the cookie expire''' if expires!=None: s= "Set-Cookie: %s=%s; Max-Age=%s\r\n" % (str(name),str(value),str(expires)) else: s= "Set-Cookie: %s=%s\r\n" % (str(name),str(value)) sys.stdout.write(s) COOKIE[str(name)]=str(value) def finalize_headers(content="text/html"): '''This function finalizes headers. After calling this function the script can output its data. If Content-Type of the page is not text/html, it must be specified as parameter here.''' sys.stdout.write("Content-Type: %s\r\n\r\n"%content) def __get_array(sep,query): '''Returns dictionary containing all the data passed via GET''' dic={} if query==None: return dic for p in query.split(sep): i=p.split("=",1) if len(i)!=1: dic[i[0]]=i[1] elif len(i[0])!=0: dic[i[0]]=None return dic def __auth_fields(): '''If there is authentication, gets username and password''' #Deconding auth field v=os.getenv("HTTP_AUTHORIZATION") if v!=None: import base64 q=v.split(" ") os.environ['AUTH_TYPE']=q[0] auth=base64.b64decode(q[1]).split(":",1) os.environ['AUTH_USER']=auth[0] os.environ['AUTH_PW']=auth[1] #Loading configuration from file or setting default try: execfile("/etc/weborf/pywrapper.conf") except: TMPDIR="/tmp" SESSIONEXPIRE=600 #chdir_to_file(os.getenv("SCRIPT_FILENAME")) __auth_fields() #Changing the order of those lines can be dangerous COOKIE=__get_array('; ',os.getenv("HTTP_COOKIE")) GET=__get_array('&',os.getenv("QUERY_STRING")) SESSION={} POST={} FILES=[] RAW=__read_post() SERVER=os.environ #Executes file #execfile(os.getenv("SCRIPT_FILENAME")) #savesession()
gpl-3.0
-1,482,226,055,591,729,700
32.737705
219
0.58467
false
cvsuser-chromium/chromium
chrome/common/extensions/docs/server2/caching_file_system.py
1
4951
''' read_object_store = (self._read_binary_object_store if binary else self._read_object_store) read_values = read_object_store.GetMulti(paths).Get() stat_values = self._stat_object_store.GetMulti(paths).Get() results = {} # maps path to read value uncached = {} # maps path to stat value for path in paths: stat_value = stat_values.get(path) if stat_value is None: # TODO(cduvall): do a concurrent Stat with the missing stat values. try: stat_value = self.Stat(path) except: return Future(exc_info=sys.exc_info()) read_value = read_values.get(path) if read_value is None: uncached[path] = stat_value continue read_data, read_version = read_value if stat_value.version != read_version: uncached[path] = stat_value continue results[path] = read_data if not uncached: return Future(value=results) return Future(delegate=_AsyncUncachedFuture( self._file_system.Read(uncached.keys(), binary=binary), uncached, results, self, read_object_store)) def GetIdentity(self): return self._file_system.GetIdentity() def __repr__(self): return '<%s of %s>' % (type(self).__name__, type(self._file_system).__name__)
bsd-3-clause
-7,957,290,888,908,834,000
37.084615
80
0.646738
false
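The CachingFileSystem above caches each read together with the version reported by Stat(), and reuses the cached data only while that version is unchanged. Below is a minimal, self-contained sketch of that stat-versioned caching idea; the class and method names are illustrative and are not part of the Chromium code.

class StatVersionedReadCache(object):
    """Toy read-through cache keyed by path, invalidated whenever the
    backing file system reports a new stat version for that path."""

    def __init__(self, file_system):
        # file_system is assumed to expose read(path) -> data and
        # stat(path) -> opaque version token (hypothetical interface).
        self._fs = file_system
        self._cache = {}  # path -> (data, version)

    def read(self, path):
        version = self._fs.stat(path)       # cheap freshness check
        cached = self._cache.get(path)
        if cached is not None and cached[1] == version:
            return cached[0]                # still fresh: serve from cache
        data = self._fs.read(path)          # stale or missing: re-read
        self._cache[path] = (data, version)
        return data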
parkbyte/electrumparkbyte
lib/plugins.py
1
17957
#!/usr/bin/env python # # Electrum - lightweight ParkByte client # Copyright (C) 2015 Thomas Voegtlin # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from collections import namedtuple import traceback import sys import os import imp import pkgutil import time from util import * from i18n import _ from util import profiler, PrintError, DaemonThread, UserCancelled class Plugins(DaemonThread): @profiler def __init__(self, config, is_local, gui_name): DaemonThread.__init__(self) if is_local: find = imp.find_module('plugins') plugins = imp.load_module('electrum_plugins', *find) else: plugins = __import__('electrum_plugins') self.pkgpath = os.path.dirname(plugins.__file__) self.config = config self.hw_wallets = {} self.plugins = {} self.gui_name = gui_name self.descriptions = {} self.device_manager = DeviceMgr(config) self.load_plugins() self.add_jobs(self.device_manager.thread_jobs()) self.start() def load_plugins(self): for loader, name, ispkg in pkgutil.iter_modules([self.pkgpath]): m = loader.find_module(name).load_module(name) d = m.__dict__ gui_good = self.gui_name in d.get('available_for', []) # We register wallet types even if the GUI isn't provided # otherwise the user gets a misleading message like # "Unknown wallet type: 2fa" details = d.get('registers_wallet_type') if details: self.register_plugin_wallet(name, gui_good, details) if not gui_good: continue self.descriptions[name] = d if not d.get('requires_wallet_type') and self.config.get('use_' + name): try: self.load_plugin(name) except BaseException as e: traceback.print_exc(file=sys.stdout) self.print_error("cannot initialize plugin %s:" % name, str(e)) def get(self, name): return self.plugins.get(name) def count(self): return len(self.plugins) def load_plugin(self, name): full_name = 'electrum_plugins.' + name + '.' 
+ self.gui_name loader = pkgutil.find_loader(full_name) if not loader: raise RuntimeError("%s implementation for %s plugin not found" % (self.gui_name, name)) p = loader.load_module(full_name) plugin = p.Plugin(self, self.config, name) self.add_jobs(plugin.thread_jobs()) self.plugins[name] = plugin self.print_error("loaded", name) return plugin def close_plugin(self, plugin): self.remove_jobs(plugin.thread_jobs()) def enable(self, name): self.config.set_key('use_' + name, True, True) p = self.get(name) if p: return p return self.load_plugin(name) def disable(self, name): self.config.set_key('use_' + name, False, True) p = self.get(name) if not p: return self.plugins.pop(name) p.close() self.print_error("closed", name) def toggle(self, name): p = self.get(name) return self.disable(name) if p else self.enable(name) def is_available(self, name, w): d = self.descriptions.get(name) if not d: return False deps = d.get('requires', []) for dep, s in deps: try: __import__(dep) except ImportError: return False requires = d.get('requires_wallet_type', []) return not requires or w.wallet_type in requires def hardware_wallets(self, action): wallet_types, descs = [], [] for name, (gui_good, details) in self.hw_wallets.items(): if gui_good: try: p = self.wallet_plugin_loader(name) if action == 'restore' or p.is_enabled(): wallet_types.append(details[1]) descs.append(details[2]) except: traceback.print_exc() self.print_error("cannot load plugin for:", name) return wallet_types, descs def register_plugin_wallet(self, name, gui_good, details): from wallet import Wallet def dynamic_constructor(storage): return self.wallet_plugin_loader(name).wallet_class(storage) if details[0] == 'hardware': self.hw_wallets[name] = (gui_good, details) self.print_error("registering wallet %s: %s" %(name, details)) Wallet.register_plugin_wallet(details[0], details[1], dynamic_constructor) def wallet_plugin_loader(self, name): if not name in self.plugins: self.load_plugin(name) return self.plugins[name] def run(self): while self.is_running(): time.sleep(0.1) self.run_jobs() self.print_error("stopped") hook_names = set() hooks = {} def hook(func): hook_names.add(func.func_name) return func def run_hook(name, *args): results = [] f_list = hooks.get(name, []) for p, f in f_list: if p.is_enabled(): try: r = f(*args) except Exception: print_error("Plugin error") traceback.print_exc(file=sys.stdout) r = False if r: results.append(r) if results: assert len(results) == 1, results return results[0] class BasePlugin(PrintError): def __init__(self, parent, config, name): self.parent = parent # The plugins object self.name = name self.config = config self.wallet = None # add self to hooks for k in dir(self): if k in hook_names: l = hooks.get(k, []) l.append((self, getattr(self, k))) hooks[k] = l def diagnostic_name(self): return self.name def __str__(self): return self.name def close(self): # remove self from hooks for k in dir(self): if k in hook_names: l = hooks.get(k, []) l.remove((self, getattr(self, k))) hooks[k] = l self.parent.close_plugin(self) self.on_close() def on_close(self): pass def requires_settings(self): return False def thread_jobs(self): return [] def is_enabled(self): return self.is_available() and self.config.get('use_'+self.name) is True def is_available(self): return True def settings_dialog(self): pass class DeviceNotFoundError(Exception): pass class DeviceUnpairableError(Exception): pass Device = namedtuple("Device", "path interface_number id_ product_key") DeviceInfo = namedtuple("DeviceInfo", "device description 
initialized") class DeviceMgr(ThreadJob, PrintError): '''Manages hardware clients. A client communicates over a hardware channel with the device. In addition to tracking device HID IDs, the device manager tracks hardware wallets and manages wallet pairing. A HID ID may be paired with a wallet when it is confirmed that the hardware device matches the wallet, i.e. they have the same master public key. A HID ID can be unpaired if e.g. it is wiped. Because of hotplugging, a wallet must request its client dynamically each time it is required, rather than caching it itself. The device manager is shared across plugins, so just one place does hardware scans when needed. By tracking HID IDs, if a device is plugged into a different port the wallet is automatically re-paired. Wallets are informed on connect / disconnect events. It must implement connected(), disconnected() callbacks. Being connected implies a pairing. Callbacks can happen in any thread context, and we do them without holding the lock. Confusingly, the HID ID (serial number) reported by the HID system doesn't match the device ID reported by the device itself. We use the HID IDs. This plugin is thread-safe. Currently only devices supported by hidapi are implemented.''' def __init__(self, config): super(DeviceMgr, self).__init__() # Keyed by wallet. The value is the device id if the wallet # has been paired, and None otherwise. self.wallets = {} # A list of clients. The key is the client, the value is # a (path, id_) pair. self.clients = {} # What we recognise. Each entry is a (vendor_id, product_id) # pair. self.recognised_hardware = set() # For synchronization self.lock = threading.RLock() self.config = config def thread_jobs(self): # Thread job to handle device timeouts return [self] def run(self): '''Handle device timeouts. Runs in the context of the Plugins thread.''' with self.lock: clients = list(self.clients.keys()) cutoff = time.time() - self.config.get_session_timeout() for client in clients: client.timeout(cutoff) def register_devices(self, device_pairs): for pair in device_pairs: self.recognised_hardware.add(pair) def create_client(self, device, handler, plugin): # Get from cache first client = self.client_lookup(device.id_) if client: return client client = plugin.create_client(device, handler) if client: self.print_error("Registering", client) with self.lock: self.clients[client] = (device.path, device.id_) return client def wallet_id(self, wallet): with self.lock: return self.wallets.get(wallet) def wallet_by_id(self, id_): with self.lock: for wallet, wallet_id in self.wallets.items(): if wallet_id == id_: return wallet return None def unpair_wallet(self, wallet): with self.lock: if not wallet in self.wallets: return wallet_id = self.wallets.pop(wallet) client = self.client_lookup(wallet_id) self.clients.pop(client, None) wallet.unpaired() if client: client.close() def unpair_id(self, id_): with self.lock: wallet = self.wallet_by_id(id_) if wallet: self.unpair_wallet(wallet) def pair_wallet(self, wallet, id_): with self.lock: self.wallets[wallet] = id_ wallet.paired() def client_lookup(self, id_): with self.lock: for client, (path, client_id) in self.clients.items(): if client_id == id_: return client return None def client_by_id(self, id_, handler): '''Returns a client for the device ID if one is registered. 
If a device is wiped or in bootloader mode pairing is impossible; in such cases we communicate by device ID and not wallet.''' self.scan_devices(handler) return self.client_lookup(id_) def client_for_wallet(self, plugin, wallet, force_pair): assert wallet.handler devices = self.scan_devices(wallet.handler) wallet_id = self.wallet_id(wallet) client = self.client_lookup(wallet_id) if client: # An unpaired client might have another wallet's handler # from a prior scan. Replace to fix dialog parenting. client.handler = wallet.handler return client for device in devices: if device.id_ == wallet_id: return self.create_client(device, wallet.handler, plugin) if force_pair: return self.force_pair_wallet(plugin, wallet, devices) return None def force_pair_wallet(self, plugin, wallet, devices): first_address, derivation = wallet.first_address() assert first_address # The wallet has not been previously paired, so let the user # choose an unpaired device and compare its first address. info = self.select_device(wallet, plugin, devices) client = self.client_lookup(info.device.id_) if client and client.is_pairable(): # See comment above for same code client.handler = wallet.handler # This will trigger a PIN/passphrase entry request try: client_first_address = client.first_address(derivation) except (UserCancelled, RuntimeError): # Bad / cancelled PIN / passphrase client_first_address = None if client_first_address == first_address: self.pair_wallet(wallet, info.device.id_) return client # The user input has wrong PIN or passphrase, or cancelled input, # or it is not pairable raise DeviceUnpairableError( _('Electrum cannot pair with your %s.\n\n' 'Before you request parkbytes to be sent to addresses in this ' 'wallet, ensure you can pair with your device, or that you have ' 'its seed (and passphrase, if any). Otherwise all parkbytes you ' 'receive will be unspendable.') % plugin.device) def unpaired_device_infos(self, handler, plugin, devices=None): '''Returns a list of DeviceInfo objects: one for each connected, unpaired device accepted by the plugin.''' if devices is None: devices = self.scan_devices(handler) devices = [dev for dev in devices if not self.wallet_by_id(dev.id_)] states = [_("wiped"), _("initialized")] infos = [] for device in devices: if not device.product_key in plugin.DEVICE_IDS: continue client = self.create_client(device, handler, plugin) if not client: continue state = states[client.is_initialized()] label = client.label() or _("An unnamed %s") % plugin.device descr = "%s (%s)" % (label, state) infos.append(DeviceInfo(device, descr, client.is_initialized())) return infos def select_device(self, wallet, plugin, devices=None): '''Ask the user to select a device to use if there is more than one, and return the DeviceInfo for the device.''' while True: infos = self.unpaired_device_infos(wallet.handler, plugin, devices) if infos: break msg = _('Could not connect to your %s. Verify the cable is ' 'connected and that no other application is using it.\n\n' 'Try to connect again?') % plugin.device if not wallet.handler.yes_no_question(msg): raise UserCancelled() devices = None if len(infos) == 1: return infos[0] msg = _("Please select which %s device to use:") % plugin.device descriptions = [info.description for info in infos] return infos[wallet.handler.query_choice(msg, descriptions)] def scan_devices(self, handler): # All currently supported hardware libraries use hid, so we # assume it here. This can be easily abstracted if necessary. 
# Note this import must be local so those without hardware # wallet libraries are not affected. import hid self.print_error("scanning devices...") # First see what's connected that we know about devices = [] for d in hid.enumerate(0, 0): product_key = (d['vendor_id'], d['product_id']) if product_key in self.recognised_hardware: # Older versions of hid don't provide interface_number interface_number = d.get('interface_number', 0) devices.append(Device(d['path'], interface_number, d['serial_number'], product_key)) # Now find out what was disconnected pairs = [(dev.path, dev.id_) for dev in devices] disconnected_ids = [] with self.lock: connected = {} for client, pair in self.clients.items(): if pair in pairs: connected[client] = pair else: disconnected_ids.append(pair[1]) self.clients = connected # Unpair disconnected devices for id_ in disconnected_ids: self.unpair_id(id_) return devices
mit
-1,006,099,610,933,601,900
34.1409
84
0.58913
false
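The Electrum plugins module above routes plugin callbacks through a module-level hook registry: functions decorated with @hook are recorded by name, and run_hook() invokes every enabled plugin that implements that hook. The following is a stripped-down sketch of the same registry pattern; the names and the simplified Plugin.register() step are illustrative, not the Electrum API.

hooks = {}  # hook name -> list of registered callbacks


def hook(func):
    # Record the function name as a known hook slot.
    hooks.setdefault(func.__name__, [])
    return func


class Plugin(object):
    def register(self):
        # Attach this instance's methods to every hook slot they implement.
        for name in hooks:
            if hasattr(self, name):
                hooks[name].append(getattr(self, name))


def run_hook(name, *args):
    results = [f(*args) for f in hooks.get(name, [])]
    return results[0] if results else None


class MyPlugin(Plugin):
    @hook
    def on_payment(self, amount):
        return "seen %s" % amount


plugin = MyPlugin()
plugin.register()
print(run_hook("on_payment", 42))  # -> seen 42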
tdyas/pants
tests/python/pants_test/backend/native/tasks/test_c_compile.py
1
2996
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

from textwrap import dedent

from pants.backend.native.targets.native_library import CLibrary
from pants.backend.native.tasks.c_compile import CCompile
from pants.backend.native.tasks.native_compile import ObjectFiles
from pants_test.backend.native.tasks.native_task_test_base import (
    NativeCompileTestMixin,
    NativeTaskTestBase,
)


class CCompileTest(NativeTaskTestBase, NativeCompileTestMixin):
    @classmethod
    def task_type(cls):
        return CCompile

    def create_header_only_alternate_c_library(self, ext, **kwargs):
        header_filename = f"test{ext}"
        self.create_file(
            f"src/c/test/{header_filename}",
            contents=dedent(
                """
                #ifndef __TEST_H__
                #define __TEST_H__
                int test(int);
                #endif
                """
            ),
        )
        return self.make_target(
            spec="src/c/test", target_type=CLibrary, sources=[header_filename], **kwargs
        )

    def create_simple_c_library(self, **kwargs):
        self.create_file(
            "src/c/test/test.h",
            contents=dedent(
                """
                #ifndef __TEST_H__
                #define __TEST_H__
                int test(int);
                #endif
                """
            ),
        )
        self.create_file(
            "src/c/test/test.c",
            contents=dedent(
                """
                #include "test.h"
                int test(int x) {
                  return x / 137;
                }
                """
            ),
        )
        return self.make_target(
            spec="src/c/test", target_type=CLibrary, sources=["test.h", "test.c"], **kwargs
        )

    def test_header_only_noop_with_alternate_header_extension(self):
        alternate_extension = ".asdf"
        c = self.create_header_only_alternate_c_library(alternate_extension)

        context = self.prepare_context_for_compile(
            target_roots=[c],
            options={"c-compile-settings": {"header_file_extensions": [alternate_extension]}},
        )

        # Test that the task runs without error if provided a header-only library.
        c_compile = self.create_task(context)
        c_compile.execute()

        object_files_product = context.products.get(ObjectFiles)
        object_files_for_target = self._retrieve_single_product_at_target_base(
            object_files_product, c
        )
        # Test that no object files were produced.
        self.assertEqual(0, len(object_files_for_target.filenames))

    def test_caching(self):
        c = self.create_simple_c_library()
        context = self.prepare_context_for_compile(target_roots=[c])
        c_compile = self.create_task(context)

        # TODO: what is this testing?
        c_compile.execute()
        c_compile.execute()
apache-2.0
-28,245,992,922,018,148
30.536842
94
0.563752
false
zhaochl/python-utils
tar_file_ftp/tar_file.py
1
1991
#!/usr/bin/env python
# coding=utf-8
from file_util import *
from pdb import *
import commands
import urllib2

#output = os.popen('ls')
#print output.read()
#print '----------------------------'
#(status, output) = commands.getstatusoutput('ls')
#print status, output

def execute_cmd(cmd):
    _result={}
    (status, output) = commands.getstatusoutput(cmd)
    _result['status'] = status
    _result['output'] = output
    return _result

def gen_ftp_sh(file_name):
    _content = """
ftp -n <<- EOF
open timeplan.cn
user name password
cd /path/
bin
put {}
bye
EOF
""".format(file_name)
    return _content

def gen_test_dir(dir_name):
    _content="""
if [ -d {} ];then
    echo "exist"
    exit
else
    mkdir {}
fi
""".format(dir_name,dir_name)
    return _content

def main():
    name_list = read_file_line('list')
    content = '#!/bin/bash\n'
    content_file=''
    next_dir_index = 0
    for index,name in enumerate(name_list):
        if len(name)==1:
            continue
        name = name.encode('utf8','ignore')
        dir_name = '_tmp_'+str(next_dir_index)
        content_file +='cp /path/'+name +' '+dir_name+'/\n'
        tar_name = dir_name+'.tar.gz'
        if index%100==0:
            f_name = '_bash_/bash_'+str(index)+'.sh'
            #content+='mkdir '+dir_name+'\n'
            content+=gen_test_dir(dir_name)
            content+=content_file
            content+="tar -zcvf "+ tar_name+' '+dir_name+'\n'
            content+= gen_ftp_sh(tar_name)
            content+='rm -rf '+tar_name+'\n'
            content+='rm -rf '+dir_name+'\n'
            content +="echo 'run at' `date +'%Y/%m/%d %H:%M:%S'`,file:"+tar_name+'\n'
            content_file=''
            next_dir_index = (index+100)/100
            write_file(f_name,content)
            content = '#!/bin/bash\n'
        #if index>=2:
        #    break
    print 'ok'

if __name__=='__main__':
    #result = execute_cmd('ls')
    #print result['output']
    main()
apache-2.0
8,161,350,097,538,408,000
24.525641
85
0.530889
false
idegtiarov/ceilometer
ceilometer/tests/unit/energy/test_kwapi.py
1
4535
# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from keystoneauth1 import exceptions import mock from oslo_context import context from oslotest import base from oslotest import mockpatch import six from ceilometer.agent import manager from ceilometer.energy import kwapi PROBE_DICT = { "probes": { "A": { "timestamp": 1357730232.68754, "w": 107.3, "kwh": 0.001058255421506034 }, "B": { "timestamp": 1357730232.048158, "w": 15.0, "kwh": 0.029019045026169896 }, "C": { "timestamp": 1357730232.223375, "w": 95.0, "kwh": 0.17361822634312918 } } } ENDPOINT = 'end://point' class TestManager(manager.AgentManager): def __init__(self): super(TestManager, self).__init__() self._keystone = mock.Mock() class _BaseTestCase(base.BaseTestCase): @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) def setUp(self): super(_BaseTestCase, self).setUp() self.context = context.get_admin_context() self.manager = TestManager() class TestKwapi(_BaseTestCase): @staticmethod def fake_get_kwapi_client(ksclient, endpoint): raise exceptions.EndpointNotFound("fake keystone exception") def test_endpoint_not_exist(self): with mockpatch.PatchObject(kwapi._Base, 'get_kwapi_client', side_effect=self.fake_get_kwapi_client): pollster = kwapi.EnergyPollster() samples = list(pollster.get_samples(self.manager, {}, [ENDPOINT])) self.assertEqual(0, len(samples)) class TestEnergyPollster(_BaseTestCase): pollster_cls = kwapi.EnergyPollster unit = 'kwh' def setUp(self): super(TestEnergyPollster, self).setUp() self.useFixture(mockpatch.PatchObject( kwapi._Base, '_iter_probes', side_effect=self.fake_iter_probes)) @staticmethod def fake_iter_probes(ksclient, cache, endpoint): probes = PROBE_DICT['probes'] for key, value in six.iteritems(probes): probe_dict = value probe_dict['id'] = key yield probe_dict def test_default_discovery(self): pollster = kwapi.EnergyPollster() self.assertEqual('endpoint:energy', pollster.default_discovery) def test_sample(self): cache = {} samples = list(self.pollster_cls().get_samples(self.manager, cache, [ENDPOINT])) self.assertEqual(len(PROBE_DICT['probes']), len(samples)) samples_by_name = dict((s.resource_id, s) for s in samples) for name, probe in PROBE_DICT['probes'].items(): sample = samples_by_name[name] expected = datetime.datetime.fromtimestamp( probe['timestamp'] ).isoformat() self.assertEqual(expected, sample.timestamp) self.assertEqual(probe[self.unit], sample.volume) class TestPowerPollster(TestEnergyPollster): pollster_cls = kwapi.PowerPollster unit = 'w' class TestEnergyPollsterCache(_BaseTestCase): pollster_cls = kwapi.EnergyPollster def test_get_samples_cached(self): probe = {'id': 'A'} probe.update(PROBE_DICT['probes']['A']) cache = { '%s-%s' % (ENDPOINT, self.pollster_cls.CACHE_KEY_PROBE): [probe], } self.manager._keystone = mock.Mock() pollster = self.pollster_cls() with mock.patch.object(pollster, '_get_probes') as do_not_call: do_not_call.side_effect = AssertionError('should not be called') samples = list(pollster.get_samples(self.manager, cache, 
[ENDPOINT])) self.assertEqual(1, len(samples)) class TestPowerPollsterCache(TestEnergyPollsterCache): pollster_cls = kwapi.PowerPollster
apache-2.0
9,031,500,531,538,610,000
30.713287
77
0.615215
false
tensorflow/profiler
plugin/tensorboard_plugin_profile/convert/trace_events_json_test.py
1
4311
# -*- coding: utf-8 -*- # Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests the Trace -> catapult JSON conversion.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import json import tensorflow as tf from google.protobuf import text_format from tensorboard_plugin_profile.convert import trace_events_json from tensorboard_plugin_profile.protobuf import trace_events_pb2 class TraceEventsJsonStreamTest(tf.test.TestCase): def convert(self, proto_text): proto = trace_events_pb2.Trace() text_format.Parse(proto_text, proto) return json.loads(''.join(trace_events_json.TraceEventsJsonStream(proto))) def testJsonConversion(self): self.assertEqual( self.convert(""" devices { key: 2 value { name: 'D2' device_id: 2 resources { key: 2 value { resource_id: 2 name: 'R2.2' } } } } devices { key: 1 value { name: 'D1' device_id: 1 resources { key: 2 value { resource_id: 1 name: 'R1.2' } } } } trace_events { device_id: 1 resource_id: 2 name: "E1.2.1" timestamp_ps: 100000 duration_ps: 10000 args { key: "label" value: "E1.2.1" } args { key: "extra" value: "extra info" } } trace_events { device_id: 2 resource_id: 2 name: "E2.2.1" timestamp_ps: 105000 } """), dict( displayTimeUnit='ns', metadata={'highres-ticks': True}, traceEvents=[ dict( ph='M', pid=1, name='process_name', args=dict(name='D1')), dict( ph='M', pid=1, name='process_sort_index', args=dict(sort_index=1)), dict( ph='M', pid=1, tid=2, name='thread_name', args=dict(name='R1.2')), dict( ph='M', pid=1, tid=2, name='thread_sort_index', args=dict(sort_index=2)), dict( ph='M', pid=2, name='process_name', args=dict(name='D2')), dict( ph='M', pid=2, name='process_sort_index', args=dict(sort_index=2)), dict( ph='M', pid=2, tid=2, name='thread_name', args=dict(name='R2.2')), dict( ph='M', pid=2, tid=2, name='thread_sort_index', args=dict(sort_index=2)), dict( ph='X', pid=1, tid=2, name='E1.2.1', ts=0.1, dur=0.01, args=dict(label='E1.2.1', extra='extra info')), dict(ph='i', pid=2, tid=2, name='E2.2.1', ts=0.105, s='t'), {}, ])) if __name__ == '__main__': tf.test.main()
apache-2.0
1,974,153,693,499,353,600
30.933333
80
0.429135
false
dpawlows/MGITM
srcPython/gitm_3d_test.py
1
1981
#!/usr/bin/env python

'''
Open a GITM 3D file and create a plot similar to the example given by Aaron.
Note that as pybats.gitm is more developed, a plot like this should be made
using syntax like,

>>>a=gitm.GitmBin('filename')
>>>a.add_alt_slice(0, 'Rho', add_cbar=True)

That's how most pybats stuff works right now.
'''

# Import shit.  I needed a lot of shit this time.
import numpy as np
from spacepy.pybats import gitm
import matplotlib.pyplot as plt
from matplotlib.cm import get_cmap
from matplotlib.ticker import ScalarFormatter, FormatStrFormatter

# Open file.
a=gitm.GitmBin('./3DALL_t061213_000000.bin')

# Make contour of rho at lowest altitude (index 0).
# Convert lat lon from rad to degrees.
p=180.0/np.pi
f=plt.figure()  #make a fig.
ax=f.add_subplot(111)  #make an ax.

# Create the contour for an altitude slice and call it 'cnt' (no jokes, please.)
# The '61' is the number of contours; you could use a vector of values to set
# levels manually if you wish.  get_cmap accepts any of the color map names
# from the colormap demo pic from the Matplotlib gallery; adding '_r'
# reverses the colormap.
cnt=ax.contourf(a['Longitude'][:,:,0]*p, p*a['Latitude'][:,:,0],
                a['Rho'][:,:,0], 61, cmap=get_cmap('Spectral_r'))

# Configure axis.
ax.set_xlabel('Longitude')
ax.set_ylabel('Latitude')
ax.set_title(r'$\rho$ at Altitude=%5.2f$km$' % (a['Altitude'][0,0,0]/1000.0))
f.suptitle('File=%s'%(a.attrs['file']))

# Add a colorbar and set the tick format to exponential notation.
cb=plt.colorbar(cnt)
cb.formatter=FormatStrFormatter('%7.2E')
cb.update_ticks()

# Add the quivers.
ax.quiver(a['Longitude'][:,:,0]*p, p*a['Latitude'][:,:,0],
          a['V!Dn!N (east)'][:,:,0],a['V!Dn!N (north)'][:,:,0])

# Draw to screen.
if plt.isinteractive():
    plt.draw()  #In interactive mode, you just "draw".
else:
    # W/o interactive mode, "show" stops the user from typing more
    # at the terminal until plots are drawn.
    plt.show()
mit
-5,919,587,534,596,864,000
32.576271
80
0.67996
false
pxmkuruc/usd-qt
pxr/usdQt/_bindings.py
1
1347
#
# Copyright 2017 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
from . import _usdQt

from pxr import Tf
Tf.PrepareModule(_usdQt, locals())
del _usdQt, Tf

try:
    import __DOC
    __DOC.Execute(locals())
    del __DOC
except Exception:
    try:
        import __tmpDoc
        __tmpDoc.Execute(locals())
        del __tmpDoc
    except:
        pass
apache-2.0
-7,059,305,401,523,659,000
32.675
74
0.726058
false
superfluidity/RDCL3D
code/toscaparser/elements/statefulentitytype.py
1
4045
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from toscaparser.common.exception import ExceptionCollector from toscaparser.common.exception import InvalidTypeError from toscaparser.elements.attribute_definition import AttributeDef from toscaparser.elements.entity_type import EntityType from toscaparser.elements.property_definition import PropertyDef from toscaparser.unsupportedtype import UnsupportedType class StatefulEntityType(EntityType): '''Class representing TOSCA states.''' interfaces_node_lifecycle_operations = ['create', 'configure', 'start', 'stop', 'delete'] interfaces_relationship_configure_operations = ['post_configure_source', 'post_configure_target', 'add_target', 'remove_target'] def __init__(self, entitytype, prefix, custom_def=None): entire_entitytype = entitytype if UnsupportedType.validate_type(entire_entitytype): self.defs = None else: if entitytype.startswith(self.TOSCA + ":"): entitytype = entitytype[(len(self.TOSCA) + 1):] entire_entitytype = prefix + entitytype if not entitytype.startswith(self.TOSCA): entire_entitytype = prefix + entitytype if entire_entitytype in list(self.TOSCA_DEF.keys()): self.defs = self.TOSCA_DEF[entire_entitytype] entitytype = entire_entitytype elif custom_def and entitytype in list(custom_def.keys()): self.defs = custom_def[entitytype] else: self.defs = None ExceptionCollector.appendException( InvalidTypeError(what=entitytype)) self.type = entitytype def get_properties_def_objects(self): '''Return a list of property definition objects.''' properties = [] props = self.get_definition(self.PROPERTIES) if props: for prop, schema in props.items(): properties.append(PropertyDef(prop, None, schema)) return properties def get_properties_def(self): '''Return a dictionary of property definition name-object pairs.''' return {prop.name: prop for prop in self.get_properties_def_objects()} def get_property_def_value(self, name): '''Return the property definition associated with a given name.''' props_def = self.get_properties_def() if props_def and name in props_def.keys(): return props_def[name].value def get_attributes_def_objects(self): '''Return a list of attribute definition objects.''' attrs = self.get_value(self.ATTRIBUTES, parent=True) if attrs: return [AttributeDef(attr, None, schema) for attr, schema in attrs.items()] return [] def get_attributes_def(self): '''Return a dictionary of attribute definition name-object pairs.''' return {attr.name: attr for attr in self.get_attributes_def_objects()} def get_attribute_def_value(self, name): '''Return the attribute definition associated with a given name.''' attrs_def = self.get_attributes_def() if attrs_def and name in attrs_def.keys(): return attrs_def[name].value
apache-2.0
2,767,152,885,875,572,700
43.450549
78
0.616564
false
toslunar/chainerrl
examples/mujoco/train_trpo_gym.py
1
7790
"""An example of training TRPO against OpenAI Gym Envs. This script is an example of training a TRPO agent against OpenAI Gym envs. Both discrete and continuous action spaces are supported. Chainer v3.1.0 or newer is required. To solve CartPole-v0, run: python train_trpo_gym.py --env CartPole-v0 --steps 100000 To solve InvertedPendulum-v1, run: python train_trpo_gym.py --env InvertedPendulum-v1 --steps 100000 """ from __future__ import division from __future__ import print_function from __future__ import unicode_literals from __future__ import absolute_import from builtins import * # NOQA from future import standard_library standard_library.install_aliases() # NOQA import argparse import logging import os import chainer from chainer import functions as F import gym import gym.spaces import numpy as np import chainerrl def main(): parser = argparse.ArgumentParser() parser.add_argument('--gpu', type=int, default=0, help='GPU device ID. Set to -1 to use CPUs only.') parser.add_argument('--env', type=str, default='Hopper-v2', help='Gym Env ID') parser.add_argument('--seed', type=int, default=0, help='Random seed [0, 2 ** 32)') parser.add_argument('--outdir', type=str, default='results', help='Directory path to save output files.' ' If it does not exist, it will be created.') parser.add_argument('--steps', type=int, default=10 ** 6, help='Total time steps for training.') parser.add_argument('--eval-interval', type=int, default=10000, help='Interval between evaluation phases in steps.') parser.add_argument('--eval-n-runs', type=int, default=10, help='Number of episodes ran in an evaluation phase') parser.add_argument('--render', action='store_true', default=False, help='Render the env') parser.add_argument('--demo', action='store_true', default=False, help='Run demo episodes, not training') parser.add_argument('--load', type=str, default='', help='Directory path to load a saved agent data from' ' if it is a non-empty string.') parser.add_argument('--trpo-update-interval', type=int, default=5000, help='Interval steps of TRPO iterations.') parser.add_argument('--logger-level', type=int, default=logging.INFO, help='Level of the root logger.') parser.add_argument('--monitor', action='store_true', help='Monitor the env by gym.wrappers.Monitor.' ' Videos and additional log will be saved.') args = parser.parse_args() logging.basicConfig(level=args.logger_level) # Set random seed chainerrl.misc.set_random_seed(args.seed, gpus=(args.gpu,)) args.outdir = chainerrl.experiments.prepare_output_dir(args, args.outdir) def make_env(test): env = gym.make(args.env) # Use different random seeds for train and test envs env_seed = 2 ** 32 - args.seed if test else args.seed env.seed(env_seed) # Cast observations to float32 because our model uses float32 env = chainerrl.wrappers.CastObservationToFloat32(env) if args.monitor: env = chainerrl.wrappers.Monitor(env, args.outdir) if args.render: env = chainerrl.wrappers.Render(env) return env env = make_env(test=False) timestep_limit = env.spec.tags.get( 'wrapper_config.TimeLimit.max_episode_steps') obs_space = env.observation_space action_space = env.action_space print('Observation space:', obs_space) print('Action space:', action_space) if not isinstance(obs_space, gym.spaces.Box): print("""\ This example only supports gym.spaces.Box observation spaces. 
To apply it to other observation spaces, use a custom phi function that convert an observation to numpy.ndarray of numpy.float32.""") # NOQA return # Normalize observations based on their empirical mean and variance obs_normalizer = chainerrl.links.EmpiricalNormalization( obs_space.low.size) if isinstance(action_space, gym.spaces.Box): # Use a Gaussian policy for continuous action spaces policy = \ chainerrl.policies.FCGaussianPolicyWithStateIndependentCovariance( obs_space.low.size, action_space.low.size, n_hidden_channels=64, n_hidden_layers=2, mean_wscale=0.01, nonlinearity=F.tanh, var_type='diagonal', var_func=lambda x: F.exp(2 * x), # Parameterize log std var_param_init=0, # log std = 0 => std = 1 ) elif isinstance(action_space, gym.spaces.Discrete): # Use a Softmax policy for discrete action spaces policy = chainerrl.policies.FCSoftmaxPolicy( obs_space.low.size, action_space.n, n_hidden_channels=64, n_hidden_layers=2, last_wscale=0.01, nonlinearity=F.tanh, ) else: print("""\ TRPO only supports gym.spaces.Box or gym.spaces.Discrete action spaces.""") # NOQA return # Use a value function to reduce variance vf = chainerrl.v_functions.FCVFunction( obs_space.low.size, n_hidden_channels=64, n_hidden_layers=2, last_wscale=0.01, nonlinearity=F.tanh, ) if args.gpu >= 0: chainer.cuda.get_device_from_id(args.gpu).use() policy.to_gpu(args.gpu) vf.to_gpu(args.gpu) obs_normalizer.to_gpu(args.gpu) # TRPO's policy is optimized via CG and line search, so it doesn't require # a chainer.Optimizer. Only the value function needs it. vf_opt = chainer.optimizers.Adam() vf_opt.setup(vf) # Draw the computational graph and save it in the output directory. fake_obs = chainer.Variable( policy.xp.zeros(obs_space.low.shape, dtype=np.float32)[None], name='observation') chainerrl.misc.draw_computational_graph( [policy(fake_obs)], os.path.join(args.outdir, 'policy')) chainerrl.misc.draw_computational_graph( [vf(fake_obs)], os.path.join(args.outdir, 'vf')) # Hyperparameters in http://arxiv.org/abs/1709.06560 agent = chainerrl.agents.TRPO( policy=policy, vf=vf, vf_optimizer=vf_opt, obs_normalizer=obs_normalizer, update_interval=args.trpo_update_interval, conjugate_gradient_max_iter=20, conjugate_gradient_damping=1e-1, gamma=0.995, lambd=0.97, vf_epochs=5, entropy_coef=0, ) if args.load: agent.load(args.load) if args.demo: env = make_env(test=True) eval_stats = chainerrl.experiments.eval_performance( env=env, agent=agent, n_steps=None, n_episodes=args.eval_n_runs, max_episode_len=timestep_limit) print('n_runs: {} mean: {} median: {} stdev {}'.format( args.eval_n_runs, eval_stats['mean'], eval_stats['median'], eval_stats['stdev'])) else: chainerrl.experiments.train_agent_with_evaluation( agent=agent, env=env, eval_env=make_env(test=True), outdir=args.outdir, steps=args.steps, eval_n_steps=None, eval_n_episodes=args.eval_n_runs, eval_interval=args.eval_interval, train_max_episode_len=timestep_limit, ) if __name__ == '__main__': main()
mit
8,461,528,065,418,983,000
36.095238
83
0.610398
false
lfloeer/hiprofile
lineprofile/utils.py
1
4109
import numpy as np import itertools as it def sample_prior(n_sampler, fitter, thermal_noise=0.023, thermal_noise_std=0.01): """Given a fitter object and the number of samplers, sample the prior distribution of the fit parameters for use as the initial positions for the walkers. There are two exceptions: 1) The outlier fraction is only sampled on the interval (fraction_min, fraction_min + 1), i.e. only in the lowest decade allowed by the prior distribution. 2) The initial values for the inlier standard deviation are drawn from a gaussian distribution determined by the parameters `thermal_noise` and `thermal_noise_std`. """ def sample_components(): """Get samples from prior on line profile""" for component_idx in range(fitter.n_disks): yield np.random.uniform(fitter.fint_min, fitter.fint_max, n_sampler) yield np.random.normal(fitter.v_center_mean[component_idx], fitter.v_center_std[component_idx], n_sampler) yield np.random.gamma(fitter.v_rot_k, fitter.v_rot_theta, n_sampler) yield fitter.turbulence_min + np.random.gamma(fitter.turbulence_k, fitter.turbulence_theta, n_sampler) yield np.random.beta(fitter.fsolid_p, fitter.fsolid_q, n_sampler) yield 2 * np.random.beta(fitter.asym_p, fitter.asym_q, n_sampler) - 1.0 def sample_gaussians(): """Get samples from prior on gaussians""" for component_idx in range(fitter.n_disks, fitter.n_disks + fitter.n_gaussians): yield np.random.uniform(fitter.fint_min, fitter.fint_max, n_sampler) yield np.random.normal(fitter.v_center_mean[component_idx], fitter.v_center_std[component_idx], n_sampler) yield np.random.uniform(fitter.gauss_disp_min, fitter.gauss_disp_max, n_sampler) def sample_baseline(): """Get samples from prior on baseline""" for _ in range(fitter.n_baseline): yield np.random.normal(0, 0.1, n_sampler) def sample_likelihood(): """Get samples from prior on posterior parameters""" yield np.random.uniform(fitter.fraction_min, fitter.fraction_min + 1, n_sampler) std_in_values = np.clip( np.random.normal(thermal_noise, thermal_noise_std, n_sampler), 1e-6, 1e6 ) std_in_values = np.log10(std_in_values) yield np.clip(std_in_values, fitter.std_in_min, fitter.std_in_max) yield np.random.normal(0., fitter.mu_out_std, n_sampler) yield np.random.uniform(fitter.std_out_min, fitter.std_out_max, n_sampler) prior_it = it.chain(sample_components(), sample_gaussians(), sample_baseline(), sample_likelihood()) return np.array([samples for samples in prior_it]).T.copy() def resample_position(position, n_walkers, n_dim, fitter, ball_size=1e-2): """Use rejection sampling to resample the walker positions""" scale_factors = np.ones(n_dim) scale_factors[3:6 * fitter.n_disks:6] = 10 scale_factors[2:6 * fitter.n_disks:6] = 100 scale_factors[1:6 * fitter.n_disks:6] = 10 scale_factors *= ball_size new_positions = np.array([position + scale_factors * np.random.randn(n_dim) for _ in xrange(n_walkers)]) valid = np.array([np.isfinite(fitter.ln_prior(p)) for p in new_positions]) for _ in xrange(20): n_invalid = np.sum(~valid) if n_invalid == 0: break new_positions[~valid] = np.array([position + ball_size * np.random.randn(n_dim) for _ in xrange(n_invalid)]) valid[~valid] = np.array([np.isfinite(fitter.ln_prior(p)) for p in new_positions[~valid]]) return new_positions
mit
-1,081,512,766,663,581,800
46.77907
104
0.590655
false
JPinSPACE/AdventOfCode
day07/02_override_wire/solution.py
1
1717
""" Solution to the second puzzle of Day 7 on adventofcode.com """ import os PARTS = {} CACHE = {} def compute(value): """ Recursion is dumb. """ if value in CACHE: return CACHE[value] if value.isdigit(): return int(value) value = PARTS[value] if 'NOT' in value: value_a = value.split(' ')[1] return ~ compute(value_a) try: (value_a, operation, value_b) = value.split(' ') computed_a = compute(value_a) CACHE[value_a] = computed_a computed_b = compute(value_b) CACHE[value_b] = computed_b if operation == 'AND': computed = compute(value_a) & compute(value_b) elif operation == 'OR': computed = compute(value_a) | compute(value_b) elif operation == 'LSHIFT': computed = compute(value_a) << compute(value_b) elif operation == 'RSHIFT': computed = compute(value_a) >> compute(value_b) else: print "Topaz lied!" return computed except ValueError: return compute(value) def main(): """ Read in circuit instructions and assemble them! """ # pylint: disable=W0603 global CACHE basedir = os.path.dirname(os.path.realpath(__file__)) file_path = os.path.join(basedir, 'input') with open(file_path, 'r') as input_file: for line in input_file: line = line.strip() (operation, name) = line.split(' -> ') PARTS[name] = operation signal_a = compute('a') CACHE = {} PARTS['b'] = str(signal_a) solution = compute('a') print solution assert solution == 14710 if __name__ == '__main__': main()
mit
4,521,620,091,914,956,300
21.298701
62
0.550379
false
onshape-public/onshape-clients
python/onshape_client/oas/models/bt_translation_request_info.py
1
6823
# coding: utf-8 """ Onshape REST API The Onshape REST API consumed by all clients. # noqa: E501 The version of the OpenAPI document: 1.113 Contact: [email protected] Generated by: https://openapi-generator.tech """ from __future__ import absolute_import import re # noqa: F401 import sys # noqa: F401 import six # noqa: F401 import nulltype # noqa: F401 from onshape_client.oas.model_utils import ( # noqa: F401 ModelComposed, ModelNormal, ModelSimple, date, datetime, file_type, int, none_type, str, validate_get_composed_info, ) class BTTranslationRequestInfo(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. Attributes: allowed_values (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict with a capitalized key describing the allowed value and an allowed value. These dicts store the allowed enum values. attribute_map (dict): The key is attribute name and the value is json key in definition. discriminator_value_class_map (dict): A dict to go from the discriminator variable value to the discriminator class name. validations (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict that stores validations for max_length, min_length, max_items, min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, inclusive_minimum, and regex. additional_properties_type (tuple): A tuple of classes accepted as additional properties values. """ allowed_values = { ("request_state",): {"ACTIVE": "ACTIVE", "DONE": "DONE", "FAILED": "FAILED",}, } validations = {} additional_properties_type = None @staticmethod def openapi_types(): """ This must be a class method so a model may have properties that are of type self, this ensures that we don't create a cyclic import Returns openapi_types (dict): The key is attribute name and the value is attribute type. 
""" return { "document_id": (str,), # noqa: E501 "failure_reason": (str,), # noqa: E501 "href": (str,), # noqa: E501 "id": (str,), # noqa: E501 "name": (str,), # noqa: E501 "request_element_id": (str,), # noqa: E501 "request_state": (str,), # noqa: E501 "result_document_id": (str,), # noqa: E501 "result_element_ids": ([str],), # noqa: E501 "result_external_data_ids": ([str],), # noqa: E501 "result_workspace_id": (str,), # noqa: E501 "version_id": (str,), # noqa: E501 "view_ref": (str,), # noqa: E501 "workspace_id": (str,), # noqa: E501 } @staticmethod def discriminator(): return None attribute_map = { "document_id": "documentId", # noqa: E501 "failure_reason": "failureReason", # noqa: E501 "href": "href", # noqa: E501 "id": "id", # noqa: E501 "name": "name", # noqa: E501 "request_element_id": "requestElementId", # noqa: E501 "request_state": "requestState", # noqa: E501 "result_document_id": "resultDocumentId", # noqa: E501 "result_element_ids": "resultElementIds", # noqa: E501 "result_external_data_ids": "resultExternalDataIds", # noqa: E501 "result_workspace_id": "resultWorkspaceId", # noqa: E501 "version_id": "versionId", # noqa: E501 "view_ref": "viewRef", # noqa: E501 "workspace_id": "workspaceId", # noqa: E501 } @staticmethod def _composed_schemas(): return None required_properties = set( [ "_data_store", "_check_type", "_from_server", "_path_to_item", "_configuration", ] ) def __init__( self, _check_type=True, _from_server=False, _path_to_item=(), _configuration=None, **kwargs ): # noqa: E501 """bt_translation_request_info.BTTranslationRequestInfo - a model defined in OpenAPI Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _from_server (bool): True if the data is from the server False if the data is from the client (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. document_id (str): [optional] # noqa: E501 failure_reason (str): [optional] # noqa: E501 href (str): [optional] # noqa: E501 id (str): [optional] # noqa: E501 name (str): [optional] # noqa: E501 request_element_id (str): [optional] # noqa: E501 request_state (str): [optional] # noqa: E501 result_document_id (str): [optional] # noqa: E501 result_element_ids ([str]): [optional] # noqa: E501 result_external_data_ids ([str]): [optional] # noqa: E501 result_workspace_id (str): [optional] # noqa: E501 version_id (str): [optional] # noqa: E501 view_ref (str): [optional] # noqa: E501 workspace_id (str): [optional] # noqa: E501 """ self._data_store = {} self._check_type = _check_type self._from_server = _from_server self._path_to_item = _path_to_item self._configuration = _configuration for var_name, var_value in six.iteritems(kwargs): if ( var_name not in self.attribute_map and self._configuration is not None and self._configuration.discard_unknown_keys and self.additional_properties_type is None ): # discard variable. continue setattr(self, var_name, var_value)
mit
-7,788,842,874,007,090,000
36.081522
92
0.556793
false
damoxc/ganeti
lib/opcodes.py
1
68014
# # # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. """OpCodes module This module implements the data structures which define the cluster operations - the so-called opcodes. Every operation which modifies the cluster state is expressed via opcodes. """ # this are practically structures, so disable the message about too # few public methods: # pylint: disable=R0903 import logging import re import ipaddr from ganeti import constants from ganeti import errors from ganeti import ht from ganeti import objects from ganeti import outils # Common opcode attributes #: output fields for a query operation _POutputFields = ("output_fields", ht.NoDefault, ht.TListOf(ht.TNonEmptyString), "Selected output fields") #: the shutdown timeout _PShutdownTimeout = \ ("shutdown_timeout", constants.DEFAULT_SHUTDOWN_TIMEOUT, ht.TNonNegativeInt, "How long to wait for instance to shut down") #: the force parameter _PForce = ("force", False, ht.TBool, "Whether to force the operation") #: a required instance name (for single-instance LUs) _PInstanceName = ("instance_name", ht.NoDefault, ht.TNonEmptyString, "Instance name") #: Whether to ignore offline nodes _PIgnoreOfflineNodes = ("ignore_offline_nodes", False, ht.TBool, "Whether to ignore offline nodes") #: a required node name (for single-node LUs) _PNodeName = ("node_name", ht.NoDefault, ht.TNonEmptyString, "Node name") #: a required node group name (for single-group LUs) _PGroupName = ("group_name", ht.NoDefault, ht.TNonEmptyString, "Group name") #: Migration type (live/non-live) _PMigrationMode = ("mode", None, ht.TMaybe(ht.TElemOf(constants.HT_MIGRATION_MODES)), "Migration mode") #: Obsolete 'live' migration mode (boolean) _PMigrationLive = ("live", None, ht.TMaybeBool, "Legacy setting for live migration, do not use") #: Tag type _PTagKind = ("kind", ht.NoDefault, ht.TElemOf(constants.VALID_TAG_TYPES), "Tag kind") #: List of tag strings _PTags = ("tags", ht.NoDefault, ht.TListOf(ht.TNonEmptyString), "List of tag names") _PForceVariant = ("force_variant", False, ht.TBool, "Whether to force an unknown OS variant") _PWaitForSync = ("wait_for_sync", True, ht.TBool, "Whether to wait for the disk to synchronize") _PWaitForSyncFalse = ("wait_for_sync", False, ht.TBool, "Whether to wait for the disk to synchronize" " (defaults to false)") _PIgnoreConsistency = ("ignore_consistency", False, ht.TBool, "Whether to ignore disk consistency") _PStorageName = ("name", ht.NoDefault, ht.TMaybeString, "Storage name") _PUseLocking = ("use_locking", False, ht.TBool, "Whether to use synchronization") _PNameCheck = ("name_check", True, ht.TBool, "Whether to check name") _PNodeGroupAllocPolicy = \ ("alloc_policy", None, ht.TMaybe(ht.TElemOf(constants.VALID_ALLOC_POLICIES)), "Instance allocation policy") _PGroupNodeParams = ("ndparams", None, ht.TMaybeDict, 
"Default node parameters for group") _PQueryWhat = ("what", ht.NoDefault, ht.TElemOf(constants.QR_VIA_OP), "Resource(s) to query for") _PEarlyRelease = ("early_release", False, ht.TBool, "Whether to release locks as soon as possible") _PIpCheckDoc = "Whether to ensure instance's IP address is inactive" #: Do not remember instance state changes _PNoRemember = ("no_remember", False, ht.TBool, "Do not remember the state change") #: Target node for instance migration/failover _PMigrationTargetNode = ("target_node", None, ht.TMaybeString, "Target node for shared-storage instances") _PStartupPaused = ("startup_paused", False, ht.TBool, "Pause instance at startup") _PVerbose = ("verbose", False, ht.TBool, "Verbose mode") # Parameters for cluster verification _PDebugSimulateErrors = ("debug_simulate_errors", False, ht.TBool, "Whether to simulate errors (useful for debugging)") _PErrorCodes = ("error_codes", False, ht.TBool, "Error codes") _PSkipChecks = ("skip_checks", ht.EmptyList, ht.TListOf(ht.TElemOf(constants.VERIFY_OPTIONAL_CHECKS)), "Which checks to skip") _PIgnoreErrors = ("ignore_errors", ht.EmptyList, ht.TListOf(ht.TElemOf(constants.CV_ALL_ECODES_STRINGS)), "List of error codes that should be treated as warnings") # Disk parameters _PDiskParams = \ ("diskparams", None, ht.TMaybe(ht.TDictOf(ht.TElemOf(constants.DISK_TEMPLATES), ht.TDict)), "Disk templates' parameter defaults") # Parameters for node resource model _PHvState = ("hv_state", None, ht.TMaybeDict, "Set hypervisor states") _PDiskState = ("disk_state", None, ht.TMaybeDict, "Set disk states") #: Opportunistic locking _POpportunisticLocking = \ ("opportunistic_locking", False, ht.TBool, ("Whether to employ opportunistic locking for nodes, meaning nodes" " already locked by another opcode won't be considered for instance" " allocation (only when an iallocator is used)")) _PIgnoreIpolicy = ("ignore_ipolicy", False, ht.TBool, "Whether to ignore ipolicy violations") # Allow runtime changes while migrating _PAllowRuntimeChgs = ("allow_runtime_changes", True, ht.TBool, "Allow runtime changes (eg. 
memory ballooning)") #: IAllocator field builder _PIAllocFromDesc = lambda desc: ("iallocator", None, ht.TMaybeString, desc) #: a required network name _PNetworkName = ("network_name", ht.NoDefault, ht.TNonEmptyString, "Set network name") _PTargetGroups = \ ("target_groups", None, ht.TMaybeListOf(ht.TNonEmptyString), "Destination group names or UUIDs (defaults to \"all but current group\")") #: OP_ID conversion regular expression _OPID_RE = re.compile("([a-z])([A-Z])") #: Utility function for L{OpClusterSetParams} _TestClusterOsListItem = \ ht.TAnd(ht.TIsLength(2), ht.TItems([ ht.TElemOf(constants.DDMS_VALUES), ht.TNonEmptyString, ])) _TestClusterOsList = ht.TMaybeListOf(_TestClusterOsListItem) # TODO: Generate check from constants.INIC_PARAMS_TYPES #: Utility function for testing NIC definitions _TestNicDef = \ ht.Comment("NIC parameters")(ht.TDictOf(ht.TElemOf(constants.INIC_PARAMS), ht.TMaybeString)) _TSetParamsResultItemItems = [ ht.Comment("name of changed parameter")(ht.TNonEmptyString), ht.Comment("new value")(ht.TAny), ] _TSetParamsResult = \ ht.TListOf(ht.TAnd(ht.TIsLength(len(_TSetParamsResultItemItems)), ht.TItems(_TSetParamsResultItemItems))) # In the disks option we can provide arbitrary parameters too, which # we may not be able to validate at this level, so we just check the # format of the dict here and the checks concerning IDISK_PARAMS will # happen at the LU level _TDiskParams = \ ht.Comment("Disk parameters")(ht.TDictOf(ht.TNonEmptyString, ht.TOr(ht.TNonEmptyString, ht.TInt))) _TQueryRow = \ ht.TListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([ht.TElemOf(constants.RS_ALL), ht.TAny]))) _TQueryResult = ht.TListOf(_TQueryRow) _TOldQueryRow = ht.TListOf(ht.TAny) _TOldQueryResult = ht.TListOf(_TOldQueryRow) _SUMMARY_PREFIX = { "CLUSTER_": "C_", "GROUP_": "G_", "NODE_": "N_", "INSTANCE_": "I_", } #: Attribute name for dependencies DEPEND_ATTR = "depends" #: Attribute name for comment COMMENT_ATTR = "comment" def _NameToId(name): """Convert an opcode class name to an OP_ID. @type name: string @param name: the class name, as OpXxxYyy @rtype: string @return: the name in the OP_XXXX_YYYY format """ if not name.startswith("Op"): return None # Note: (?<=[a-z])(?=[A-Z]) would be ideal, since it wouldn't # consume any input, and hence we would just have all the elements # in the list, one by one; but it seems that split doesn't work on # non-consuming input, hence we have to process the input string a # bit name = _OPID_RE.sub(r"\1,\2", name) elems = name.split(",") return "_".join(n.upper() for n in elems) def _GenerateObjectTypeCheck(obj, fields_types): """Helper to generate type checks for objects. @param obj: The object to generate type checks @param fields_types: The fields and their types as a dict @return: A ht type check function """ assert set(obj.GetAllSlots()) == set(fields_types.keys()), \ "%s != %s" % (set(obj.GetAllSlots()), set(fields_types.keys())) return ht.TStrictDict(True, True, fields_types) _TQueryFieldDef = \ _GenerateObjectTypeCheck(objects.QueryFieldDefinition, { "name": ht.TNonEmptyString, "title": ht.TNonEmptyString, "kind": ht.TElemOf(constants.QFT_ALL), "doc": ht.TNonEmptyString, }) def RequireFileStorage(): """Checks that file storage is enabled. While it doesn't really fit into this module, L{utils} was deemed too large of a dependency to be imported for just one or two functions. 
@raise errors.OpPrereqError: when file storage is disabled """ if not constants.ENABLE_FILE_STORAGE: raise errors.OpPrereqError("File storage disabled at configure time", errors.ECODE_INVAL) def RequireSharedFileStorage(): """Checks that shared file storage is enabled. While it doesn't really fit into this module, L{utils} was deemed too large of a dependency to be imported for just one or two functions. @raise errors.OpPrereqError: when shared file storage is disabled """ if not constants.ENABLE_SHARED_FILE_STORAGE: raise errors.OpPrereqError("Shared file storage disabled at" " configure time", errors.ECODE_INVAL) @ht.WithDesc("CheckFileStorage") def _CheckFileStorage(value): """Ensures file storage is enabled if used. """ if value == constants.DT_FILE: RequireFileStorage() elif value == constants.DT_SHARED_FILE: RequireSharedFileStorage() return True def _BuildDiskTemplateCheck(accept_none): """Builds check for disk template. @type accept_none: bool @param accept_none: whether to accept None as a correct value @rtype: callable """ template_check = ht.TElemOf(constants.DISK_TEMPLATES) if accept_none: template_check = ht.TMaybe(template_check) return ht.TAnd(template_check, _CheckFileStorage) def _CheckStorageType(storage_type): """Ensure a given storage type is valid. """ if storage_type not in constants.VALID_STORAGE_TYPES: raise errors.OpPrereqError("Unknown storage type: %s" % storage_type, errors.ECODE_INVAL) if storage_type == constants.ST_FILE: # TODO: What about shared file storage? RequireFileStorage() return True #: Storage type parameter _PStorageType = ("storage_type", ht.NoDefault, _CheckStorageType, "Storage type") @ht.WithDesc("IPv4 network") def _CheckCIDRNetNotation(value): """Ensure a given CIDR notation type is valid. """ try: ipaddr.IPv4Network(value) except ipaddr.AddressValueError: return False return True @ht.WithDesc("IPv4 address") def _CheckCIDRAddrNotation(value): """Ensure a given CIDR notation type is valid. """ try: ipaddr.IPv4Address(value) except ipaddr.AddressValueError: return False return True @ht.WithDesc("IPv6 address") def _CheckCIDR6AddrNotation(value): """Ensure a given CIDR notation type is valid. """ try: ipaddr.IPv6Address(value) except ipaddr.AddressValueError: return False return True @ht.WithDesc("IPv6 network") def _CheckCIDR6NetNotation(value): """Ensure a given CIDR notation type is valid. """ try: ipaddr.IPv6Network(value) except ipaddr.AddressValueError: return False return True _TIpAddress4 = ht.TAnd(ht.TString, _CheckCIDRAddrNotation) _TIpAddress6 = ht.TAnd(ht.TString, _CheckCIDR6AddrNotation) _TIpNetwork4 = ht.TAnd(ht.TString, _CheckCIDRNetNotation) _TIpNetwork6 = ht.TAnd(ht.TString, _CheckCIDR6NetNotation) _TMaybeAddr4List = ht.TMaybe(ht.TListOf(_TIpAddress4)) class _AutoOpParamSlots(outils.AutoSlots): """Meta class for opcode definitions. """ def __new__(mcs, name, bases, attrs): """Called when a class should be created. 
@param mcs: The meta class @param name: Name of created class @param bases: Base classes @type attrs: dict @param attrs: Class attributes """ assert "OP_ID" not in attrs, "Class '%s' defining OP_ID" % name slots = mcs._GetSlots(attrs) assert "OP_DSC_FIELD" not in attrs or attrs["OP_DSC_FIELD"] in slots, \ "Class '%s' uses unknown field in OP_DSC_FIELD" % name assert ("OP_DSC_FORMATTER" not in attrs or callable(attrs["OP_DSC_FORMATTER"])), \ ("Class '%s' uses non-callable in OP_DSC_FORMATTER (%s)" % (name, type(attrs["OP_DSC_FORMATTER"]))) attrs["OP_ID"] = _NameToId(name) return outils.AutoSlots.__new__(mcs, name, bases, attrs) @classmethod def _GetSlots(mcs, attrs): """Build the slots out of OP_PARAMS. """ # Always set OP_PARAMS to avoid duplicates in BaseOpCode.GetAllParams params = attrs.setdefault("OP_PARAMS", []) # Use parameter names as slots return [pname for (pname, _, _, _) in params] class BaseOpCode(outils.ValidatedSlots): """A simple serializable object. This object serves as a parent class for OpCode without any custom field handling. """ # pylint: disable=E1101 # as OP_ID is dynamically defined __metaclass__ = _AutoOpParamSlots def __getstate__(self): """Generic serializer. This method just returns the contents of the instance as a dictionary. @rtype: C{dict} @return: the instance attributes and their values """ state = {} for name in self.GetAllSlots(): if hasattr(self, name): state[name] = getattr(self, name) return state def __setstate__(self, state): """Generic unserializer. This method just restores from the serialized state the attributes of the current instance. @param state: the serialized opcode data @type state: C{dict} """ if not isinstance(state, dict): raise ValueError("Invalid data to __setstate__: expected dict, got %s" % type(state)) for name in self.GetAllSlots(): if name not in state and hasattr(self, name): delattr(self, name) for name in state: setattr(self, name, state[name]) @classmethod def GetAllParams(cls): """Compute list of all parameters for an opcode. """ slots = [] for parent in cls.__mro__: slots.extend(getattr(parent, "OP_PARAMS", [])) return slots def Validate(self, set_defaults): # pylint: disable=W0221 """Validate opcode parameters, optionally setting default values. @type set_defaults: bool @param set_defaults: Whether to set default values @raise errors.OpPrereqError: When a parameter value doesn't match requirements """ for (attr_name, default, test, _) in self.GetAllParams(): assert test == ht.NoType or callable(test) if not hasattr(self, attr_name): if default == ht.NoDefault: raise errors.OpPrereqError("Required parameter '%s.%s' missing" % (self.OP_ID, attr_name), errors.ECODE_INVAL) elif set_defaults: if callable(default): dval = default() else: dval = default setattr(self, attr_name, dval) if test == ht.NoType: # no tests here continue if set_defaults or hasattr(self, attr_name): attr_val = getattr(self, attr_name) if not test(attr_val): logging.error("OpCode %s, parameter %s, has invalid type %s/value" " '%s' expecting type %s", self.OP_ID, attr_name, type(attr_val), attr_val, test) raise errors.OpPrereqError("Parameter '%s.%s' fails validation" % (self.OP_ID, attr_name), errors.ECODE_INVAL) def _BuildJobDepCheck(relative): """Builds check for job dependencies (L{DEPEND_ATTR}). 
@type relative: bool @param relative: Whether to accept relative job IDs (negative) @rtype: callable """ if relative: job_id = ht.TOr(ht.TJobId, ht.TRelativeJobId) else: job_id = ht.TJobId job_dep = \ ht.TAnd(ht.TOr(ht.TList, ht.TTuple), ht.TIsLength(2), ht.TItems([job_id, ht.TListOf(ht.TElemOf(constants.JOBS_FINALIZED))])) return ht.TMaybeListOf(job_dep) TNoRelativeJobDependencies = _BuildJobDepCheck(False) #: List of submission status and job ID as returned by C{SubmitManyJobs} _TJobIdListItem = \ ht.TAnd(ht.TIsLength(2), ht.TItems([ht.Comment("success")(ht.TBool), ht.Comment("Job ID if successful, error message" " otherwise")(ht.TOr(ht.TString, ht.TJobId))])) TJobIdList = ht.TListOf(_TJobIdListItem) #: Result containing only list of submitted jobs TJobIdListOnly = ht.TStrictDict(True, True, { constants.JOB_IDS_KEY: ht.Comment("List of submitted jobs")(TJobIdList), }) class OpCode(BaseOpCode): """Abstract OpCode. This is the root of the actual OpCode hierarchy. All clases derived from this class should override OP_ID. @cvar OP_ID: The ID of this opcode. This should be unique amongst all children of this class. @cvar OP_DSC_FIELD: The name of a field whose value will be included in the string returned by Summary(); see the docstring of that method for details). @cvar OP_DSC_FORMATTER: A callable that should format the OP_DSC_FIELD; if not present, then the field will be simply converted to string @cvar OP_PARAMS: List of opcode attributes, the default values they should get if not already defined, and types they must match. @cvar OP_RESULT: Callable to verify opcode result @cvar WITH_LU: Boolean that specifies whether this should be included in mcpu's dispatch table @ivar dry_run: Whether the LU should be run in dry-run mode, i.e. just the check steps @ivar priority: Opcode priority for queue """ # pylint: disable=E1101 # as OP_ID is dynamically defined WITH_LU = True OP_PARAMS = [ ("dry_run", None, ht.TMaybeBool, "Run checks only, don't execute"), ("debug_level", None, ht.TMaybe(ht.TNonNegativeInt), "Debug level"), ("priority", constants.OP_PRIO_DEFAULT, ht.TElemOf(constants.OP_PRIO_SUBMIT_VALID), "Opcode priority"), (DEPEND_ATTR, None, _BuildJobDepCheck(True), "Job dependencies; if used through ``SubmitManyJobs`` relative (negative)" " job IDs can be used; see :doc:`design document <design-chained-jobs>`" " for details"), (COMMENT_ATTR, None, ht.TMaybeString, "Comment describing the purpose of the opcode"), ] OP_RESULT = None def __getstate__(self): """Specialized getstate for opcodes. This method adds to the state dictionary the OP_ID of the class, so that on unload we can identify the correct class for instantiating the opcode. @rtype: C{dict} @return: the state as a dictionary """ data = BaseOpCode.__getstate__(self) data["OP_ID"] = self.OP_ID return data @classmethod def LoadOpCode(cls, data): """Generic load opcode method. The method identifies the correct opcode class from the dict-form by looking for a OP_ID key, if this is not found, or its value is not available in this module as a child of this class, we fail. 
@type data: C{dict} @param data: the serialized opcode """ if not isinstance(data, dict): raise ValueError("Invalid data to LoadOpCode (%s)" % type(data)) if "OP_ID" not in data: raise ValueError("Invalid data to LoadOpcode, missing OP_ID") op_id = data["OP_ID"] op_class = None if op_id in OP_MAPPING: op_class = OP_MAPPING[op_id] else: raise ValueError("Invalid data to LoadOpCode: OP_ID %s unsupported" % op_id) op = op_class() new_data = data.copy() del new_data["OP_ID"] op.__setstate__(new_data) return op def Summary(self): """Generates a summary description of this opcode. The summary is the value of the OP_ID attribute (without the "OP_" prefix), plus the value of the OP_DSC_FIELD attribute, if one was defined; this field should allow to easily identify the operation (for an instance creation job, e.g., it would be the instance name). """ assert self.OP_ID is not None and len(self.OP_ID) > 3 # all OP_ID start with OP_, we remove that txt = self.OP_ID[3:] field_name = getattr(self, "OP_DSC_FIELD", None) if field_name: field_value = getattr(self, field_name, None) field_formatter = getattr(self, "OP_DSC_FORMATTER", None) if callable(field_formatter): field_value = field_formatter(field_value) elif isinstance(field_value, (list, tuple)): field_value = ",".join(str(i) for i in field_value) txt = "%s(%s)" % (txt, field_value) return txt def TinySummary(self): """Generates a compact summary description of the opcode. """ assert self.OP_ID.startswith("OP_") text = self.OP_ID[3:] for (prefix, supplement) in _SUMMARY_PREFIX.items(): if text.startswith(prefix): return supplement + text[len(prefix):] return text # cluster opcodes class OpClusterPostInit(OpCode): """Post cluster initialization. This opcode does not touch the cluster at all. Its purpose is to run hooks after the cluster has been initialized. """ OP_RESULT = ht.TBool class OpClusterDestroy(OpCode): """Destroy the cluster. This opcode has no other parameters. All the state is irreversibly lost after the execution of this opcode. """ OP_RESULT = ht.TNonEmptyString class OpClusterQuery(OpCode): """Query cluster information.""" OP_RESULT = ht.TDictOf(ht.TNonEmptyString, ht.TAny) class OpClusterVerify(OpCode): """Submits all jobs necessary to verify the cluster. """ OP_PARAMS = [ _PDebugSimulateErrors, _PErrorCodes, _PSkipChecks, _PIgnoreErrors, _PVerbose, ("group_name", None, ht.TMaybeString, "Group to verify"), ] OP_RESULT = TJobIdListOnly class OpClusterVerifyConfig(OpCode): """Verify the cluster config. """ OP_PARAMS = [ _PDebugSimulateErrors, _PErrorCodes, _PIgnoreErrors, _PVerbose, ] OP_RESULT = ht.TBool class OpClusterVerifyGroup(OpCode): """Run verify on a node group from the cluster. @type skip_checks: C{list} @ivar skip_checks: steps to be skipped from the verify process; this needs to be a subset of L{constants.VERIFY_OPTIONAL_CHECKS}; currently only L{constants.VERIFY_NPLUSONE_MEM} can be passed """ OP_DSC_FIELD = "group_name" OP_PARAMS = [ _PGroupName, _PDebugSimulateErrors, _PErrorCodes, _PSkipChecks, _PIgnoreErrors, _PVerbose, ] OP_RESULT = ht.TBool class OpClusterVerifyDisks(OpCode): """Verify the cluster disks. """ OP_RESULT = TJobIdListOnly class OpGroupVerifyDisks(OpCode): """Verifies the status of all disks in a node group. 
Result: a tuple of three elements: - dict of node names with issues (values: error msg) - list of instances with degraded disks (that should be activated) - dict of instances with missing logical volumes (values: (node, vol) pairs with details about the missing volumes) In normal operation, all lists should be empty. A non-empty instance list (3rd element of the result) is still ok (errors were fixed) but non-empty node list means some node is down, and probably there are unfixable drbd errors. Note that only instances that are drbd-based are taken into consideration. This might need to be revisited in the future. """ OP_DSC_FIELD = "group_name" OP_PARAMS = [ _PGroupName, ] OP_RESULT = \ ht.TAnd(ht.TIsLength(3), ht.TItems([ht.TDictOf(ht.TString, ht.TString), ht.TListOf(ht.TString), ht.TDictOf(ht.TString, ht.TListOf(ht.TListOf(ht.TString)))])) class OpClusterRepairDiskSizes(OpCode): """Verify the disk sizes of the instances and fixes configuration mimatches. Parameters: optional instances list, in case we want to restrict the checks to only a subset of the instances. Result: a list of tuples, (instance, disk, new-size) for changed configurations. In normal operation, the list should be empty. @type instances: list @ivar instances: the list of instances to check, or empty for all instances """ OP_PARAMS = [ ("instances", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), None), ] OP_RESULT = ht.TListOf(ht.TAnd(ht.TIsLength(3), ht.TItems([ht.TNonEmptyString, ht.TNonNegativeInt, ht.TNonNegativeInt]))) class OpClusterConfigQuery(OpCode): """Query cluster configuration values.""" OP_PARAMS = [ _POutputFields, ] OP_RESULT = ht.TListOf(ht.TAny) class OpClusterRename(OpCode): """Rename the cluster. @type name: C{str} @ivar name: The new name of the cluster. The name and/or the master IP address will be changed to match the new name and its IP address. """ OP_DSC_FIELD = "name" OP_PARAMS = [ ("name", ht.NoDefault, ht.TNonEmptyString, None), ] OP_RESULT = ht.TNonEmptyString class OpClusterSetParams(OpCode): """Change the parameters of the cluster. @type vg_name: C{str} or C{None} @ivar vg_name: The new volume group name or None to disable LVM usage. 
""" OP_PARAMS = [ _PHvState, _PDiskState, ("vg_name", None, ht.TMaybe(ht.TString), "Volume group name"), ("enabled_hypervisors", None, ht.TMaybe(ht.TAnd(ht.TListOf(ht.TElemOf(constants.HYPER_TYPES)), ht.TTrue)), "List of enabled hypervisors"), ("hvparams", None, ht.TMaybe(ht.TDictOf(ht.TNonEmptyString, ht.TDict)), "Cluster-wide hypervisor parameter defaults, hypervisor-dependent"), ("beparams", None, ht.TMaybeDict, "Cluster-wide backend parameter defaults"), ("os_hvp", None, ht.TMaybe(ht.TDictOf(ht.TNonEmptyString, ht.TDict)), "Cluster-wide per-OS hypervisor parameter defaults"), ("osparams", None, ht.TMaybe(ht.TDictOf(ht.TNonEmptyString, ht.TDict)), "Cluster-wide OS parameter defaults"), _PDiskParams, ("candidate_pool_size", None, ht.TMaybe(ht.TPositiveInt), "Master candidate pool size"), ("uid_pool", None, ht.NoType, "Set UID pool, must be list of lists describing UID ranges (two items," " start and end inclusive)"), ("add_uids", None, ht.NoType, "Extend UID pool, must be list of lists describing UID ranges (two" " items, start and end inclusive) to be added"), ("remove_uids", None, ht.NoType, "Shrink UID pool, must be list of lists describing UID ranges (two" " items, start and end inclusive) to be removed"), ("maintain_node_health", None, ht.TMaybeBool, "Whether to automatically maintain node health"), ("prealloc_wipe_disks", None, ht.TMaybeBool, "Whether to wipe disks before allocating them to instances"), ("nicparams", None, ht.TMaybeDict, "Cluster-wide NIC parameter defaults"), ("ndparams", None, ht.TMaybeDict, "Cluster-wide node parameter defaults"), ("ipolicy", None, ht.TMaybeDict, "Cluster-wide :ref:`instance policy <rapi-ipolicy>` specs"), ("drbd_helper", None, ht.TMaybe(ht.TString), "DRBD helper program"), ("default_iallocator", None, ht.TMaybe(ht.TString), "Default iallocator for cluster"), ("master_netdev", None, ht.TMaybe(ht.TString), "Master network device"), ("master_netmask", None, ht.TMaybe(ht.TNonNegativeInt), "Netmask of the master IP"), ("reserved_lvs", None, ht.TMaybeListOf(ht.TNonEmptyString), "List of reserved LVs"), ("hidden_os", None, _TestClusterOsList, "Modify list of hidden operating systems: each modification must have" " two items, the operation and the OS name; the operation can be" " ``%s`` or ``%s``" % (constants.DDM_ADD, constants.DDM_REMOVE)), ("blacklisted_os", None, _TestClusterOsList, "Modify list of blacklisted operating systems: each modification must" " have two items, the operation and the OS name; the operation can be" " ``%s`` or ``%s``" % (constants.DDM_ADD, constants.DDM_REMOVE)), ("use_external_mip_script", None, ht.TMaybeBool, "Whether to use an external master IP address setup script"), ] OP_RESULT = ht.TNone class OpClusterRedistConf(OpCode): """Force a full push of the cluster configuration. """ OP_RESULT = ht.TNone class OpClusterActivateMasterIp(OpCode): """Activate the master IP on the master node. """ OP_RESULT = ht.TNone class OpClusterDeactivateMasterIp(OpCode): """Deactivate the master IP on the master node. """ OP_RESULT = ht.TNone class OpQuery(OpCode): """Query for resources/items. 
@ivar what: Resources to query for, must be one of L{constants.QR_VIA_OP} @ivar fields: List of fields to retrieve @ivar qfilter: Query filter """ OP_DSC_FIELD = "what" OP_PARAMS = [ _PQueryWhat, _PUseLocking, ("fields", ht.NoDefault, ht.TListOf(ht.TNonEmptyString), "Requested fields"), ("qfilter", None, ht.TMaybe(ht.TList), "Query filter"), ] OP_RESULT = \ _GenerateObjectTypeCheck(objects.QueryResponse, { "fields": ht.TListOf(_TQueryFieldDef), "data": _TQueryResult, }) class OpQueryFields(OpCode): """Query for available resource/item fields. @ivar what: Resources to query for, must be one of L{constants.QR_VIA_OP} @ivar fields: List of fields to retrieve """ OP_DSC_FIELD = "what" OP_PARAMS = [ _PQueryWhat, ("fields", None, ht.TMaybeListOf(ht.TNonEmptyString), "Requested fields; if not given, all are returned"), ] OP_RESULT = \ _GenerateObjectTypeCheck(objects.QueryFieldsResponse, { "fields": ht.TListOf(_TQueryFieldDef), }) class OpOobCommand(OpCode): """Interact with OOB.""" OP_PARAMS = [ ("node_names", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), "List of nodes to run the OOB command against"), ("command", ht.NoDefault, ht.TElemOf(constants.OOB_COMMANDS), "OOB command to be run"), ("timeout", constants.OOB_TIMEOUT, ht.TInt, "Timeout before the OOB helper will be terminated"), ("ignore_status", False, ht.TBool, "Ignores the node offline status for power off"), ("power_delay", constants.OOB_POWER_DELAY, ht.TNonNegativeFloat, "Time in seconds to wait between powering on nodes"), ] # Fixme: Make it more specific with all the special cases in LUOobCommand OP_RESULT = _TQueryResult class OpRestrictedCommand(OpCode): """Runs a restricted command on node(s). """ OP_PARAMS = [ _PUseLocking, ("nodes", ht.NoDefault, ht.TListOf(ht.TNonEmptyString), "Nodes on which the command should be run (at least one)"), ("command", ht.NoDefault, ht.TNonEmptyString, "Command name (no parameters)"), ] _RESULT_ITEMS = [ ht.Comment("success")(ht.TBool), ht.Comment("output or error message")(ht.TString), ] OP_RESULT = \ ht.TListOf(ht.TAnd(ht.TIsLength(len(_RESULT_ITEMS)), ht.TItems(_RESULT_ITEMS))) # node opcodes class OpNodeRemove(OpCode): """Remove a node. @type node_name: C{str} @ivar node_name: The name of the node to remove. If the node still has instances on it, the operation will fail. """ OP_DSC_FIELD = "node_name" OP_PARAMS = [ _PNodeName, ] OP_RESULT = ht.TNone class OpNodeAdd(OpCode): """Add a node to the cluster. @type node_name: C{str} @ivar node_name: The name of the node to add. This can be a short name, but it will be expanded to the FQDN. @type primary_ip: IP address @ivar primary_ip: The primary IP of the node. This will be ignored when the opcode is submitted, but will be filled during the node add (so it will be visible in the job query). @type secondary_ip: IP address @ivar secondary_ip: The secondary IP of the node. This needs to be passed if the cluster has been initialized in 'dual-network' mode, otherwise it must not be given. @type readd: C{bool} @ivar readd: Whether to re-add an existing node to the cluster. If this is not passed, then the operation will abort if the node name is already in the cluster; use this parameter to 'repair' a node that had its configuration broken, or was reinstalled without removal from the cluster. @type group: C{str} @ivar group: The node group to which this node will belong. 
@type vm_capable: C{bool} @ivar vm_capable: The vm_capable node attribute @type master_capable: C{bool} @ivar master_capable: The master_capable node attribute """ OP_DSC_FIELD = "node_name" OP_PARAMS = [ _PNodeName, _PHvState, _PDiskState, ("primary_ip", None, ht.NoType, "Primary IP address"), ("secondary_ip", None, ht.TMaybeString, "Secondary IP address"), ("readd", False, ht.TBool, "Whether node is re-added to cluster"), ("group", None, ht.TMaybeString, "Initial node group"), ("master_capable", None, ht.TMaybeBool, "Whether node can become master or master candidate"), ("vm_capable", None, ht.TMaybeBool, "Whether node can host instances"), ("ndparams", None, ht.TMaybeDict, "Node parameters"), ] OP_RESULT = ht.TNone class OpNodeQuery(OpCode): """Compute the list of nodes.""" OP_PARAMS = [ _POutputFields, _PUseLocking, ("names", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), "Empty list to query all nodes, node names otherwise"), ] OP_RESULT = _TOldQueryResult class OpNodeQueryvols(OpCode): """Get list of volumes on node.""" OP_PARAMS = [ _POutputFields, ("nodes", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), "Empty list to query all nodes, node names otherwise"), ] OP_RESULT = ht.TListOf(ht.TAny) class OpNodeQueryStorage(OpCode): """Get information on storage for node(s).""" OP_PARAMS = [ _POutputFields, _PStorageType, ("nodes", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), "List of nodes"), ("name", None, ht.TMaybeString, "Storage name"), ] OP_RESULT = _TOldQueryResult class OpNodeModifyStorage(OpCode): """Modifies the properies of a storage unit""" OP_DSC_FIELD = "node_name" OP_PARAMS = [ _PNodeName, _PStorageType, _PStorageName, ("changes", ht.NoDefault, ht.TDict, "Requested changes"), ] OP_RESULT = ht.TNone class OpRepairNodeStorage(OpCode): """Repairs the volume group on a node.""" OP_DSC_FIELD = "node_name" OP_PARAMS = [ _PNodeName, _PStorageType, _PStorageName, _PIgnoreConsistency, ] OP_RESULT = ht.TNone class OpNodeSetParams(OpCode): """Change the parameters of a node.""" OP_DSC_FIELD = "node_name" OP_PARAMS = [ _PNodeName, _PForce, _PHvState, _PDiskState, ("master_candidate", None, ht.TMaybeBool, "Whether the node should become a master candidate"), ("offline", None, ht.TMaybeBool, "Whether the node should be marked as offline"), ("drained", None, ht.TMaybeBool, "Whether the node should be marked as drained"), ("auto_promote", False, ht.TBool, "Whether node(s) should be promoted to master candidate if necessary"), ("master_capable", None, ht.TMaybeBool, "Denote whether node can become master or master candidate"), ("vm_capable", None, ht.TMaybeBool, "Denote whether node can host instances"), ("secondary_ip", None, ht.TMaybeString, "Change node's secondary IP address"), ("ndparams", None, ht.TMaybeDict, "Set node parameters"), ("powered", None, ht.TMaybeBool, "Whether the node should be marked as powered"), ] OP_RESULT = _TSetParamsResult class OpNodePowercycle(OpCode): """Tries to powercycle a node.""" OP_DSC_FIELD = "node_name" OP_PARAMS = [ _PNodeName, _PForce, ] OP_RESULT = ht.TMaybeString class OpNodeMigrate(OpCode): """Migrate all instances from a node.""" OP_DSC_FIELD = "node_name" OP_PARAMS = [ _PNodeName, _PMigrationMode, _PMigrationLive, _PMigrationTargetNode, _PAllowRuntimeChgs, _PIgnoreIpolicy, _PIAllocFromDesc("Iallocator for deciding the target node" " for shared-storage instances"), ] OP_RESULT = TJobIdListOnly class OpNodeEvacuate(OpCode): """Evacuate instances off a number of nodes.""" OP_DSC_FIELD = "node_name" OP_PARAMS = [ _PEarlyRelease, _PNodeName, 
("remote_node", None, ht.TMaybeString, "New secondary node"), _PIAllocFromDesc("Iallocator for computing solution"), ("mode", ht.NoDefault, ht.TElemOf(constants.NODE_EVAC_MODES), "Node evacuation mode"), ] OP_RESULT = TJobIdListOnly # instance opcodes class OpInstanceCreate(OpCode): """Create an instance. @ivar instance_name: Instance name @ivar mode: Instance creation mode (one of L{constants.INSTANCE_CREATE_MODES}) @ivar source_handshake: Signed handshake from source (remote import only) @ivar source_x509_ca: Source X509 CA in PEM format (remote import only) @ivar source_instance_name: Previous name of instance (remote import only) @ivar source_shutdown_timeout: Shutdown timeout used for source instance (remote import only) """ OP_DSC_FIELD = "instance_name" OP_PARAMS = [ _PInstanceName, _PForceVariant, _PWaitForSync, _PNameCheck, _PIgnoreIpolicy, _POpportunisticLocking, ("beparams", ht.EmptyDict, ht.TDict, "Backend parameters for instance"), ("disks", ht.NoDefault, ht.TListOf(_TDiskParams), "Disk descriptions, for example ``[{\"%s\": 100}, {\"%s\": 5}]``;" " each disk definition must contain a ``%s`` value and" " can contain an optional ``%s`` value denoting the disk access mode" " (%s)" % (constants.IDISK_SIZE, constants.IDISK_SIZE, constants.IDISK_SIZE, constants.IDISK_MODE, " or ".join("``%s``" % i for i in sorted(constants.DISK_ACCESS_SET)))), ("disk_template", ht.NoDefault, _BuildDiskTemplateCheck(True), "Disk template"), ("file_driver", None, ht.TMaybe(ht.TElemOf(constants.FILE_DRIVER)), "Driver for file-backed disks"), ("file_storage_dir", None, ht.TMaybeString, "Directory for storing file-backed disks"), ("hvparams", ht.EmptyDict, ht.TDict, "Hypervisor parameters for instance, hypervisor-dependent"), ("hypervisor", None, ht.TMaybeString, "Hypervisor"), _PIAllocFromDesc("Iallocator for deciding which node(s) to use"), ("identify_defaults", False, ht.TBool, "Reset instance parameters to default if equal"), ("ip_check", True, ht.TBool, _PIpCheckDoc), ("conflicts_check", True, ht.TBool, "Check for conflicting IPs"), ("mode", ht.NoDefault, ht.TElemOf(constants.INSTANCE_CREATE_MODES), "Instance creation mode"), ("nics", ht.NoDefault, ht.TListOf(_TestNicDef), "List of NIC (network interface) definitions, for example" " ``[{}, {}, {\"%s\": \"198.51.100.4\"}]``; each NIC definition can" " contain the optional values %s" % (constants.INIC_IP, ", ".join("``%s``" % i for i in sorted(constants.INIC_PARAMS)))), ("no_install", None, ht.TMaybeBool, "Do not install the OS (will disable automatic start)"), ("osparams", ht.EmptyDict, ht.TDict, "OS parameters for instance"), ("os_type", None, ht.TMaybeString, "Operating system"), ("pnode", None, ht.TMaybeString, "Primary node"), ("snode", None, ht.TMaybeString, "Secondary node"), ("source_handshake", None, ht.TMaybe(ht.TList), "Signed handshake from source (remote import only)"), ("source_instance_name", None, ht.TMaybeString, "Source instance name (remote import only)"), ("source_shutdown_timeout", constants.DEFAULT_SHUTDOWN_TIMEOUT, ht.TNonNegativeInt, "How long source instance was given to shut down (remote import only)"), ("source_x509_ca", None, ht.TMaybeString, "Source X509 CA in PEM format (remote import only)"), ("src_node", None, ht.TMaybeString, "Source node for import"), ("src_path", None, ht.TMaybeString, "Source directory for import"), ("start", True, ht.TBool, "Whether to start instance after creation"), ("tags", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), "Instance tags"), ] OP_RESULT = ht.Comment("instance 
nodes")(ht.TListOf(ht.TNonEmptyString)) class OpInstanceMultiAlloc(OpCode): """Allocates multiple instances. """ OP_PARAMS = [ _POpportunisticLocking, _PIAllocFromDesc("Iallocator used to allocate all the instances"), ("instances", ht.EmptyList, ht.TListOf(ht.TInstanceOf(OpInstanceCreate)), "List of instance create opcodes describing the instances to allocate"), ] _JOB_LIST = ht.Comment("List of submitted jobs")(TJobIdList) ALLOCATABLE_KEY = "allocatable" FAILED_KEY = "allocatable" OP_RESULT = ht.TStrictDict(True, True, { constants.JOB_IDS_KEY: _JOB_LIST, ALLOCATABLE_KEY: ht.TListOf(ht.TNonEmptyString), FAILED_KEY: ht.TListOf(ht.TNonEmptyString), }) def __getstate__(self): """Generic serializer. """ state = OpCode.__getstate__(self) if hasattr(self, "instances"): # pylint: disable=E1101 state["instances"] = [inst.__getstate__() for inst in self.instances] return state def __setstate__(self, state): """Generic unserializer. This method just restores from the serialized state the attributes of the current instance. @param state: the serialized opcode data @type state: C{dict} """ if not isinstance(state, dict): raise ValueError("Invalid data to __setstate__: expected dict, got %s" % type(state)) if "instances" in state: state["instances"] = map(OpCode.LoadOpCode, state["instances"]) return OpCode.__setstate__(self, state) def Validate(self, set_defaults): """Validates this opcode. We do this recursively. """ OpCode.Validate(self, set_defaults) for inst in self.instances: # pylint: disable=E1101 inst.Validate(set_defaults) class OpInstanceReinstall(OpCode): """Reinstall an instance's OS.""" OP_DSC_FIELD = "instance_name" OP_PARAMS = [ _PInstanceName, _PForceVariant, ("os_type", None, ht.TMaybeString, "Instance operating system"), ("osparams", None, ht.TMaybeDict, "Temporary OS parameters"), ] OP_RESULT = ht.TNone class OpInstanceRemove(OpCode): """Remove an instance.""" OP_DSC_FIELD = "instance_name" OP_PARAMS = [ _PInstanceName, _PShutdownTimeout, ("ignore_failures", False, ht.TBool, "Whether to ignore failures during removal"), ] OP_RESULT = ht.TNone class OpInstanceRename(OpCode): """Rename an instance.""" OP_PARAMS = [ _PInstanceName, _PNameCheck, ("new_name", ht.NoDefault, ht.TNonEmptyString, "New instance name"), ("ip_check", False, ht.TBool, _PIpCheckDoc), ] OP_RESULT = ht.Comment("New instance name")(ht.TNonEmptyString) class OpInstanceStartup(OpCode): """Startup an instance.""" OP_DSC_FIELD = "instance_name" OP_PARAMS = [ _PInstanceName, _PForce, _PIgnoreOfflineNodes, ("hvparams", ht.EmptyDict, ht.TDict, "Temporary hypervisor parameters, hypervisor-dependent"), ("beparams", ht.EmptyDict, ht.TDict, "Temporary backend parameters"), _PNoRemember, _PStartupPaused, ] OP_RESULT = ht.TNone class OpInstanceShutdown(OpCode): """Shutdown an instance.""" OP_DSC_FIELD = "instance_name" OP_PARAMS = [ _PInstanceName, _PForce, _PIgnoreOfflineNodes, ("timeout", constants.DEFAULT_SHUTDOWN_TIMEOUT, ht.TNonNegativeInt, "How long to wait for instance to shut down"), _PNoRemember, ] OP_RESULT = ht.TNone class OpInstanceReboot(OpCode): """Reboot an instance.""" OP_DSC_FIELD = "instance_name" OP_PARAMS = [ _PInstanceName, _PShutdownTimeout, ("ignore_secondaries", False, ht.TBool, "Whether to start the instance even if secondary disks are failing"), ("reboot_type", ht.NoDefault, ht.TElemOf(constants.REBOOT_TYPES), "How to reboot instance"), ("reason", (constants.INSTANCE_REASON_SOURCE_UNKNOWN, None), ht.TAnd(ht.TIsLength(2), ht.TItems([ ht.TElemOf(constants.INSTANCE_REASON_SOURCES), ht.TMaybeString, ])), 
"The reason why the reboot is happening"), ] OP_RESULT = ht.TNone class OpInstanceReplaceDisks(OpCode): """Replace the disks of an instance.""" OP_DSC_FIELD = "instance_name" OP_PARAMS = [ _PInstanceName, _PEarlyRelease, _PIgnoreIpolicy, ("mode", ht.NoDefault, ht.TElemOf(constants.REPLACE_MODES), "Replacement mode"), ("disks", ht.EmptyList, ht.TListOf(ht.TNonNegativeInt), "Disk indexes"), ("remote_node", None, ht.TMaybeString, "New secondary node"), _PIAllocFromDesc("Iallocator for deciding new secondary node"), ] OP_RESULT = ht.TNone class OpInstanceFailover(OpCode): """Failover an instance.""" OP_DSC_FIELD = "instance_name" OP_PARAMS = [ _PInstanceName, _PShutdownTimeout, _PIgnoreConsistency, _PMigrationTargetNode, _PIgnoreIpolicy, _PIAllocFromDesc("Iallocator for deciding the target node for" " shared-storage instances"), ] OP_RESULT = ht.TNone class OpInstanceMigrate(OpCode): """Migrate an instance. This migrates (without shutting down an instance) to its secondary node. @ivar instance_name: the name of the instance @ivar mode: the migration mode (live, non-live or None for auto) """ OP_DSC_FIELD = "instance_name" OP_PARAMS = [ _PInstanceName, _PMigrationMode, _PMigrationLive, _PMigrationTargetNode, _PAllowRuntimeChgs, _PIgnoreIpolicy, ("cleanup", False, ht.TBool, "Whether a previously failed migration should be cleaned up"), _PIAllocFromDesc("Iallocator for deciding the target node for" " shared-storage instances"), ("allow_failover", False, ht.TBool, "Whether we can fallback to failover if migration is not possible"), ] OP_RESULT = ht.TNone class OpInstanceMove(OpCode): """Move an instance. This move (with shutting down an instance and data copying) to an arbitrary node. @ivar instance_name: the name of the instance @ivar target_node: the destination node """ OP_DSC_FIELD = "instance_name" OP_PARAMS = [ _PInstanceName, _PShutdownTimeout, _PIgnoreIpolicy, ("target_node", ht.NoDefault, ht.TNonEmptyString, "Target node"), _PIgnoreConsistency, ] OP_RESULT = ht.TNone class OpInstanceConsole(OpCode): """Connect to an instance's console.""" OP_DSC_FIELD = "instance_name" OP_PARAMS = [ _PInstanceName, ] OP_RESULT = ht.TDict class OpInstanceActivateDisks(OpCode): """Activate an instance's disks.""" OP_DSC_FIELD = "instance_name" OP_PARAMS = [ _PInstanceName, ("ignore_size", False, ht.TBool, "Whether to ignore recorded size"), _PWaitForSyncFalse, ] OP_RESULT = ht.TListOf(ht.TAnd(ht.TIsLength(3), ht.TItems([ht.TNonEmptyString, ht.TNonEmptyString, ht.TNonEmptyString]))) class OpInstanceDeactivateDisks(OpCode): """Deactivate an instance's disks.""" OP_DSC_FIELD = "instance_name" OP_PARAMS = [ _PInstanceName, _PForce, ] OP_RESULT = ht.TNone class OpInstanceRecreateDisks(OpCode): """Recreate an instance's disks.""" _TDiskChanges = \ ht.TAnd(ht.TIsLength(2), ht.TItems([ht.Comment("Disk index")(ht.TNonNegativeInt), ht.Comment("Parameters")(_TDiskParams)])) OP_DSC_FIELD = "instance_name" OP_PARAMS = [ _PInstanceName, ("disks", ht.EmptyList, ht.TOr(ht.TListOf(ht.TNonNegativeInt), ht.TListOf(_TDiskChanges)), "List of disk indexes (deprecated) or a list of tuples containing a disk" " index and a possibly empty dictionary with disk parameter changes"), ("nodes", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), "New instance nodes, if relocation is desired"), _PIAllocFromDesc("Iallocator for deciding new nodes"), ] OP_RESULT = ht.TNone class OpInstanceQuery(OpCode): """Compute the list of instances.""" OP_PARAMS = [ _POutputFields, _PUseLocking, ("names", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), "Empty 
list to query all instances, instance names otherwise"), ] OP_RESULT = _TOldQueryResult class OpInstanceQueryData(OpCode): """Compute the run-time status of instances.""" OP_PARAMS = [ _PUseLocking, ("instances", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), "Instance names"), ("static", False, ht.TBool, "Whether to only return configuration data without querying" " nodes"), ] OP_RESULT = ht.TDictOf(ht.TNonEmptyString, ht.TDict) def _TestInstSetParamsModList(fn): """Generates a check for modification lists. """ # Old format # TODO: Remove in version 2.8 including support in LUInstanceSetParams old_mod_item_fn = \ ht.TAnd(ht.TIsLength(2), ht.TItems([ ht.TOr(ht.TElemOf(constants.DDMS_VALUES), ht.TNonNegativeInt), fn, ])) # New format, supporting adding/removing disks/NICs at arbitrary indices mod_item_fn = \ ht.TAnd(ht.TIsLength(3), ht.TItems([ ht.TElemOf(constants.DDMS_VALUES_WITH_MODIFY), ht.Comment("Disk index, can be negative, e.g. -1 for last disk")(ht.TInt), fn, ])) return ht.TOr(ht.Comment("Recommended")(ht.TListOf(mod_item_fn)), ht.Comment("Deprecated")(ht.TListOf(old_mod_item_fn))) class OpInstanceSetParams(OpCode): """Change the parameters of an instance. """ TestNicModifications = _TestInstSetParamsModList(_TestNicDef) TestDiskModifications = _TestInstSetParamsModList(_TDiskParams) OP_DSC_FIELD = "instance_name" OP_PARAMS = [ _PInstanceName, _PForce, _PForceVariant, _PIgnoreIpolicy, ("nics", ht.EmptyList, TestNicModifications, "List of NIC changes: each item is of the form ``(op, index, settings)``," " ``op`` is one of ``%s``, ``%s`` or ``%s``, ``index`` can be either -1" " to refer to the last position, or a zero-based index number; a" " deprecated version of this parameter used the form ``(op, settings)``," " where ``op`` can be ``%s`` to add a new NIC with the specified" " settings, ``%s`` to remove the last NIC or a number to modify the" " settings of the NIC with that index" % (constants.DDM_ADD, constants.DDM_MODIFY, constants.DDM_REMOVE, constants.DDM_ADD, constants.DDM_REMOVE)), ("disks", ht.EmptyList, TestDiskModifications, "List of disk changes; see ``nics``"), ("beparams", ht.EmptyDict, ht.TDict, "Per-instance backend parameters"), ("runtime_mem", None, ht.TMaybePositiveInt, "New runtime memory"), ("hvparams", ht.EmptyDict, ht.TDict, "Per-instance hypervisor parameters, hypervisor-dependent"), ("disk_template", None, ht.TMaybe(_BuildDiskTemplateCheck(False)), "Disk template for instance"), ("remote_node", None, ht.TMaybeString, "Secondary node (used when changing disk template)"), ("os_name", None, ht.TMaybeString, "Change the instance's OS without reinstalling the instance"), ("osparams", None, ht.TMaybeDict, "Per-instance OS parameters"), ("wait_for_sync", True, ht.TBool, "Whether to wait for the disk to synchronize, when changing template"), ("offline", None, ht.TMaybeBool, "Whether to mark instance as offline"), ("conflicts_check", True, ht.TBool, "Check for conflicting IPs"), ] OP_RESULT = _TSetParamsResult class OpInstanceGrowDisk(OpCode): """Grow a disk of an instance.""" OP_DSC_FIELD = "instance_name" OP_PARAMS = [ _PInstanceName, _PWaitForSync, ("disk", ht.NoDefault, ht.TInt, "Disk index"), ("amount", ht.NoDefault, ht.TNonNegativeInt, "Amount of disk space to add (megabytes)"), ("absolute", False, ht.TBool, "Whether the amount parameter is an absolute target or a relative one"), ] OP_RESULT = ht.TNone class OpInstanceChangeGroup(OpCode): """Moves an instance to another node group.""" OP_DSC_FIELD = "instance_name" OP_PARAMS = [ _PInstanceName, _PEarlyRelease, 
_PIAllocFromDesc("Iallocator for computing solution"), _PTargetGroups, ] OP_RESULT = TJobIdListOnly # Node group opcodes class OpGroupAdd(OpCode): """Add a node group to the cluster.""" OP_DSC_FIELD = "group_name" OP_PARAMS = [ _PGroupName, _PNodeGroupAllocPolicy, _PGroupNodeParams, _PDiskParams, _PHvState, _PDiskState, ("ipolicy", None, ht.TMaybeDict, "Group-wide :ref:`instance policy <rapi-ipolicy>` specs"), ] OP_RESULT = ht.TNone class OpGroupAssignNodes(OpCode): """Assign nodes to a node group.""" OP_DSC_FIELD = "group_name" OP_PARAMS = [ _PGroupName, _PForce, ("nodes", ht.NoDefault, ht.TListOf(ht.TNonEmptyString), "List of nodes to assign"), ] OP_RESULT = ht.TNone class OpGroupQuery(OpCode): """Compute the list of node groups.""" OP_PARAMS = [ _POutputFields, ("names", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), "Empty list to query all groups, group names otherwise"), ] OP_RESULT = _TOldQueryResult class OpGroupSetParams(OpCode): """Change the parameters of a node group.""" OP_DSC_FIELD = "group_name" OP_PARAMS = [ _PGroupName, _PNodeGroupAllocPolicy, _PGroupNodeParams, _PDiskParams, _PHvState, _PDiskState, ("ipolicy", None, ht.TMaybeDict, "Group-wide instance policy specs"), ] OP_RESULT = _TSetParamsResult class OpGroupRemove(OpCode): """Remove a node group from the cluster.""" OP_DSC_FIELD = "group_name" OP_PARAMS = [ _PGroupName, ] OP_RESULT = ht.TNone class OpGroupRename(OpCode): """Rename a node group in the cluster.""" OP_PARAMS = [ _PGroupName, ("new_name", ht.NoDefault, ht.TNonEmptyString, "New group name"), ] OP_RESULT = ht.Comment("New group name")(ht.TNonEmptyString) class OpGroupEvacuate(OpCode): """Evacuate a node group in the cluster.""" OP_DSC_FIELD = "group_name" OP_PARAMS = [ _PGroupName, _PEarlyRelease, _PIAllocFromDesc("Iallocator for computing solution"), _PTargetGroups, ] OP_RESULT = TJobIdListOnly # OS opcodes class OpOsDiagnose(OpCode): """Compute the list of guest operating systems.""" OP_PARAMS = [ _POutputFields, ("names", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), "Which operating systems to diagnose"), ] OP_RESULT = _TOldQueryResult # ExtStorage opcodes class OpExtStorageDiagnose(OpCode): """Compute the list of external storage providers.""" OP_PARAMS = [ _POutputFields, ("names", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), "Which ExtStorage Provider to diagnose"), ] OP_RESULT = _TOldQueryResult # Exports opcodes class OpBackupQuery(OpCode): """Compute the list of exported images.""" OP_PARAMS = [ _PUseLocking, ("nodes", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), "Empty list to query all nodes, node names otherwise"), ] OP_RESULT = ht.TDictOf(ht.TNonEmptyString, ht.TOr(ht.Comment("False on error")(ht.TBool), ht.TListOf(ht.TNonEmptyString))) class OpBackupPrepare(OpCode): """Prepares an instance export. @ivar instance_name: Instance name @ivar mode: Export mode (one of L{constants.EXPORT_MODES}) """ OP_DSC_FIELD = "instance_name" OP_PARAMS = [ _PInstanceName, ("mode", ht.NoDefault, ht.TElemOf(constants.EXPORT_MODES), "Export mode"), ] OP_RESULT = ht.TMaybeDict class OpBackupExport(OpCode): """Export an instance. For local exports, the export destination is the node name. For remote exports, the export destination is a list of tuples, each consisting of hostname/IP address, port, magic, HMAC and HMAC salt. The HMAC is calculated using the cluster domain secret over the value "${index}:${hostname}:${port}". The destination X509 CA must be a signed certificate. 
@ivar mode: Export mode (one of L{constants.EXPORT_MODES}) @ivar target_node: Export destination @ivar x509_key_name: X509 key to use (remote export only) @ivar destination_x509_ca: Destination X509 CA in PEM format (remote export only) """ OP_DSC_FIELD = "instance_name" OP_PARAMS = [ _PInstanceName, _PShutdownTimeout, # TODO: Rename target_node as it changes meaning for different export modes # (e.g. "destination") ("target_node", ht.NoDefault, ht.TOr(ht.TNonEmptyString, ht.TList), "Destination information, depends on export mode"), ("shutdown", True, ht.TBool, "Whether to shutdown instance before export"), ("remove_instance", False, ht.TBool, "Whether to remove instance after export"), ("ignore_remove_failures", False, ht.TBool, "Whether to ignore failures while removing instances"), ("mode", constants.EXPORT_MODE_LOCAL, ht.TElemOf(constants.EXPORT_MODES), "Export mode"), ("x509_key_name", None, ht.TMaybe(ht.TList), "Name of X509 key (remote export only)"), ("destination_x509_ca", None, ht.TMaybeString, "Destination X509 CA (remote export only)"), ] OP_RESULT = \ ht.TAnd(ht.TIsLength(2), ht.TItems([ ht.Comment("Finalizing status")(ht.TBool), ht.Comment("Status for every exported disk")(ht.TListOf(ht.TBool)), ])) class OpBackupRemove(OpCode): """Remove an instance's export.""" OP_DSC_FIELD = "instance_name" OP_PARAMS = [ _PInstanceName, ] OP_RESULT = ht.TNone # Tags opcodes class OpTagsGet(OpCode): """Returns the tags of the given object.""" OP_DSC_FIELD = "name" OP_PARAMS = [ _PTagKind, # Not using _PUseLocking as the default is different for historical reasons ("use_locking", True, ht.TBool, "Whether to use synchronization"), # Name is only meaningful for nodes and instances ("name", ht.NoDefault, ht.TMaybeString, "Name of object to retrieve tags from"), ] OP_RESULT = ht.TListOf(ht.TNonEmptyString) class OpTagsSearch(OpCode): """Searches the tags in the cluster for a given pattern.""" OP_DSC_FIELD = "pattern" OP_PARAMS = [ ("pattern", ht.NoDefault, ht.TNonEmptyString, "Search pattern (regular expression)"), ] OP_RESULT = ht.TListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([ ht.TNonEmptyString, ht.TNonEmptyString, ]))) class OpTagsSet(OpCode): """Add a list of tags on a given object.""" OP_PARAMS = [ _PTagKind, _PTags, # Name is only meaningful for groups, nodes and instances ("name", ht.NoDefault, ht.TMaybeString, "Name of object where tag(s) should be added"), ] OP_RESULT = ht.TNone class OpTagsDel(OpCode): """Remove a list of tags from a given object.""" OP_PARAMS = [ _PTagKind, _PTags, # Name is only meaningful for groups, nodes and instances ("name", ht.NoDefault, ht.TMaybeString, "Name of object where tag(s) should be deleted"), ] OP_RESULT = ht.TNone # Test opcodes class OpTestDelay(OpCode): """Sleeps for a configured amount of time. This is used just for debugging and testing. Parameters: - duration: the time to sleep, in seconds - on_master: if true, sleep on the master - on_nodes: list of nodes in which to sleep If the on_master parameter is true, it will execute a sleep on the master (before any node sleep). If the on_nodes list is not empty, it will sleep on those nodes (after the sleep on the master, if that is enabled). As an additional feature, the case of duration < 0 will be reported as an execution error, so this opcode can be used as a failure generator. The case of duration == 0 will not be treated specially. 
""" OP_DSC_FIELD = "duration" OP_PARAMS = [ ("duration", ht.NoDefault, ht.TNumber, None), ("on_master", True, ht.TBool, None), ("on_nodes", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), None), ("repeat", 0, ht.TNonNegativeInt, None), ] def OP_DSC_FORMATTER(self, value): # pylint: disable=C0103,R0201 """Custom formatter for duration. """ try: v = float(value) except TypeError: v = value return str(v) class OpTestAllocator(OpCode): """Allocator framework testing. This opcode has two modes: - gather and return allocator input for a given mode (allocate new or replace secondary) and a given instance definition (direction 'in') - run a selected allocator for a given operation (as above) and return the allocator output (direction 'out') """ OP_DSC_FIELD = "iallocator" OP_PARAMS = [ ("direction", ht.NoDefault, ht.TElemOf(constants.VALID_IALLOCATOR_DIRECTIONS), None), ("mode", ht.NoDefault, ht.TElemOf(constants.VALID_IALLOCATOR_MODES), None), ("name", ht.NoDefault, ht.TNonEmptyString, None), ("nics", ht.NoDefault, ht.TMaybeListOf(ht.TDictOf(ht.TElemOf([constants.INIC_MAC, constants.INIC_IP, "bridge"]), ht.TMaybeString)), None), ("disks", ht.NoDefault, ht.TMaybe(ht.TList), None), ("hypervisor", None, ht.TMaybeString, None), _PIAllocFromDesc(None), ("tags", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), None), ("memory", None, ht.TMaybe(ht.TNonNegativeInt), None), ("vcpus", None, ht.TMaybe(ht.TNonNegativeInt), None), ("os", None, ht.TMaybeString, None), ("disk_template", None, ht.TMaybeString, None), ("instances", None, ht.TMaybeListOf(ht.TNonEmptyString), None), ("evac_mode", None, ht.TMaybe(ht.TElemOf(constants.IALLOCATOR_NEVAC_MODES)), None), ("target_groups", None, ht.TMaybeListOf(ht.TNonEmptyString), None), ("spindle_use", 1, ht.TNonNegativeInt, None), ("count", 1, ht.TNonNegativeInt, None), ] class OpTestJqueue(OpCode): """Utility opcode to test some aspects of the job queue. """ OP_PARAMS = [ ("notify_waitlock", False, ht.TBool, None), ("notify_exec", False, ht.TBool, None), ("log_messages", ht.EmptyList, ht.TListOf(ht.TString), None), ("fail", False, ht.TBool, None), ] class OpTestDummy(OpCode): """Utility opcode used by unittests. """ OP_PARAMS = [ ("result", ht.NoDefault, ht.NoType, None), ("messages", ht.NoDefault, ht.NoType, None), ("fail", ht.NoDefault, ht.NoType, None), ("submit_jobs", None, ht.NoType, None), ] WITH_LU = False # Network opcodes # Add a new network in the cluster class OpNetworkAdd(OpCode): """Add an IP network to the cluster.""" OP_DSC_FIELD = "network_name" OP_PARAMS = [ _PNetworkName, ("network", ht.NoDefault, _TIpNetwork4, "IPv4 subnet"), ("gateway", None, ht.TMaybe(_TIpAddress4), "IPv4 gateway"), ("network6", None, ht.TMaybe(_TIpNetwork6), "IPv6 subnet"), ("gateway6", None, ht.TMaybe(_TIpAddress6), "IPv6 gateway"), ("mac_prefix", None, ht.TMaybeString, "MAC address prefix that overrides cluster one"), ("add_reserved_ips", None, _TMaybeAddr4List, "Which IP addresses to reserve"), ("conflicts_check", True, ht.TBool, "Whether to check for conflicting IP addresses"), ("tags", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), "Network tags"), ] OP_RESULT = ht.TNone class OpNetworkRemove(OpCode): """Remove an existing network from the cluster. Must not be connected to any nodegroup. 
""" OP_DSC_FIELD = "network_name" OP_PARAMS = [ _PNetworkName, _PForce, ] OP_RESULT = ht.TNone class OpNetworkSetParams(OpCode): """Modify Network's parameters except for IPv4 subnet""" OP_DSC_FIELD = "network_name" OP_PARAMS = [ _PNetworkName, ("gateway", None, ht.TMaybeValueNone(_TIpAddress4), "IPv4 gateway"), ("network6", None, ht.TMaybeValueNone(_TIpNetwork6), "IPv6 subnet"), ("gateway6", None, ht.TMaybeValueNone(_TIpAddress6), "IPv6 gateway"), ("mac_prefix", None, ht.TMaybeValueNone(ht.TString), "MAC address prefix that overrides cluster one"), ("add_reserved_ips", None, _TMaybeAddr4List, "Which external IP addresses to reserve"), ("remove_reserved_ips", None, _TMaybeAddr4List, "Which external IP addresses to release"), ] OP_RESULT = ht.TNone class OpNetworkConnect(OpCode): """Connect a Network to a specific Nodegroup with the defined netparams (mode, link). Nics in this Network will inherit those params. Produce errors if a NIC (that its not already assigned to a network) has an IP that is contained in the Network this will produce error unless --no-conflicts-check is passed. """ OP_DSC_FIELD = "network_name" OP_PARAMS = [ _PGroupName, _PNetworkName, ("network_mode", ht.NoDefault, ht.TElemOf(constants.NIC_VALID_MODES), "Connectivity mode"), ("network_link", ht.NoDefault, ht.TString, "Connectivity link"), ("conflicts_check", True, ht.TBool, "Whether to check for conflicting IPs"), ] OP_RESULT = ht.TNone class OpNetworkDisconnect(OpCode): """Disconnect a Network from a Nodegroup. Produce errors if NICs are present in the Network unless --no-conficts-check option is passed. """ OP_DSC_FIELD = "network_name" OP_PARAMS = [ _PGroupName, _PNetworkName, ] OP_RESULT = ht.TNone class OpNetworkQuery(OpCode): """Compute the list of networks.""" OP_PARAMS = [ _POutputFields, _PUseLocking, ("names", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), "Empty list to query all groups, group names otherwise"), ] OP_RESULT = _TOldQueryResult def _GetOpList(): """Returns list of all defined opcodes. Does not eliminate duplicates by C{OP_ID}. """ return [v for v in globals().values() if (isinstance(v, type) and issubclass(v, OpCode) and hasattr(v, "OP_ID") and v is not OpCode)] OP_MAPPING = dict((v.OP_ID, v) for v in _GetOpList())
gpl-2.0
7,356,497,005,016,279,000
30.285189
80
0.657835
false
ronggong/jingjuSingingPhraseMatching
phoneticSimilarity/phonemeDurationStat.py
1
5978
'''
 * Copyright (C) 2017  Music Technology Group - Universitat Pompeu Fabra
 *
 * This file is part of jingjuSingingPhraseMatching
 *
 * pypYIN is free software: you can redistribute it and/or modify it under
 * the terms of the GNU Affero General Public License as published by the Free
 * Software Foundation (FSF), either version 3 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the Affero GNU General Public License
 * version 3 along with this program.  If not, see http://www.gnu.org/licenses/
 *
 * If you have any problem about this python version code, please contact: Rong Gong
 * [email protected]
 *
 *
 * If you want to refer this code, please use this article:
 *
'''

from general.trainTestSeparation import getRecordingNamesSimi
from general.textgridParser import syllableTextgridExtraction
import matplotlib.pyplot as plt
from scipy.misc import factorial
from scipy.optimize import curve_fit
from scipy.stats import gamma,expon
from general.filePath import *
from general.parameters import *
from general.phonemeMap import dic_pho_map
import json
import numpy as np
import os


def phoDurCollection(recordings):
    '''
    collect durations of pho into dictionary
    :param recordings:
    :return:
    '''
    dict_duration_pho = {}
    for recording in recordings:
        nestedPhonemeLists, numSyllables, numPhonemes \
            = syllableTextgridExtraction(textgrid_path,recording,syllableTierName,phonemeTierName)

        for pho in nestedPhonemeLists:
            for p in pho[1]:
                dur_pho = p[1] - p[0]
                sampa_pho = dic_pho_map[p[2]]
                if sampa_pho not in dict_duration_pho.keys():
                    dict_duration_pho[sampa_pho] = [dur_pho]
                else:
                    dict_duration_pho[sampa_pho].append(dur_pho)
    return dict_duration_pho


def poisson(k, lamb):
    return (lamb**k/factorial(k)) * np.exp(-lamb)


def durPhoDistribution(array_durPho,sampa_pho,plot=False):
    '''
    pho durations histogram
    :param array_durPho:
    :return:
    '''
    # plt.figure(figsize=(10, 6))

    # integer bin edges
    offset_bin = 0.005
    bins = np.arange(0, max(array_durPho)+2, 2*offset_bin) - offset_bin

    # histogram
    entries, bin_edges, patches = plt.hist(array_durPho, bins=bins, normed=True,
                                           fc=(0, 0, 1, 0.7),
                                           label='pho: '+sampa_pho+' duration histogram')

    # centroid duration
    bin_centres = bin_edges-offset_bin
    bin_centres = bin_centres[:-1]
    centroid = np.sum(bin_centres*entries)/np.sum(entries)

    ##-- fit with poisson distribution
    # bin_middles = 0.5*(bin_edges[1:] + bin_edges[:-1])
    #
    # parameters, cov_matrix = curve_fit(poisson, bin_middles, entries)
    #
    # x = np.linspace(0, max(array_durPho), 1000)
    # x = np.arange(0,max(array_durPho),hopsize_t)
    #
    # p = poisson(x, *parameters)

    ##-- fit with gamma distribution
    # discard some outlier durations by applying 2 standard deviations interval
    mean_array_durPho = np.mean(array_durPho)
    std_array_durPho = np.std(array_durPho)
    index_keep = np.where(array_durPho < mean_array_durPho+2*std_array_durPho)
    array_durPho_keep = array_durPho[index_keep]

    # discard some duration in histogram to make the fitting reasonable
    if class_name == 'laosheng':
        if sampa_pho == 'in':
            array_durPho_keep = array_durPho_keep[np.where(array_durPho_keep < 2.5)]
        elif sampa_pho == '@n':
            array_durPho_keep = array_durPho_keep[np.where(array_durPho_keep < 3)]
        elif sampa_pho == 'eI^':
            array_durPho_keep = array_durPho_keep[np.where(array_durPho_keep < 1.5)]
        elif sampa_pho == 'EnEn':
            array_durPho_keep = array_durPho_keep[np.where(array_durPho_keep < 2.0)]
        elif sampa_pho == 'UN':
            array_durPho_keep = array_durPho_keep[np.where(array_durPho_keep < 2.5)]

    # step is the hopsize_t, corresponding to each frame
    # maximum length is the 4 times of the effective length
    x = np.arange(0, 8*max(array_durPho_keep), hopsize_t_phoneticSimilarity)
    param = gamma.fit(array_durPho_keep, floc=0)
    y = gamma.pdf(x, *param)
    # y = expon.pdf(x)

    if plot:
        # Poisson fitting curve
        # plt.plot(x,p,'r',linewidth=2,label='Poisson distribution fitting curve')

        # gamma fitting curve
        # plt.plot(x, y, 'r-', lw=2, alpha=0.6, label='gamma pdf')

        plt.axvline(centroid, linewidth=3, color='r', label='centroid frequency')
        plt.legend(fontsize=18)
        plt.xlabel('Pho duration distribution ', fontsize=18)
        plt.ylabel('Probability', fontsize=18)
        plt.axis('tight')
        plt.tight_layout()
        plt.show()

    y /= np.sum(y)

    return y.tolist(), centroid


if __name__ == '__main__':
    rp = os.path.dirname(__file__)

    for cn in ['danAll', 'laosheng']:
        recordings_train = getRecordingNamesSimi('TRAIN', cn)

        dict_duration_pho = phoDurCollection(recordings_train)

        dict_centroid_dur = {}
        dict_dur_dist = {}
        for pho in dict_duration_pho:
            durDist, centroid_dur = durPhoDistribution(np.array(dict_duration_pho[pho]), pho, plot=False)
            dict_centroid_dur[pho] = centroid_dur
            dict_dur_dist[pho] = durDist  # the first proba is always 0

        # dump duration centroid
        with open(os.path.join(rp, 'lyricsRecognizer', 'dict_centroid_dur'+cn+'.json'), 'wb') as outfile:
            json.dump(dict_centroid_dur, outfile)

        # the gamma occupancy duration distribution is never used
        # with open('dict_dur_dist_'+class_name+'.json','wb') as outfile:
        #     json.dump(dict_dur_dist,outfile)
agpl-3.0
-4,279,212,085,165,763,000
34.583333
145
0.652559
false
MatKallada/nbgrader
nbgrader/tests/apps/base.py
1
1462
import os
import shutil
import pytest
import stat

from IPython.nbformat import write as write_nb
from IPython.nbformat.v4 import new_notebook


@pytest.mark.usefixtures("temp_cwd")
class BaseTestApp(object):

    def _empty_notebook(self, path):
        nb = new_notebook()
        full_dest = os.path.join(os.getcwd(), path)
        if not os.path.exists(os.path.dirname(full_dest)):
            os.makedirs(os.path.dirname(full_dest))
        if os.path.exists(full_dest):
            os.remove(full_dest)
        with open(full_dest, 'w') as f:
            write_nb(nb, f, 4)

    def _copy_file(self, src, dest):
        full_src = os.path.join(os.path.dirname(__file__), src)
        full_dest = os.path.join(os.getcwd(), dest)
        if not os.path.exists(os.path.dirname(full_dest)):
            os.makedirs(os.path.dirname(full_dest))
        shutil.copy(full_src, full_dest)

    def _make_file(self, path, contents=""):
        full_dest = os.path.join(os.getcwd(), path)
        if not os.path.exists(os.path.dirname(full_dest)):
            os.makedirs(os.path.dirname(full_dest))
        if os.path.exists(full_dest):
            os.remove(full_dest)
        with open(full_dest, "w") as fh:
            fh.write(contents)

    def _get_permissions(self, filename):
        return oct(os.stat(filename).st_mode)[-3:]

    def _file_contents(self, path):
        with open(path, "r") as fh:
            contents = fh.read()
        return contents
bsd-3-clause
2,815,919,129,602,504,700
31.488889
63
0.604651
false
philgyford/django-ditto
ditto/lastfm/urls.py
1
2255
from django.conf.urls import url

from . import views

app_name = "lastfm"

# The pattern for matching an Album/Artist/Track slug:
slug_chars = "[\w.,:;=@&+%()$!°’~-]+"  # noqa: W605

urlpatterns = [
    url(regex=r"^$", view=views.HomeView.as_view(), name="home"),
    url(
        regex=r"^library/$", view=views.ScrobbleListView.as_view(), name="scrobble_list"
    ),
    url(
        regex=r"^library/albums/$",
        view=views.AlbumListView.as_view(),
        name="album_list",
    ),
    url(
        regex=r"^library/artists/$",
        view=views.ArtistListView.as_view(),
        name="artist_list",
    ),
    url(
        regex=r"^library/tracks/$",
        view=views.TrackListView.as_view(),
        name="track_list",
    ),
    url(
        regex=r"^music/(?P<artist_slug>%s)/$" % slug_chars,
        view=views.ArtistDetailView.as_view(),
        name="artist_detail",
    ),
    url(
        regex=r"^music/(?P<artist_slug>%s)/\+albums/$" % slug_chars,
        view=views.ArtistAlbumsView.as_view(),
        name="artist_albums",
    ),
    url(
        regex=r"^music/(?P<artist_slug>%s)/(?P<album_slug>%s)/$"
        % (slug_chars, slug_chars),
        view=views.AlbumDetailView.as_view(),
        name="album_detail",
    ),
    url(
        regex=r"^music/(?P<artist_slug>%s)/_/(?P<track_slug>%s)/$"
        % (slug_chars, slug_chars),
        view=views.TrackDetailView.as_view(),
        name="track_detail",
    ),
    # User pages.
    url(
        regex=r"^user/(?P<username>[a-z0-9]+)/$",
        view=views.UserDetailView.as_view(),
        name="user_detail",
    ),
    url(
        regex=r"^user/(?P<username>[a-z0-9]+)/library/$",
        view=views.UserScrobbleListView.as_view(),
        name="user_scrobble_list",
    ),
    url(
        regex=r"^user/(?P<username>[a-z0-9]+)/library/albums/$",
        view=views.UserAlbumListView.as_view(),
        name="user_album_list",
    ),
    url(
        regex=r"^user/(?P<username>[a-z0-9]+)/library/artists/$",
        view=views.UserArtistListView.as_view(),
        name="user_artist_list",
    ),
    url(
        regex=r"^user/(?P<username>[a-z0-9]+)/library/tracks/$",
        view=views.UserTrackListView.as_view(),
        name="user_track_list",
    ),
]
mit
1,310,448,974,129,350,100
26.802469
88
0.543961
false
OpenTechFund/WebApp
opentech/apply/review/migrations/0001_initial.py
1
1068
# Generated by Django 2.0.2 on 2018-03-13 17:23

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('funds', '0028_update_on_delete_django2'),
    ]

    operations = [
        migrations.CreateModel(
            name='Review',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('review', models.TextField()),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
                ('submission', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='funds.ApplicationSubmission', related_name='reviews')),
            ],
        ),
        migrations.AlterUniqueTogether(
            name='review',
            unique_together={('author', 'submission')},
        ),
    ]
gpl-2.0
-8,155,020,913,151,318,000
33.451613
153
0.618914
false
yinglanma/AI-project
examples/OpenAIGym/run-atari.py
1
3274
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: run-atari.py
# Author: Yuxin Wu <[email protected]>

import numpy as np
import tensorflow as tf
import os, sys, re, time
import random
import argparse
import six

from tensorpack import *
from tensorpack.RL import *

IMAGE_SIZE = (84, 84)
FRAME_HISTORY = 4
CHANNEL = FRAME_HISTORY * 3
IMAGE_SHAPE3 = IMAGE_SIZE + (CHANNEL,)
NUM_ACTIONS = None
ENV_NAME = None

from common import play_one_episode

def get_player(dumpdir=None):
    pl = GymEnv(ENV_NAME, dumpdir=dumpdir, auto_restart=False)
    pl = MapPlayerState(pl, lambda img: cv2.resize(img, IMAGE_SIZE[::-1]))
    global NUM_ACTIONS
    NUM_ACTIONS = pl.get_action_space().num_actions()
    pl = HistoryFramePlayer(pl, FRAME_HISTORY)
    return pl

class Model(ModelDesc):
    def _get_input_vars(self):
        assert NUM_ACTIONS is not None
        return [InputVar(tf.float32, (None,) + IMAGE_SHAPE3, 'state'),
                InputVar(tf.int32, (None,), 'action'),
                InputVar(tf.float32, (None,), 'futurereward') ]

    def _get_NN_prediction(self, image):
        image = image / 255.0
        with argscope(Conv2D, nl=tf.nn.relu):
            l = Conv2D('conv0', image, out_channel=32, kernel_shape=5)
            l = MaxPooling('pool0', l, 2)
            l = Conv2D('conv1', l, out_channel=32, kernel_shape=5)
            l = MaxPooling('pool1', l, 2)
            l = Conv2D('conv2', l, out_channel=64, kernel_shape=4)
            l = MaxPooling('pool2', l, 2)
            l = Conv2D('conv3', l, out_channel=64, kernel_shape=3)

        l = FullyConnected('fc0', l, 512, nl=tf.identity)
        l = PReLU('prelu', l)
        policy = FullyConnected('fc-pi', l, out_dim=NUM_ACTIONS, nl=tf.identity)
        return policy

    def _build_graph(self, inputs):
        state, action, futurereward = inputs
        policy = self._get_NN_prediction(state)
        self.logits = tf.nn.softmax(policy, name='logits')

def run_submission(cfg, output, nr):
    player = get_player(dumpdir=output)
    predfunc = get_predict_func(cfg)
    for k in range(nr):
        if k != 0:
            player.restart_episode()
        score = play_one_episode(player, predfunc)
        print("Total:", score)

def do_submit(output):
    gym.upload(output, api_key='xxx')

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')  # nargs='*' in multi mode
    parser.add_argument('--load', help='load model', required=True)
    parser.add_argument('--env', help='environment name', required=True)
    parser.add_argument('--episode', help='number of episodes to run', type=int, default=100)
    parser.add_argument('--output', help='output directory', default='gym-submit')
    args = parser.parse_args()
    ENV_NAME = args.env
    assert ENV_NAME
    logger.info("Environment Name: {}".format(ENV_NAME))
    p = get_player(); del p  # set NUM_ACTIONS

    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    cfg = PredictConfig(
        model=Model(),
        session_init=SaverRestore(args.load),
        input_var_names=['state'],
        output_var_names=['logits'])
    run_submission(cfg, args.output, args.episode)
apache-2.0
-3,475,211,309,922,932,000
32.070707
105
0.618815
false
pydcs/dcs
dcs/coalition.py
1
13638
import sys from typing import Dict, Union, List, TYPE_CHECKING import dcs.countries as countries import dcs.unitgroup as unitgroup import dcs.planes as planes import dcs.helicopters as helicopters import dcs.ships as ships from dcs.unit import Vehicle, Static, Ship, FARP, SingleHeliPad from dcs.flyingunit import Plane, Helicopter from dcs.point import MovingPoint, StaticPoint from dcs.country import Country from dcs.status_message import StatusMessage, MessageType, MessageSeverity if TYPE_CHECKING: from . import Mission class Coalition: def __init__(self, name, bullseye=None): self.name = name self.countries = {} # type: Dict[str, Country] self.bullseye = bullseye self.nav_points = [] # TODO @staticmethod def _sort_keys(points): keys = [] for imp_point_idx in points: keys.append(int(imp_point_idx)) keys.sort() return keys @staticmethod def _import_moving_point(mission, group: unitgroup.Group, imp_group) -> unitgroup.Group: keys = Coalition._sort_keys(imp_group["route"]["points"]) for imp_point_idx in keys: imp_point = imp_group["route"]["points"][imp_point_idx] point = MovingPoint() point.load_from_dict(imp_point, mission.translation) group.add_point(point) return group @staticmethod def _import_static_point(mission, group: unitgroup.Group, imp_group) -> unitgroup.Group: keys = Coalition._sort_keys(imp_group["route"]["points"]) for imp_point_idx in keys: imp_point = imp_group["route"]["points"][imp_point_idx] point = StaticPoint() point.load_from_dict(imp_point, mission.translation) group.add_point(point) return group @staticmethod def _park_unit_on_airport( mission: 'Mission', group: unitgroup.Group, unit: Union[Plane, Helicopter]) -> List[StatusMessage]: ret = [] if group.points[0].airdrome_id is not None and unit.parking is not None: airport = mission.terrain.airport_by_id(group.points[0].airdrome_id) slot = airport.parking_slot(unit.parking) if slot is not None: unit.set_parking(slot) else: msg = "Parking slot id '{i}' for unit '{u}' in group '{p}' on airport '{a}' " \ "not valid, placing on next free".format(i=unit.parking, u=unit.name, a=airport.name, p=group.name) print("WARN", msg, file=sys.stderr) ret.append(StatusMessage(msg, MessageType.PARKING_SLOT_NOT_VALID, MessageSeverity.WARN)) slot = airport.free_parking_slot(unit.unit_type) if slot is not None: unit.set_parking(slot) else: msg = "No free parking slots for unit '{u}' in unit group '{p}' on airport '{a}', ignoring"\ .format(u=unit.name, a=airport.name, p=group.name) print("ERRO", msg, file=sys.stderr) ret.append(StatusMessage(msg, MessageType.PARKING_SLOTS_FULL, MessageSeverity.ERROR)) return ret @staticmethod def get_name(mission: "Mission", name: str) -> str: # Group, unit names are not localized for missions are created in 2.7. 
if mission.version < 19: return str(mission.translation.get_string(name)) else: return name def load_from_dict(self, mission, d) -> List[StatusMessage]: status: List[StatusMessage] = [] for country_idx in d["country"]: imp_country = d["country"][country_idx] _country = countries.get_by_id(imp_country["id"]) if "vehicle" in imp_country: for vgroup_idx in imp_country["vehicle"]["group"]: vgroup = imp_country["vehicle"]["group"][vgroup_idx] vg = unitgroup.VehicleGroup(vgroup["groupId"], self.get_name(mission, vgroup["name"]), vgroup["start_time"]) vg.load_from_dict(vgroup) mission.current_group_id = max(mission.current_group_id, vg.id) Coalition._import_moving_point(mission, vg, vgroup) # units for imp_unit_idx in vgroup["units"]: imp_unit = vgroup["units"][imp_unit_idx] unit = Vehicle( id=imp_unit["unitId"], name=self.get_name(mission, imp_unit["name"]), _type=imp_unit["type"]) unit.load_from_dict(imp_unit) mission.current_unit_id = max(mission.current_unit_id, unit.id) vg.add_unit(unit) _country.add_vehicle_group(vg) if "ship" in imp_country: for group_idx in imp_country["ship"]["group"]: imp_group = imp_country["ship"]["group"][group_idx] vg = unitgroup.ShipGroup(imp_group["groupId"], self.get_name(mission, imp_group["name"]), imp_group["start_time"]) vg.load_from_dict(imp_group) mission.current_group_id = max(mission.current_group_id, vg.id) Coalition._import_moving_point(mission, vg, imp_group) # units for imp_unit_idx in imp_group["units"]: imp_unit = imp_group["units"][imp_unit_idx] unit = Ship( id=imp_unit["unitId"], name=self.get_name(mission, imp_unit["name"]), _type=ships.ship_map[imp_unit["type"]]) unit.load_from_dict(imp_unit) mission.current_unit_id = max(mission.current_unit_id, unit.id) vg.add_unit(unit) _country.add_ship_group(vg) if "plane" in imp_country: for pgroup_idx in imp_country["plane"]["group"]: pgroup = imp_country["plane"]["group"][pgroup_idx] plane_group = unitgroup.PlaneGroup(pgroup["groupId"], self.get_name(mission, pgroup["name"]), pgroup["start_time"]) plane_group.load_from_dict(pgroup) mission.current_group_id = max(mission.current_group_id, plane_group.id) Coalition._import_moving_point(mission, plane_group, pgroup) # units for imp_unit_idx in pgroup["units"]: imp_unit = pgroup["units"][imp_unit_idx] plane = Plane( _id=imp_unit["unitId"], name=self.get_name(mission, imp_unit["name"]), _type=planes.plane_map[imp_unit["type"]], _country=_country) plane.load_from_dict(imp_unit) if _country.reserve_onboard_num(plane.onboard_num): msg = "{c} Plane '{p}' already using tail number: {t}".format( c=self.name.upper(), p=plane.name, t=plane.onboard_num) status.append(StatusMessage(msg, MessageType.ONBOARD_NUM_DUPLICATE, MessageSeverity.WARN)) print("WARN:", msg, file=sys.stderr) status += self._park_unit_on_airport(mission, plane_group, plane) mission.current_unit_id = max(mission.current_unit_id, plane.id) plane_group.add_unit(plane) # check runway start # if plane_group.points[0].airdrome_id is not None and plane_group.units[0].parking is None: # airport = mission.terrain.airport_by_id(plane_group.points[0].airdrome_id) # airport.occupy_runway(plane_group) _country.add_plane_group(plane_group) if "helicopter" in imp_country: for pgroup_idx in imp_country["helicopter"]["group"]: pgroup = imp_country["helicopter"]["group"][pgroup_idx] helicopter_group = unitgroup.HelicopterGroup( pgroup["groupId"], self.get_name(mission, pgroup["name"]), pgroup["start_time"]) helicopter_group.load_from_dict(pgroup) mission.current_group_id = max(mission.current_group_id, 
helicopter_group.id) Coalition._import_moving_point(mission, helicopter_group, pgroup) # units for imp_unit_idx in pgroup["units"]: imp_unit = pgroup["units"][imp_unit_idx] heli = Helicopter( _id=imp_unit["unitId"], name=self.get_name(mission, imp_unit["name"]), _type=helicopters.helicopter_map[imp_unit["type"]], _country=_country) heli.load_from_dict(imp_unit) if _country.reserve_onboard_num(heli.onboard_num): msg = "{c} Helicopter '{h}' already using tail number: {t}".format( c=self.name.upper(), h=heli.name, t=heli.onboard_num) status.append(StatusMessage(msg, MessageType.ONBOARD_NUM_DUPLICATE, MessageSeverity.WARN)) print("WARN:", msg, file=sys.stderr) status += self._park_unit_on_airport(mission, helicopter_group, heli) mission.current_unit_id = max(mission.current_unit_id, heli.id) helicopter_group.add_unit(heli) # check runway start # if helicopter_group.points[0].airdrome_id is not None and helicopter_group.units[0].parking is None: # airport = mission.terrain.airport_by_id(helicopter_group.points[0].airdrome_id) # airport.occupy_runway(helicopter_group) _country.add_helicopter_group(helicopter_group) if "static" in imp_country: for sgroup_idx in imp_country["static"]["group"]: sgroup = imp_country["static"]["group"][sgroup_idx] static_group = unitgroup.StaticGroup(sgroup["groupId"], self.get_name(mission, sgroup["name"])) static_group.load_from_dict(sgroup) mission.current_group_id = max(mission.current_group_id, static_group.id) Coalition._import_static_point(mission, static_group, sgroup) # units for imp_unit_idx in sgroup["units"]: imp_unit = sgroup["units"][imp_unit_idx] if imp_unit["type"] == "FARP": static = FARP( unit_id=imp_unit["unitId"], name=self.get_name(mission, imp_unit["name"])) elif imp_unit["type"] == "SINGLE_HELIPAD": static = SingleHeliPad( unit_id=imp_unit["unitId"], name=self.get_name(mission, imp_unit["name"])) else: static = Static( unit_id=imp_unit["unitId"], name=self.get_name(mission, imp_unit["name"]), _type=imp_unit["type"]) static.load_from_dict(imp_unit) mission.current_unit_id = max(mission.current_unit_id, static.id) static_group.add_unit(static) _country.add_static_group(static_group) self.add_country(_country) return status def set_bullseye(self, bulls): self.bullseye = bulls def add_country(self, country): self.countries[country.name] = country return country def remove_country(self, name): return self.countries.pop(name) def swap_country(self, coalition, name): return coalition.add_country(self.remove_country(name)) def country(self, country_name: str): return self.countries.get(country_name, None) def country_by_id(self, _id: int): for cn in self.countries: c = self.countries[cn] if c.id == _id: return c return None def find_group(self, group_name, search="exact"): for c in self.countries: g = self.countries[c].find_group(group_name, search) if g: return g return None def dict(self): d = {"name": self.name} if self.bullseye: d["bullseye"] = self.bullseye d["country"] = {} i = 1 for country in sorted(self.countries.keys()): d["country"][i] = self.country(country).dict() i += 1 d["nav_points"] = {} return d
lgpl-3.0
-5,853,055,812,100,301,000
45.546075
122
0.510045
false
compsci-hfh/app
project/project/defaults.py
1
8179
import os from django.contrib import messages SETTINGS_DIR = os.path.dirname(os.path.abspath(__file__)) PROJECT_DIR = os.path.dirname(SETTINGS_DIR) BUILDOUT_DIR = os.path.dirname(PROJECT_DIR) VAR_DIR = os.path.join(BUILDOUT_DIR, "var") ########################################################################## # # Secret settings # ########################################################################## # If a secret_settings file isn't defined, open a new one and save a # SECRET_KEY in it. Then import it. All passwords and other secret # settings should be stored in secret_settings.py. NOT in settings.py try: from secret_settings import * except ImportError: print "Couldn't find secret_settings.py file. Creating a new one." secret_path = os.path.join(SETTINGS_DIR, "secret_settings.py") with open(secret_path, 'w') as secret_settings: secret_key = ''.join([chr(ord(x) % 90 + 33) for x in os.urandom(40)]) secret_settings.write("SECRET_KEY = '''%s'''\n" % secret_key) from secret_settings import * ########################################################################## # # Authentication settings # ########################################################################## # When a user successfully logs in, redirect here by default LOGIN_REDIRECT_URL = '/' # The address to redirect to when a user must authenticate LOGIN_URL = '/accounts/google/login/?process=login' ACCOUNT_SIGNUP_FORM_CLASS = 'project.profiles.forms.SignupForm' # Require that users who are signing up provide an email address ACCOUNT_EMAIL_REQUIRED = True # Don't store login tokens. We don't need them. SOCIALACCOUNT_STORE_TOKENS = False # Try to pull username/email from provider. SOCIALACCOUNT_AUTO_SIGNUP = False SOCIALACCOUNT_PROVIDERS = { 'google': { 'SCOPE': ['profile', 'email'], 'AUTH_PARAMS': { 'access_type': 'online' } }, } AUTHENTICATION_BACKENDS = ( 'allauth.account.auth_backends.AuthenticationBackend', ) ABSOLUTE_URL_OVERRIDES = { 'auth.user': lambda u: "/profile/%s/" % u.username, } ########################################################################## # # Email Settings # ########################################################################## # These should be added to secret_settings.py # EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' # EMAIL_HOST = '' # EMAIL_PORT = 587 # EMAIL_HOST_USER = '' # EMAIL_HOST_PASSWORD = '' # EMAIL_USE_TLS = True # DEFAULT_FROM_EMAIL = '' ########################################################################## # # API settings # ########################################################################## REST_FRAMEWORK = { 'DEFAULT_PERMISSION_CLASSES': ( 'rest_framework.permissions.IsAuthenticated', ) } ########################################################################## # # Bleach settings # ########################################################################## import bleach ALLOWED_HTML_TAGS = bleach.ALLOWED_TAGS + ['h1', 'h2', 'h3', 'p', 'img'] ALLOWED_HTML_ATTRS = bleach.ALLOWED_ATTRIBUTES ALLOWED_HTML_ATTRS.update({ 'img': ['src', 'alt'], }) ########################################################################## # # Crispy settings # ########################################################################## CRISPY_TEMPLATE_PACK = "bootstrap3" ########################################################################## # # Messages settings # ########################################################################## # Change the default messgae tags to play nice with Bootstrap MESSAGE_TAGS = { messages.DEBUG: 'alert-info', messages.INFO: 
'alert-info', messages.SUCCESS: 'alert-success', messages.WARNING: 'alert-warning', messages.ERROR: 'alert-danger', } ########################################################################## # # Database settings # ########################################################################## # Should be overridden by development.py or production.py DATABASES = None ########################################################################## # # Location settings # ########################################################################## TIME_ZONE = 'America/Chicago' LANGUAGE_CODE = 'en-us' USE_I18N = True USE_L10N = True USE_TZ = True ########################################################################## # # Static files settings # ########################################################################## MEDIA_ROOT = os.path.join(VAR_DIR, "uploads") MEDIA_URL = '/uploads/' STATIC_ROOT = os.path.join(VAR_DIR, "static") STATIC_URL = '/static/' ADMIN_MEDIA_PREFIX = '/static/admin/' STATICFILES_DIRS = ( os.path.join(PROJECT_DIR, "static"), ) STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', 'compressor.finders.CompressorFinder', ) COMPRESS_PRECOMPILERS = ( ('text/coffeescript', 'coffee --compile --stdio'), ('text/x-sass', 'sass {infile} {outfile}'), ('text/x-scss', 'sass --scss {infile} {outfile}'), ) COMPRESS_ENABLED = True ########################################################################## # # Template settings # ########################################################################## TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(PROJECT_DIR, "templates")], 'OPTIONS': { 'context_processors': [ # Django 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', 'django.template.context_processors.csrf', 'django.template.context_processors.debug', 'django.template.context_processors.media', 'django.template.context_processors.request', 'django.template.context_processors.static', ], 'loaders': [ 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', 'django.template.loaders.eggs.Loader', ] }, }, ] ########################################################################## # # Middleware settings # ########################################################################## MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'django.middleware.security.SecurityMiddleware', ) ########################################################################## # # URL settings # ########################################################################## ROOT_URLCONF = 'project.project.urls' ########################################################################## # # Installed apps settings # ########################################################################## INSTALLED_APPS = ( # Django Content types *must* be first. 
'django.contrib.contenttypes', # AllAuth 'allauth', 'allauth.account', 'allauth.socialaccount', 'allauth.socialaccount.providers.google', # Admin Tools 'admin_tools', 'admin_tools.theming', 'admin_tools.menu', 'admin_tools.dashboard', # Django 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.messages', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.staticfiles', # Crispy Forms 'crispy_forms', # Rest Framework 'rest_framework', # Django Extensions 'django_extensions', # Compressor 'compressor', # H4H apps 'project.teams', 'project.profiles', 'project.submission', # Sentry client 'raven.contrib.django.raven_compat', )
mit
8,719,486,345,774,488,000
26.354515
77
0.493948
false
nikitanovosibirsk/jj
tests/matchers/test_resolvable_matcher.py
1
1810
import sys

if sys.version_info >= (3, 8):
    from unittest.mock import AsyncMock
else:
    from asynctest.mock import CoroutineMock as AsyncMock
from unittest.mock import Mock, call

import pytest
from pytest import raises

from jj.matchers import ResolvableMatcher

from .._test_utils.fixtures import handler_, request_, resolver_
from .._test_utils.steps import given, then, when

__all__ = ("request_", "resolver_", "handler_",)


@pytest.mark.asyncio
async def test_abstract_match_method_raises_error(*, resolver_, request_):
    with given:
        matcher = ResolvableMatcher(resolver=resolver_)

    with when, raises(Exception) as exception:
        await matcher.match(request_)

    with then:
        assert exception.type is NotImplementedError


@pytest.mark.asyncio
async def test_concrete_match_method_not_raises_error(*, resolver_, request_):
    with given:
        matcher_ = Mock(ResolvableMatcher, match=AsyncMock(return_value=True))

    with when:
        actual = await matcher_.match(request_)

    with then:
        assert actual is True
        assert matcher_.mock_calls == [call.match(request_)]


@pytest.mark.asyncio
async def test_decorator_registers_matcher(*, resolver_, handler_):
    with given:
        matcher = ResolvableMatcher(resolver=resolver_)

    with when:
        actual = matcher(handler_)

    with then:
        assert actual == handler_
        assert resolver_.mock_calls == [call.register_matcher(matcher.match, handler_)]
        handler_.assert_not_called()


def test_repr(*, resolver_):
    with given:
        resolver_.__repr__ = Mock(return_value="<Resolver>")
        matcher = ResolvableMatcher(resolver=resolver_)

    with when:
        actual = repr(matcher)

    with then:
        assert actual == "ResolvableMatcher(resolver=<Resolver>)"
apache-2.0
507,802,635,533,721,900
25.231884
87
0.680663
false
pemami4911/Sub8
drivers/sub8_videoray_m5_thruster/nodes/thruster_driver.py
1
6864
#!/usr/bin/env python
import numpy as np
import json
import rospy
import scipy.interpolate
import threading
import argparse

from std_msgs.msg import Header
from sub8_msgs.msg import Thrust, ThrusterCmd, ThrusterStatus
from sub8_ros_tools import wait_for_param, thread_lock
from sub8_msgs.srv import ThrusterInfo, ThrusterInfoResponse, FailThruster, FailThrusterResponse
from sub8_thruster_comm import thruster_comm_factory
from sub8_alarm import AlarmBroadcaster

lock = threading.Lock()


class ThrusterDriver(object):
    def __init__(self, config_path, bus_layout):
        '''Thruster driver, an object for commanding all of the sub's thrusters
            - Gather configuration data and make it available to other nodes
            - Instantiate ThrusterPorts, (Either simulated or real), for communicating with thrusters
            - Track a thrust_dict, which maps thruster names to the appropriate port
            - Given a command message, route that command to the appropriate port/thruster
            - Send a thruster status message describing the status of the particular thruster
        '''
        self.alarm_broadcaster = AlarmBroadcaster()
        self.thruster_out_alarm = self.alarm_broadcaster.add_alarm(
            name='thruster_out',
            action_required=True,
            severity=2
        )
        self.failed_thrusters = []

        self.make_fake = rospy.get_param('simulate', False)
        if self.make_fake:
            rospy.logwarn("Running fake thrusters for simulation, based on parameter '/simulate'")

        # Individual thruster configuration data
        newtons, thruster_input = self.load_config(config_path)
        self.interpolate = scipy.interpolate.interp1d(newtons, thruster_input)

        # Bus configuration
        self.port_dict = self.load_bus_layout(bus_layout)

        thrust_service = rospy.Service('thrusters/thruster_range', ThrusterInfo, self.get_thruster_info)
        self.thrust_sub = rospy.Subscriber('thrusters/thrust', Thrust, self.thrust_cb, queue_size=1)
        self.status_pub = rospy.Publisher('thrusters/thruster_status', ThrusterStatus, queue_size=8)

        # This is essentially only for testing
        self.fail_thruster_server = rospy.Service('fail_thruster', FailThruster, self.fail_thruster)

    def load_config(self, path):
        '''Load the configuration data:
            - Map force inputs from Newtons to [-1, 1] required by the thruster
        '''
        try:
            _file = file(path)
        except IOError, e:
            rospy.logerr("Could not find thruster configuration file at {}".format(path))
            raise(e)

        json_data = json.load(_file)
        newtons = json_data['calibration_data']['newtons']
        thruster_input = json_data['calibration_data']['thruster_input']
        return newtons, thruster_input

    def get_thruster_info(self, srv):
        '''Get the thruster info for a particular thruster ID
        Right now, this is only the min and max thrust data
        '''
        # Unused right now
        query_id = srv.thruster_id

        min_thrust = min(self.interpolate.x)
        max_thrust = max(self.interpolate.x)
        thruster_info = ThrusterInfoResponse(
            min_force=min_thrust,
            max_force=max_thrust
        )
        return thruster_info

    @thread_lock(lock)
    def load_bus_layout(self, layout):
        '''Load and handle the thruster bus layout'''
        port_dict = {}
        for port in layout:
            thruster_port = thruster_comm_factory(port, fake=self.make_fake)

            # Add the thrusters to the thruster dict
            for thruster_name, thruster_info in port['thrusters'].items():
                port_dict[thruster_name] = thruster_port

        return port_dict

    @thread_lock(lock)
    def command_thruster(self, name, force):
        '''Issue a a force command (in Newtons) to a named thruster
            Example names are BLR, FLL, etc
        TODO: Make this still get a thruster status when the thruster is failed
            (We could figure out if it has stopped being failed!)
        '''
        if name in self.failed_thrusters:
            return

        target_port = self.port_dict[name]
        clipped_force = np.clip(force, min(self.interpolate.x), max(self.interpolate.x))
        normalized_force = self.interpolate(clipped_force)

        # We immediately get thruster_status back
        thruster_status = target_port.command_thruster(name, force)

        message_contents = [
            'rpm',
            'bus_voltage',
            'bus_current',
            'temperature',
            'fault',
            'response_node_id',
        ]

        message_keyword_args = {key: thruster_status[key] for key in message_contents}
        if message_keyword_args['fault'] != 0:
            self.alert_thruster_loss(thruster_name, message_keyword_args)

        self.status_pub.publish(
            ThrusterStatus(
                header=Header(stamp=rospy.Time.now()),
                name=name,
                **message_keyword_args
            )
        )

    def thrust_cb(self, msg):
        '''Callback for recieving thrust commands
        These messages contain a list of instructions, one for each thruster
        '''
        for thrust_cmd in msg.thruster_commands:
            self.command_thruster(thrust_cmd.name, thrust_cmd.thrust)

    def alert_thruster_loss(self, thruster_name, fault_info):
        self.thruster_out_alarm.raise_alarm(
            problem_description='Thruster {} has failed'.format(thruster_name),
            parameters={
                'thruster_name': thruster_name,
                'fault_info': fault_info
            }
        )
        self.failed_thrusters.append(thruster_name)

    def fail_thruster(self, srv):
        self.alert_thruster_loss(srv.thruster_name, None)
        return FailThrusterResponse()


if __name__ == '__main__':
    usage_msg = "Interface to Sub8's VideoRay M5 thrusters"
    desc_msg = "Specify a path to the configuration.json file containing the thrust calibration data"
    parser = argparse.ArgumentParser(usage=usage_msg, description=desc_msg)
    parser.add_argument('--configuration_path', dest='config_path',
                        help='Designate the absolute path of the calibration/configuration json file')
    args = parser.parse_args(rospy.myargv()[1:])
    config_path = args.config_path

    rospy.init_node('videoray_m5_thruster_driver')

    layout_parameter = '/busses'
    rospy.loginfo("Thruster Driver waiting for parameter, {}".format(layout_parameter))
    busses = wait_for_param(layout_parameter)
    if busses is None:
        raise(rospy.exceptions.ROSException("Failed to find parameter '{}'".format(layout_parameter)))

    thruster_driver = ThrusterDriver(config_path, busses)
    rospy.spin()
mit
2,318,974,325,090,740,000
38.228571
104
0.640589
false
vincent99/cattle
tests/integration-v1/cattletest/core/test_external_events.py
1
9631
from common import * # NOQA from cattle import ApiError SERVICE_KIND = 'kubernetesService' def from_context(context): return context.client, context.agent_client, context.host def test_bad_agent(super_client, new_context): _, account, agent_client = register_simulated_host(new_context, return_agent=True) def post(): external_id = random_str() agent_client.create_external_storage_pool_event( externalId=external_id, eventType="storagepool.create", hostUuids=[], storagePool={ 'name': 'name-%s' % external_id, 'externalId': external_id, }) # Test it works post() # Test it fails with two agents super_client.wait_success(super_client.create_agent( uri='test://' + random_str(), accountId=account.id)) with pytest.raises(ApiError) as e: post() assert e.value.error.code == 'MissingRequired' # Test it fails with no agents for agent in super_client.list_agent(accountId=account.id): super_client.wait_success(agent.deactivate()) with pytest.raises(ApiError) as e: post() assert e.value.error.code == 'CantVerifyAgent' def test_external_host_event_miss(new_context): new_context.create_container() client = new_context.client host = new_context.host event = client.create_external_host_event(hostLabel='foo=bar', eventType='host.evacuate', deleteHost=True) event = client.wait_success(event) host = client.reload(host) assert event.state == 'created' assert host.state == 'active' def test_external_host_event_wrong_event(new_context): c = new_context.create_container() client = new_context.client host = client.update(new_context.host, labels={ 'foo': 'bar' }) host = client.wait_success(host) assert host.labels == {'foo': 'bar'} event = client.create_external_host_event(hostLabel='foo=bar', eventType='host.notevacuate', deleteHost=True) assert event.state == 'creating' event = client.wait_success(event) host = client.reload(host) c = client.wait_success(c) assert event.state == 'created' assert host.state == 'active' assert c.state == 'running' def test_external_host_event_hit(new_context): c = new_context.create_container() client = new_context.client host = client.wait_success(new_context.host) host = client.update(host, labels={ 'foo': 'bar' }) host = client.wait_success(host) assert host.labels == {'foo': 'bar'} event = client.create_external_host_event(hostLabel='foo=bar', eventType='host.evacuate', deleteHost=True) assert event.state == 'creating' event = client.wait_success(event) host = client.reload(host) c = client.wait_success(c) assert event.state == 'created' assert host.removed is not None assert c.removed is not None def test_external_host_event_no_delete(new_context): c = new_context.create_container() client = new_context.client host = client.update(new_context.host, labels={ 'foo': 'bar' }) host = client.wait_success(host) assert host.labels == {'foo': 'bar'} event = client.create_external_host_event(hostLabel='foo=bar', eventType='host.evacuate') assert event.state == 'creating' event = client.wait_success(event) host = client.reload(host) c = client.wait_success(c) assert event.state == 'created' assert host.state == 'inactive' def test_external_host_event_by_id(new_context): c = new_context.create_container() new_host = register_simulated_host(new_context) client = new_context.client host = client.update(new_context.host, labels={ 'foo': 'bar' }) host = client.wait_success(host) assert host.labels == {'foo': 'bar'} event = client.create_external_host_event(hostId=host.id, eventType='host.evacuate') assert event.state == 'creating' event = client.wait_success(event) new_host = 
client.reload(new_host) c = client.wait_success(c) host = client.reload(host) assert event.state == 'created' assert host.state == 'inactive' assert new_host.state == 'active' def test_external_dns_event(super_client, new_context): client, agent_client, host = from_context(new_context) stack = client.create_environment(name=random_str()) stack = client.wait_success(stack) image_uuid = new_context.image_uuid launch_config = {"imageUuid": image_uuid} svc1 = client.create_service(name=random_str(), environmentId=stack.id, launchConfig=launch_config) svc1 = client.wait_success(svc1) domain_name1 = "foo.com" create_dns_event(client, agent_client, super_client, new_context, svc1.name, stack.name, domain_name1) # wait for dns name to be updated svc1 = client.reload(svc1) assert svc1.fqdn == domain_name1 def create_dns_event(client, agent_client, super_client, context, svc_name1, stack_name, domain_name): external_id = random_str() event_type = "externalDnsEvent" dns_event = { 'externalId': external_id, 'eventType': event_type, "stackName": stack_name, "serviceName": svc_name1, "fqdn": domain_name } event = agent_client.create_external_dns_event(dns_event) assert event.externalId == external_id assert event.eventType == event_type event = wait_for(lambda: event_wait(client, event)) assert event.accountId == context.project.id assert event.reportedAccountId == context.agent.id return event def test_external_service_event_create(client, context, super_client): agent_client = context.agent_client env_external_id = random_str() environment = {"name": "foo", "externalId": env_external_id} svc_external_id = random_str() svc_name = 'svc-name-%s' % svc_external_id selector = 'foo=bar1' template = {'foo': 'bar'} svc_data = { 'selectorContainer': selector, 'kind': SERVICE_KIND, 'name': svc_name, 'externalId': svc_external_id, 'template': template, } event = agent_client.create_external_service_event( eventType='service.create', environment=environment, externalId=svc_external_id, service=svc_data, ) event = wait_for(lambda: event_wait(client, event)) assert event is not None svc = wait_for(lambda: service_wait(client, svc_external_id)) assert svc.externalId == svc_external_id assert svc.name == svc_name assert svc.kind == SERVICE_KIND assert svc.selectorContainer == selector assert svc.environmentId is not None assert svc.template == template envs = client.list_environment(externalId=env_external_id) assert len(envs) == 1 assert envs[0].id == svc.environmentId wait_for_condition(client, svc, lambda x: x.state == 'active', lambda x: 'State is: ' + x.state) # Update new_selector = 'newselector=foo' svc_data = { 'selectorContainer': new_selector, 'kind': SERVICE_KIND, 'template': {'foo': 'bar'}, } agent_client.create_external_service_event( eventType='service.update', environment=environment, externalId=svc_external_id, service=svc_data, ) wait_for_condition(client, svc, lambda x: x.selectorContainer == new_selector, lambda x: 'Selector is: ' + x.selectorContainer) # Delete agent_client.create_external_service_event( name=svc_name, eventType='service.remove', externalId=svc_external_id, service={'kind': SERVICE_KIND}, ) wait_for_condition(client, svc, lambda x: x.state == 'removed', lambda x: 'State is: ' + x.state) def test_external_stack_event_create(client, context, super_client): agent_client = context.agent_client env_external_id = random_str() environment = {"name": env_external_id, "externalId": env_external_id, "kind": "environment"} env = client.create_environment(environment) env = 
client.wait_success(env) service = { 'kind': SERVICE_KIND, } event = agent_client.create_external_service_event( eventType='stack.remove', environment=environment, externalId=env_external_id, service=service, ) event = wait_for(lambda: event_wait(client, event)) assert event is not None wait_for(lambda: len(client.list_environment(externalId=env_external_id)) == 0) def service_wait(client, external_id): services = client.list_kubernetes_service(externalId=external_id) if len(services) and services[0].state == 'active': return services[0] def event_wait(client, event): created = client.by_id('externalEvent', event.id) if created is not None and created.state == 'created': return created
apache-2.0
-3,864,805,552,549,432,300
29.671975
75
0.603779
false
rudhir-upretee/Sumo17_With_Netsim
tools/build/checkSvnProps.py
1
6351
#!/usr/bin/env python """ @file checkSvnProps.py @author Michael Behrisch @date 2010 @version $Id: checkSvnProps.py 13811 2013-05-01 20:31:43Z behrisch $ Checks svn property settings for all files. SUMO, Simulation of Urban MObility; see http://sumo.sourceforge.net/ Copyright (C) 2010-2013 DLR (http://www.dlr.de/) and contributors All rights reserved """ import os, subprocess, sys, xml.sax from optparse import OptionParser _SOURCE_EXT = [".h", ".cpp", ".py", ".pl", ".java", ".am"] _TESTDATA_EXT = [".xml", ".prog", ".csv", ".complex", ".dfrouter", ".duarouter", ".jtrrouter", ".astar", ".chrouter", ".tcl", ".txt", ".netconvert", ".netgen", ".od2trips", ".polyconvert", ".sumo", ".meso", ".tools", ".traci", ".activitygen", ".scenario", ".sumocfg", ".netccfg", ".netgcfg"] _VS_EXT = [".vsprops", ".sln", ".vcproj", ".bat", ".props", ".vcxproj", ".filters"] _KEYWORDS = "HeadURL Id LastChangedBy LastChangedDate LastChangedRevision" class PropertyReader(xml.sax.handler.ContentHandler): """Reads the svn properties of files as written by svn pl -v --xml""" def __init__(self, doFix): self._fix = doFix self._file = "" self._property = None self._value = "" self._hadEOL = False self._hadKeywords = False def startElement(self, name, attrs): if name == 'target': self._file = attrs['path'] seen.add(os.path.join(svnRoot, self._file)) if name == 'property': self._property = attrs['name'] def characters(self, content): if self._property: self._value += content def endElement(self, name): ext = os.path.splitext(self._file)[1] if name == 'property' and self._property == "svn:eol-style": self._hadEOL = True if name == 'property' and self._property == "svn:keywords": self._hadKeywords = True if ext in _SOURCE_EXT or ext in _TESTDATA_EXT or ext in _VS_EXT: if name == 'property' and self._property == "svn:executable" and ext not in [".py", ".pl", ".bat"]: print self._file, self._property, self._value if self._fix: subprocess.call(["svn", "pd", "svn:executable", self._file]) if name == 'property' and self._property == "svn:mime-type": print self._file, self._property, self._value if self._fix: subprocess.call(["svn", "pd", "svn:mime-type", self._file]) if ext in _SOURCE_EXT or ext in _TESTDATA_EXT: if name == 'property' and self._property == "svn:eol-style" and self._value != "LF"\ or name == "target" and not self._hadEOL: print self._file, "svn:eol-style", self._value if self._fix: if os.name == "posix": subprocess.call(["sed", "-i", r's/\r$//', self._file]) subprocess.call(["sed", "-i", r's/\r/\n/g', self._file]) subprocess.call(["svn", "ps", "svn:eol-style", "LF", self._file]) if ext in _SOURCE_EXT: if name == 'property' and self._property == "svn:keywords" and self._value != _KEYWORDS\ or name == "target" and not self._hadKeywords: print self._file, "svn:keywords", self._value if self._fix: subprocess.call(["svn", "ps", "svn:keywords", _KEYWORDS, self._file]) if ext in _VS_EXT: if name == 'property' and self._property == "svn:eol-style" and self._value != "CRLF"\ or name == "target" and not self._hadEOL: print self._file, "svn:eol-style", self._value if self._fix: subprocess.call(["svn", "ps", "svn:eol-style", "CRLF", self._file]) if name == 'property': self._value = "" self._property = None if name == 'target': self._hadEOL = False self._hadKeywords = False optParser = OptionParser() optParser.add_option("-v", "--verbose", action="store_true", default=False, help="tell me what you are doing") optParser.add_option("-f", "--fix", action="store_true", default=False, help="fix invalid svn properties") (options, 
args) = optParser.parse_args() seen = set() sumoRoot = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) svnRoots = [sumoRoot] if len(args) > 0: svnRoots = [os.path.abspath(a) for a in args] else: upDir = os.path.dirname(sumoRoot) for l in subprocess.Popen(["svn", "pg", "svn:externals", upDir], stdout=subprocess.PIPE, stderr=open(os.devnull, 'w')).communicate()[0].splitlines(): if l[:5] == "sumo/": svnRoots.append(os.path.join(upDir, l.split()[0])) for svnRoot in svnRoots: if options.verbose: print "checking", svnRoot output = subprocess.Popen(["svn", "pl", "-v", "-R", "--xml", svnRoot], stdout=subprocess.PIPE).communicate()[0] xml.sax.parseString(output, PropertyReader(options.fix)) if options.verbose: print "re-checking tree at", sumoRoot for root, dirs, files in os.walk(sumoRoot): for name in files: fullName = os.path.join(root, name) if fullName in seen or subprocess.call(["svn", "ls", fullName], stdout=open(os.devnull, 'w'), stderr=subprocess.STDOUT): continue ext = os.path.splitext(name)[1] if ext in _SOURCE_EXT or ext in _TESTDATA_EXT or ext in _VS_EXT: print fullName, "svn:eol-style" if options.fix: if ext in _VS_EXT: subprocess.call(["svn", "ps", "svn:eol-style", "CRLF", fullName]) else: if os.name == "posix": subprocess.call(["sed", "-i", 's/\r$//', fullName]) subprocess.call(["svn", "ps", "svn:eol-style", "LF", fullName]) if ext in _SOURCE_EXT: print fullName, "svn:keywords" if options.fix: subprocess.call(["svn", "ps", "svn:keywords", _KEYWORDS, fullName]) for ignoreDir in ['.svn', 'foreign', 'contributed']: if ignoreDir in dirs: dirs.remove(ignoreDir)
gpl-3.0
-8,763,534,845,186,761,000
44.364286
153
0.551724
false
frew/simpleproto
scons-local-1.1.0/SCons/Tool/packaging/src_targz.py
1
1623
"""SCons.Tool.Packaging.targz The targz SRC packager. """ # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "src/engine/SCons/Tool/packaging/src_targz.py 3603 2008/10/10 05:46:45 scons" from SCons.Tool.packaging import putintopackageroot def package(env, target, source, PACKAGEROOT, **kw): bld = env['BUILDERS']['Tar'] bld.set_suffix('.tar.gz') target, source = putintopackageroot(target, source, env, PACKAGEROOT, honor_install_location=0) return bld(env, target, source, TARFLAGS='-zc')
bsd-2-clause
757,621,664,930,149,500
42.864865
99
0.754775
false
splotz90/urh
src/urh/ui/ui_signal_frame.py
1
41337
# -*- coding: utf-8 -*- # # # WARNING! All changes made in this file will be lost! from PyQt5 import QtCore, QtGui, QtWidgets class Ui_SignalFrame(object): def setupUi(self, SignalFrame): SignalFrame.setObjectName("SignalFrame") SignalFrame.resize(1057, 509) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(SignalFrame.sizePolicy().hasHeightForWidth()) SignalFrame.setSizePolicy(sizePolicy) SignalFrame.setMinimumSize(QtCore.QSize(0, 0)) SignalFrame.setMaximumSize(QtCore.QSize(16777215, 16777215)) SignalFrame.setSizeIncrement(QtCore.QSize(0, 0)) SignalFrame.setBaseSize(QtCore.QSize(0, 0)) SignalFrame.setMouseTracking(False) SignalFrame.setAcceptDrops(True) SignalFrame.setAutoFillBackground(False) SignalFrame.setStyleSheet("") SignalFrame.setFrameShape(QtWidgets.QFrame.NoFrame) SignalFrame.setFrameShadow(QtWidgets.QFrame.Raised) SignalFrame.setLineWidth(1) self.horizontalLayout = QtWidgets.QHBoxLayout(SignalFrame) self.horizontalLayout.setObjectName("horizontalLayout") self.gridLayout_2 = QtWidgets.QGridLayout() self.gridLayout_2.setSizeConstraint(QtWidgets.QLayout.SetFixedSize) self.gridLayout_2.setObjectName("gridLayout_2") spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) self.gridLayout_2.addItem(spacerItem, 12, 0, 1, 1) self.horizontalLayout_5 = QtWidgets.QHBoxLayout() self.horizontalLayout_5.setSpacing(7) self.horizontalLayout_5.setObjectName("horizontalLayout_5") self.cbModulationType = QtWidgets.QComboBox(SignalFrame) self.cbModulationType.setObjectName("cbModulationType") self.cbModulationType.addItem("") self.cbModulationType.addItem("") self.cbModulationType.addItem("") self.horizontalLayout_5.addWidget(self.cbModulationType) self.btnAdvancedModulationSettings = QtWidgets.QToolButton(SignalFrame) icon = QtGui.QIcon.fromTheme("configure") self.btnAdvancedModulationSettings.setIcon(icon) self.btnAdvancedModulationSettings.setIconSize(QtCore.QSize(16, 16)) self.btnAdvancedModulationSettings.setObjectName("btnAdvancedModulationSettings") self.horizontalLayout_5.addWidget(self.btnAdvancedModulationSettings) self.gridLayout_2.addLayout(self.horizontalLayout_5, 9, 1, 1, 1) self.labelModulation = QtWidgets.QLabel(SignalFrame) self.labelModulation.setObjectName("labelModulation") self.gridLayout_2.addWidget(self.labelModulation, 9, 0, 1, 1) self.chkBoxSyncSelection = QtWidgets.QCheckBox(SignalFrame) self.chkBoxSyncSelection.setChecked(True) self.chkBoxSyncSelection.setObjectName("chkBoxSyncSelection") self.gridLayout_2.addWidget(self.chkBoxSyncSelection, 22, 0, 1, 1) self.sliderSpectrogramMin = QtWidgets.QSlider(SignalFrame) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.sliderSpectrogramMin.sizePolicy().hasHeightForWidth()) self.sliderSpectrogramMin.setSizePolicy(sizePolicy) self.sliderSpectrogramMin.setMinimum(-150) self.sliderSpectrogramMin.setMaximum(10) self.sliderSpectrogramMin.setOrientation(QtCore.Qt.Horizontal) self.sliderSpectrogramMin.setObjectName("sliderSpectrogramMin") self.gridLayout_2.addWidget(self.sliderSpectrogramMin, 19, 1, 1, 1) self.spinBoxNoiseTreshold = QtWidgets.QDoubleSpinBox(SignalFrame) self.spinBoxNoiseTreshold.setDecimals(4) self.spinBoxNoiseTreshold.setMaximum(1.0) self.spinBoxNoiseTreshold.setSingleStep(0.0001) 
self.spinBoxNoiseTreshold.setObjectName("spinBoxNoiseTreshold") self.gridLayout_2.addWidget(self.spinBoxNoiseTreshold, 2, 1, 1, 1) self.chkBoxShowProtocol = QtWidgets.QCheckBox(SignalFrame) self.chkBoxShowProtocol.setObjectName("chkBoxShowProtocol") self.gridLayout_2.addWidget(self.chkBoxShowProtocol, 21, 0, 1, 1) self.labelNoise = QtWidgets.QLabel(SignalFrame) self.labelNoise.setObjectName("labelNoise") self.gridLayout_2.addWidget(self.labelNoise, 2, 0, 1, 1) self.lineEditSignalName = QtWidgets.QLineEdit(SignalFrame) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.lineEditSignalName.sizePolicy().hasHeightForWidth()) self.lineEditSignalName.setSizePolicy(sizePolicy) self.lineEditSignalName.setMinimumSize(QtCore.QSize(214, 0)) self.lineEditSignalName.setMaximumSize(QtCore.QSize(16777215, 16777215)) self.lineEditSignalName.setAcceptDrops(False) self.lineEditSignalName.setObjectName("lineEditSignalName") self.gridLayout_2.addWidget(self.lineEditSignalName, 1, 0, 1, 2) self.cbProtoView = QtWidgets.QComboBox(SignalFrame) self.cbProtoView.setObjectName("cbProtoView") self.cbProtoView.addItem("") self.cbProtoView.addItem("") self.cbProtoView.addItem("") self.gridLayout_2.addWidget(self.cbProtoView, 21, 1, 1, 1) self.lInfoLenText = QtWidgets.QLabel(SignalFrame) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.lInfoLenText.sizePolicy().hasHeightForWidth()) self.lInfoLenText.setSizePolicy(sizePolicy) self.lInfoLenText.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse) self.lInfoLenText.setObjectName("lInfoLenText") self.gridLayout_2.addWidget(self.lInfoLenText, 4, 0, 1, 1) self.spinBoxInfoLen = QtWidgets.QSpinBox(SignalFrame) self.spinBoxInfoLen.setMinimumSize(QtCore.QSize(100, 0)) self.spinBoxInfoLen.setMinimum(1) self.spinBoxInfoLen.setMaximum(999999999) self.spinBoxInfoLen.setObjectName("spinBoxInfoLen") self.gridLayout_2.addWidget(self.spinBoxInfoLen, 4, 1, 1, 1) self.spinBoxTolerance = QtWidgets.QSpinBox(SignalFrame) self.spinBoxTolerance.setMinimumSize(QtCore.QSize(100, 0)) self.spinBoxTolerance.setMaximumSize(QtCore.QSize(16777215, 16777215)) self.spinBoxTolerance.setMaximum(9999) self.spinBoxTolerance.setObjectName("spinBoxTolerance") self.gridLayout_2.addWidget(self.spinBoxTolerance, 7, 1, 1, 1) self.lErrorTolerance = QtWidgets.QLabel(SignalFrame) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.lErrorTolerance.sizePolicy().hasHeightForWidth()) self.lErrorTolerance.setSizePolicy(sizePolicy) self.lErrorTolerance.setMinimumSize(QtCore.QSize(0, 0)) self.lErrorTolerance.setMaximumSize(QtCore.QSize(16777215, 16777215)) self.lErrorTolerance.setObjectName("lErrorTolerance") self.gridLayout_2.addWidget(self.lErrorTolerance, 7, 0, 1, 1) self.lSignalViewText = QtWidgets.QLabel(SignalFrame) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.lSignalViewText.sizePolicy().hasHeightForWidth()) self.lSignalViewText.setSizePolicy(sizePolicy) font = QtGui.QFont() font.setUnderline(False) 
self.lSignalViewText.setFont(font) self.lSignalViewText.setObjectName("lSignalViewText") self.gridLayout_2.addWidget(self.lSignalViewText, 15, 0, 1, 1) self.line = QtWidgets.QFrame(SignalFrame) self.line.setFrameShape(QtWidgets.QFrame.HLine) self.line.setFrameShadow(QtWidgets.QFrame.Sunken) self.line.setObjectName("line") self.gridLayout_2.addWidget(self.line, 13, 0, 1, 2) self.lCenterOffset = QtWidgets.QLabel(SignalFrame) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.lCenterOffset.sizePolicy().hasHeightForWidth()) self.lCenterOffset.setSizePolicy(sizePolicy) self.lCenterOffset.setMinimumSize(QtCore.QSize(0, 0)) self.lCenterOffset.setMaximumSize(QtCore.QSize(16777215, 16777215)) self.lCenterOffset.setWhatsThis("") self.lCenterOffset.setObjectName("lCenterOffset") self.gridLayout_2.addWidget(self.lCenterOffset, 3, 0, 1, 1) self.spinBoxCenterOffset = QtWidgets.QDoubleSpinBox(SignalFrame) self.spinBoxCenterOffset.setMinimumSize(QtCore.QSize(100, 0)) self.spinBoxCenterOffset.setMaximumSize(QtCore.QSize(16777215, 16777215)) self.spinBoxCenterOffset.setDecimals(4) self.spinBoxCenterOffset.setMinimum(-3.15) self.spinBoxCenterOffset.setMaximum(6.28) self.spinBoxCenterOffset.setSingleStep(0.0001) self.spinBoxCenterOffset.setObjectName("spinBoxCenterOffset") self.gridLayout_2.addWidget(self.spinBoxCenterOffset, 3, 1, 1, 1) self.btnAutoDetect = QtWidgets.QPushButton(SignalFrame) icon = QtGui.QIcon.fromTheme("system-software-update") self.btnAutoDetect.setIcon(icon) self.btnAutoDetect.setIconSize(QtCore.QSize(16, 16)) self.btnAutoDetect.setCheckable(True) self.btnAutoDetect.setChecked(True) self.btnAutoDetect.setObjectName("btnAutoDetect") self.gridLayout_2.addWidget(self.btnAutoDetect, 11, 0, 1, 2) self.cbSignalView = QtWidgets.QComboBox(SignalFrame) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.cbSignalView.sizePolicy().hasHeightForWidth()) self.cbSignalView.setSizePolicy(sizePolicy) self.cbSignalView.setObjectName("cbSignalView") self.cbSignalView.addItem("") self.cbSignalView.addItem("") self.cbSignalView.addItem("") self.gridLayout_2.addWidget(self.cbSignalView, 15, 1, 1, 1) self.gridLayout = QtWidgets.QGridLayout() self.gridLayout.setObjectName("gridLayout") self.btnSaveSignal = QtWidgets.QToolButton(SignalFrame) self.btnSaveSignal.setMinimumSize(QtCore.QSize(24, 24)) self.btnSaveSignal.setMaximumSize(QtCore.QSize(24, 24)) icon = QtGui.QIcon.fromTheme("document-save") self.btnSaveSignal.setIcon(icon) self.btnSaveSignal.setObjectName("btnSaveSignal") self.gridLayout.addWidget(self.btnSaveSignal, 0, 3, 1, 1) spacerItem1 = QtWidgets.QSpacerItem(10, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.gridLayout.addItem(spacerItem1, 0, 2, 1, 1) self.btnCloseSignal = QtWidgets.QToolButton(SignalFrame) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.btnCloseSignal.sizePolicy().hasHeightForWidth()) self.btnCloseSignal.setSizePolicy(sizePolicy) self.btnCloseSignal.setMinimumSize(QtCore.QSize(24, 24)) self.btnCloseSignal.setMaximumSize(QtCore.QSize(24, 24)) self.btnCloseSignal.setStyleSheet("color:red;") icon = 
QtGui.QIcon.fromTheme("window-close") self.btnCloseSignal.setIcon(icon) self.btnCloseSignal.setObjectName("btnCloseSignal") self.gridLayout.addWidget(self.btnCloseSignal, 0, 9, 1, 1) self.lSignalTyp = QtWidgets.QLabel(SignalFrame) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.lSignalTyp.sizePolicy().hasHeightForWidth()) self.lSignalTyp.setSizePolicy(sizePolicy) self.lSignalTyp.setObjectName("lSignalTyp") self.gridLayout.addWidget(self.lSignalTyp, 0, 1, 1, 1) self.lSignalNr = QtWidgets.QLabel(SignalFrame) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Fixed) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.lSignalNr.sizePolicy().hasHeightForWidth()) self.lSignalNr.setSizePolicy(sizePolicy) self.lSignalNr.setWordWrap(False) self.lSignalNr.setIndent(-1) self.lSignalNr.setObjectName("lSignalNr") self.gridLayout.addWidget(self.lSignalNr, 0, 0, 1, 1) self.btnInfo = QtWidgets.QToolButton(SignalFrame) self.btnInfo.setMinimumSize(QtCore.QSize(24, 24)) self.btnInfo.setMaximumSize(QtCore.QSize(24, 24)) icon = QtGui.QIcon.fromTheme("dialog-information") self.btnInfo.setIcon(icon) self.btnInfo.setObjectName("btnInfo") self.gridLayout.addWidget(self.btnInfo, 0, 6, 1, 1) self.btnReplay = QtWidgets.QToolButton(SignalFrame) self.btnReplay.setMinimumSize(QtCore.QSize(24, 24)) self.btnReplay.setMaximumSize(QtCore.QSize(24, 24)) self.btnReplay.setText("") icon = QtGui.QIcon.fromTheme("media-playback-start") self.btnReplay.setIcon(icon) self.btnReplay.setObjectName("btnReplay") self.gridLayout.addWidget(self.btnReplay, 0, 5, 1, 1) self.gridLayout_2.addLayout(self.gridLayout, 0, 0, 1, 2) self.labelFFTWindowSize = QtWidgets.QLabel(SignalFrame) self.labelFFTWindowSize.setObjectName("labelFFTWindowSize") self.gridLayout_2.addWidget(self.labelFFTWindowSize, 18, 0, 1, 1) self.sliderFFTWindowSize = QtWidgets.QSlider(SignalFrame) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.sliderFFTWindowSize.sizePolicy().hasHeightForWidth()) self.sliderFFTWindowSize.setSizePolicy(sizePolicy) self.sliderFFTWindowSize.setMinimum(6) self.sliderFFTWindowSize.setMaximum(15) self.sliderFFTWindowSize.setOrientation(QtCore.Qt.Horizontal) self.sliderFFTWindowSize.setObjectName("sliderFFTWindowSize") self.gridLayout_2.addWidget(self.sliderFFTWindowSize, 18, 1, 1, 1) self.sliderSpectrogramMax = QtWidgets.QSlider(SignalFrame) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.sliderSpectrogramMax.sizePolicy().hasHeightForWidth()) self.sliderSpectrogramMax.setSizePolicy(sizePolicy) self.sliderSpectrogramMax.setMinimum(-150) self.sliderSpectrogramMax.setMaximum(10) self.sliderSpectrogramMax.setOrientation(QtCore.Qt.Horizontal) self.sliderSpectrogramMax.setObjectName("sliderSpectrogramMax") self.gridLayout_2.addWidget(self.sliderSpectrogramMax, 20, 1, 1, 1) self.labelSpectrogramMin = QtWidgets.QLabel(SignalFrame) self.labelSpectrogramMin.setObjectName("labelSpectrogramMin") self.gridLayout_2.addWidget(self.labelSpectrogramMin, 19, 0, 1, 1) self.labelSpectrogramMax = QtWidgets.QLabel(SignalFrame) 
self.labelSpectrogramMax.setObjectName("labelSpectrogramMax") self.gridLayout_2.addWidget(self.labelSpectrogramMax, 20, 0, 1, 1) self.horizontalLayout.addLayout(self.gridLayout_2) self.splitter = QtWidgets.QSplitter(SignalFrame) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.splitter.sizePolicy().hasHeightForWidth()) self.splitter.setSizePolicy(sizePolicy) self.splitter.setStyleSheet("QSplitter::handle:vertical {\n" "margin: 4px 0px;\n" " background-color: qlineargradient(x1:0, y1:0, x2:1, y2:0, \n" "stop:0 rgba(255, 255, 255, 0), \n" "stop:0.5 rgba(100, 100, 100, 100), \n" "stop:1 rgba(255, 255, 255, 0));\n" " image: url(:/icons/data/icons/splitter_handle_horizontal.svg);\n" "}") self.splitter.setFrameShape(QtWidgets.QFrame.NoFrame) self.splitter.setLineWidth(1) self.splitter.setOrientation(QtCore.Qt.Vertical) self.splitter.setHandleWidth(6) self.splitter.setChildrenCollapsible(False) self.splitter.setObjectName("splitter") self.layoutWidget = QtWidgets.QWidget(self.splitter) self.layoutWidget.setObjectName("layoutWidget") self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget) self.verticalLayout.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint) self.verticalLayout.setContentsMargins(0, 0, 0, 0) self.verticalLayout.setObjectName("verticalLayout") self.horizontalLayout_2 = QtWidgets.QHBoxLayout() self.horizontalLayout_2.setObjectName("horizontalLayout_2") self.stackedWidget = QtWidgets.QStackedWidget(self.layoutWidget) self.stackedWidget.setLineWidth(0) self.stackedWidget.setObjectName("stackedWidget") self.pageSignal = QtWidgets.QWidget() self.pageSignal.setObjectName("pageSignal") self.horizontalLayout_6 = QtWidgets.QHBoxLayout(self.pageSignal) self.horizontalLayout_6.setContentsMargins(0, 0, 0, 0) self.horizontalLayout_6.setSpacing(0) self.horizontalLayout_6.setObjectName("horizontalLayout_6") self.gvLegend = LegendGraphicView(self.pageSignal) self.gvLegend.setMinimumSize(QtCore.QSize(0, 150)) self.gvLegend.setMaximumSize(QtCore.QSize(30, 16777215)) self.gvLegend.setFrameShape(QtWidgets.QFrame.NoFrame) self.gvLegend.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff) self.gvLegend.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn) self.gvLegend.setInteractive(False) self.gvLegend.setResizeAnchor(QtWidgets.QGraphicsView.AnchorViewCenter) self.gvLegend.setRubberBandSelectionMode(QtCore.Qt.ContainsItemShape) self.gvLegend.setOptimizationFlags(QtWidgets.QGraphicsView.DontSavePainterState) self.gvLegend.setObjectName("gvLegend") self.horizontalLayout_6.addWidget(self.gvLegend) self.gvSignal = EpicGraphicView(self.pageSignal) self.gvSignal.setEnabled(True) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.gvSignal.sizePolicy().hasHeightForWidth()) self.gvSignal.setSizePolicy(sizePolicy) self.gvSignal.setMinimumSize(QtCore.QSize(0, 150)) self.gvSignal.setMaximumSize(QtCore.QSize(16777215, 16777215)) self.gvSignal.setMouseTracking(True) self.gvSignal.setFocusPolicy(QtCore.Qt.WheelFocus) self.gvSignal.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu) self.gvSignal.setAutoFillBackground(False) self.gvSignal.setStyleSheet("") self.gvSignal.setFrameShape(QtWidgets.QFrame.NoFrame) self.gvSignal.setFrameShadow(QtWidgets.QFrame.Raised) 
self.gvSignal.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff) self.gvSignal.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn) self.gvSignal.setInteractive(False) self.gvSignal.setRenderHints(QtGui.QPainter.Antialiasing|QtGui.QPainter.TextAntialiasing) self.gvSignal.setDragMode(QtWidgets.QGraphicsView.NoDrag) self.gvSignal.setCacheMode(QtWidgets.QGraphicsView.CacheNone) self.gvSignal.setTransformationAnchor(QtWidgets.QGraphicsView.NoAnchor) self.gvSignal.setResizeAnchor(QtWidgets.QGraphicsView.NoAnchor) self.gvSignal.setViewportUpdateMode(QtWidgets.QGraphicsView.MinimalViewportUpdate) self.gvSignal.setRubberBandSelectionMode(QtCore.Qt.ContainsItemShape) self.gvSignal.setOptimizationFlags(QtWidgets.QGraphicsView.DontClipPainter|QtWidgets.QGraphicsView.DontSavePainterState) self.gvSignal.setObjectName("gvSignal") self.horizontalLayout_6.addWidget(self.gvSignal) self.stackedWidget.addWidget(self.pageSignal) self.pageSpectrogram = QtWidgets.QWidget() self.pageSpectrogram.setObjectName("pageSpectrogram") self.horizontalLayout_4 = QtWidgets.QHBoxLayout(self.pageSpectrogram) self.horizontalLayout_4.setContentsMargins(0, 0, 0, 0) self.horizontalLayout_4.setSpacing(0) self.horizontalLayout_4.setObjectName("horizontalLayout_4") self.gvSpectrogram = SpectrogramGraphicView(self.pageSpectrogram) self.gvSpectrogram.setMouseTracking(True) self.gvSpectrogram.setFrameShape(QtWidgets.QFrame.NoFrame) self.gvSpectrogram.setInteractive(False) self.gvSpectrogram.setRenderHints(QtGui.QPainter.TextAntialiasing) self.gvSpectrogram.setCacheMode(QtWidgets.QGraphicsView.CacheNone) self.gvSpectrogram.setTransformationAnchor(QtWidgets.QGraphicsView.NoAnchor) self.gvSpectrogram.setViewportUpdateMode(QtWidgets.QGraphicsView.MinimalViewportUpdate) self.gvSpectrogram.setOptimizationFlags(QtWidgets.QGraphicsView.DontClipPainter|QtWidgets.QGraphicsView.DontSavePainterState) self.gvSpectrogram.setObjectName("gvSpectrogram") self.horizontalLayout_4.addWidget(self.gvSpectrogram) self.stackedWidget.addWidget(self.pageSpectrogram) self.horizontalLayout_2.addWidget(self.stackedWidget) self.verticalLayout_5 = QtWidgets.QVBoxLayout() self.verticalLayout_5.setObjectName("verticalLayout_5") self.lYScale = QtWidgets.QLabel(self.layoutWidget) self.lYScale.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates)) self.lYScale.setObjectName("lYScale") self.verticalLayout_5.addWidget(self.lYScale) self.sliderYScale = QtWidgets.QSlider(self.layoutWidget) self.sliderYScale.setMinimum(1) self.sliderYScale.setMaximum(100) self.sliderYScale.setOrientation(QtCore.Qt.Vertical) self.sliderYScale.setTickPosition(QtWidgets.QSlider.TicksBelow) self.sliderYScale.setObjectName("sliderYScale") self.verticalLayout_5.addWidget(self.sliderYScale) self.horizontalLayout_2.addLayout(self.verticalLayout_5) self.verticalLayout.addLayout(self.horizontalLayout_2) self.horizontalLayout_3 = QtWidgets.QHBoxLayout() self.horizontalLayout_3.setObjectName("horizontalLayout_3") self.btnShowHideStartEnd = QtWidgets.QToolButton(self.layoutWidget) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.btnShowHideStartEnd.sizePolicy().hasHeightForWidth()) self.btnShowHideStartEnd.setSizePolicy(sizePolicy) self.btnShowHideStartEnd.setAutoFillBackground(False) self.btnShowHideStartEnd.setStyleSheet("") icon = QtGui.QIcon.fromTheme("arrow-down-double") self.btnShowHideStartEnd.setIcon(icon) 
self.btnShowHideStartEnd.setCheckable(True) self.btnShowHideStartEnd.setObjectName("btnShowHideStartEnd") self.horizontalLayout_3.addWidget(self.btnShowHideStartEnd) self.lNumSelectedSamples = QtWidgets.QLabel(self.layoutWidget) self.lNumSelectedSamples.setObjectName("lNumSelectedSamples") self.horizontalLayout_3.addWidget(self.lNumSelectedSamples) self.lTextSelectedSamples = QtWidgets.QLabel(self.layoutWidget) self.lTextSelectedSamples.setObjectName("lTextSelectedSamples") self.horizontalLayout_3.addWidget(self.lTextSelectedSamples) self.line_3 = QtWidgets.QFrame(self.layoutWidget) self.line_3.setFrameShape(QtWidgets.QFrame.VLine) self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_3.setObjectName("line_3") self.horizontalLayout_3.addWidget(self.line_3) self.lDuration = QtWidgets.QLabel(self.layoutWidget) self.lDuration.setObjectName("lDuration") self.horizontalLayout_3.addWidget(self.lDuration) self.line_2 = QtWidgets.QFrame(self.layoutWidget) self.line_2.setFrameShape(QtWidgets.QFrame.VLine) self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_2.setObjectName("line_2") self.horizontalLayout_3.addWidget(self.line_2) self.labelRSSI = QtWidgets.QLabel(self.layoutWidget) self.labelRSSI.setObjectName("labelRSSI") self.horizontalLayout_3.addWidget(self.labelRSSI) spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_3.addItem(spacerItem2) self.btnFilter = QtWidgets.QToolButton(self.layoutWidget) icon = QtGui.QIcon.fromTheme("view-filter") self.btnFilter.setIcon(icon) self.btnFilter.setPopupMode(QtWidgets.QToolButton.MenuButtonPopup) self.btnFilter.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon) self.btnFilter.setArrowType(QtCore.Qt.NoArrow) self.btnFilter.setObjectName("btnFilter") self.horizontalLayout_3.addWidget(self.btnFilter) self.verticalLayout.addLayout(self.horizontalLayout_3) self.additionalInfos = QtWidgets.QHBoxLayout() self.additionalInfos.setSpacing(6) self.additionalInfos.setObjectName("additionalInfos") self.lStart = QtWidgets.QLabel(self.layoutWidget) self.lStart.setObjectName("lStart") self.additionalInfos.addWidget(self.lStart) self.spinBoxSelectionStart = QtWidgets.QSpinBox(self.layoutWidget) self.spinBoxSelectionStart.setReadOnly(False) self.spinBoxSelectionStart.setMaximum(99999999) self.spinBoxSelectionStart.setObjectName("spinBoxSelectionStart") self.additionalInfos.addWidget(self.spinBoxSelectionStart) self.lEnd = QtWidgets.QLabel(self.layoutWidget) self.lEnd.setObjectName("lEnd") self.additionalInfos.addWidget(self.lEnd) self.spinBoxSelectionEnd = QtWidgets.QSpinBox(self.layoutWidget) self.spinBoxSelectionEnd.setMaximum(99999999) self.spinBoxSelectionEnd.setObjectName("spinBoxSelectionEnd") self.additionalInfos.addWidget(self.spinBoxSelectionEnd) spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.additionalInfos.addItem(spacerItem3) self.lZoomText = QtWidgets.QLabel(self.layoutWidget) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.lZoomText.sizePolicy().hasHeightForWidth()) self.lZoomText.setSizePolicy(sizePolicy) self.lZoomText.setMinimumSize(QtCore.QSize(0, 0)) self.lZoomText.setMaximumSize(QtCore.QSize(16777215, 16777215)) font = QtGui.QFont() font.setItalic(False) font.setUnderline(False) self.lZoomText.setFont(font) 
self.lZoomText.setTextFormat(QtCore.Qt.PlainText) self.lZoomText.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) self.lZoomText.setObjectName("lZoomText") self.additionalInfos.addWidget(self.lZoomText) self.spinBoxXZoom = QtWidgets.QSpinBox(self.layoutWidget) self.spinBoxXZoom.setMinimum(100) self.spinBoxXZoom.setMaximum(999999999) self.spinBoxXZoom.setObjectName("spinBoxXZoom") self.additionalInfos.addWidget(self.spinBoxXZoom) spacerItem4 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.additionalInfos.addItem(spacerItem4) self.lSamplesInView = QtWidgets.QLabel(self.layoutWidget) self.lSamplesInView.setObjectName("lSamplesInView") self.additionalInfos.addWidget(self.lSamplesInView) self.lStrich = QtWidgets.QLabel(self.layoutWidget) self.lStrich.setObjectName("lStrich") self.additionalInfos.addWidget(self.lStrich) self.lSamplesTotal = QtWidgets.QLabel(self.layoutWidget) self.lSamplesTotal.setObjectName("lSamplesTotal") self.additionalInfos.addWidget(self.lSamplesTotal) self.lSamplesViewText = QtWidgets.QLabel(self.layoutWidget) self.lSamplesViewText.setObjectName("lSamplesViewText") self.additionalInfos.addWidget(self.lSamplesViewText) self.verticalLayout.addLayout(self.additionalInfos) self.txtEdProto = TextEditProtocolView(self.splitter) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.MinimumExpanding) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.txtEdProto.sizePolicy().hasHeightForWidth()) self.txtEdProto.setSizePolicy(sizePolicy) self.txtEdProto.setMinimumSize(QtCore.QSize(0, 80)) self.txtEdProto.setMaximumSize(QtCore.QSize(16777215, 16777215)) self.txtEdProto.setBaseSize(QtCore.QSize(0, 0)) self.txtEdProto.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu) self.txtEdProto.setAcceptDrops(False) self.txtEdProto.setObjectName("txtEdProto") self.horizontalLayout.addWidget(self.splitter) self.retranslateUi(SignalFrame) self.stackedWidget.setCurrentIndex(0) QtCore.QMetaObject.connectSlotsByName(SignalFrame) SignalFrame.setTabOrder(self.btnSaveSignal, self.btnInfo) SignalFrame.setTabOrder(self.btnInfo, self.btnCloseSignal) SignalFrame.setTabOrder(self.btnCloseSignal, self.lineEditSignalName) SignalFrame.setTabOrder(self.lineEditSignalName, self.spinBoxNoiseTreshold) SignalFrame.setTabOrder(self.spinBoxNoiseTreshold, self.spinBoxCenterOffset) SignalFrame.setTabOrder(self.spinBoxCenterOffset, self.spinBoxInfoLen) SignalFrame.setTabOrder(self.spinBoxInfoLen, self.spinBoxTolerance) SignalFrame.setTabOrder(self.spinBoxTolerance, self.chkBoxShowProtocol) SignalFrame.setTabOrder(self.chkBoxShowProtocol, self.cbProtoView) SignalFrame.setTabOrder(self.cbProtoView, self.chkBoxSyncSelection) SignalFrame.setTabOrder(self.chkBoxSyncSelection, self.txtEdProto) SignalFrame.setTabOrder(self.txtEdProto, self.btnShowHideStartEnd) SignalFrame.setTabOrder(self.btnShowHideStartEnd, self.spinBoxSelectionStart) SignalFrame.setTabOrder(self.spinBoxSelectionStart, self.spinBoxSelectionEnd) def retranslateUi(self, SignalFrame): _translate = QtCore.QCoreApplication.translate SignalFrame.setWindowTitle(_translate("SignalFrame", "Frame")) self.cbModulationType.setToolTip(_translate("SignalFrame", "<html><head/><body><p>Choose signals modulation:</p><ul><li>Amplitude Shift Keying (ASK)</li><li>Frequency Shift Keying (FSK)</li><li>Phase Shift Keying (PSK)</li></ul></body></html>")) self.cbModulationType.setItemText(0, 
_translate("SignalFrame", "ASK")) self.cbModulationType.setItemText(1, _translate("SignalFrame", "FSK")) self.cbModulationType.setItemText(2, _translate("SignalFrame", "PSK")) self.btnAdvancedModulationSettings.setText(_translate("SignalFrame", "...")) self.labelModulation.setText(_translate("SignalFrame", "Modulation:")) self.chkBoxSyncSelection.setToolTip(_translate("SignalFrame", "If this is set to true, your selected protocol bits will show up in the signal view, and vice versa.")) self.chkBoxSyncSelection.setText(_translate("SignalFrame", "Sync Selection")) self.spinBoxNoiseTreshold.setToolTip(_translate("SignalFrame", "<html><head/><body><p>Set the <span style=\" font-weight:600;\">noise magnitude</span> of your signal. You can tune this value to mute noise in your signal and reveal the true data.</p></body></html>")) self.chkBoxShowProtocol.setToolTip(_translate("SignalFrame", "Show the extracted protocol based on the parameters InfoLen, PauseLen and ZeroTreshold (in QuadratureDemod-View).\n" "\n" "If you want your protocol to be better seperated, edit the PauseLen using right-click menu from a selection in SignalView or ProtocolView.")) self.chkBoxShowProtocol.setText(_translate("SignalFrame", "Show Signal as")) self.labelNoise.setToolTip(_translate("SignalFrame", "<html><head/><body><p>Set the <span style=\" font-weight:600;\">noise magnitude</span> of your signal. You can tune this value to mute noise in your signal and reveal the true data.</p></body></html>")) self.labelNoise.setText(_translate("SignalFrame", "Noise:")) self.lineEditSignalName.setText(_translate("SignalFrame", "SignalName")) self.cbProtoView.setItemText(0, _translate("SignalFrame", "Bits")) self.cbProtoView.setItemText(1, _translate("SignalFrame", "Hex")) self.cbProtoView.setItemText(2, _translate("SignalFrame", "ASCII")) self.lInfoLenText.setToolTip(_translate("SignalFrame", "<html><head/><body><p>This is the length of one (raw) bit <span style=\" font-weight:600;\">in samples</span>.</p><p><br/></p><p>Tune this value using either <span style=\" font-style:italic;\">the spinbox on the right</span> or the <span style=\" font-style:italic;\">context-menu of the SignalView</span>.</p></body></html>")) self.lInfoLenText.setText(_translate("SignalFrame", "Bit Length:")) self.spinBoxInfoLen.setToolTip(_translate("SignalFrame", "<html><head/><body><p>This is the length of one (raw) bit <span style=\" font-weight:600;\">in samples</span>.</p><p><br/></p><p>Tune this value using either <span style=\" font-style:italic;\">the spinbox on the right</span> or the <span style=\" font-style:italic;\">context-menu of the SignalView</span>.</p></body></html>")) self.spinBoxTolerance.setToolTip(_translate("SignalFrame", "<html><head/><body><p>This is the error tolerance for determining the <span style=\" font-weight:600;\">pulse lengths</span> in the demodulated signal.</p><p><span style=\" font-weight:400; font-style:italic;\">Example:</span> Say, we are reading a ones pulse and the tolerance value was set to 5. 
Then 5 errors (which must follow sequentially) are accepted.</p><p>Tune this value if you have <span style=\" font-weight:600;\">spiky data</span> after demodulation.</p></body></html>")) self.lErrorTolerance.setToolTip(_translate("SignalFrame", "<html><head/><body><p>This is the error tolerance for determining the <span style=\" font-weight:600;\">pulse lengths</span> in the demodulated signal.</p><p><span style=\" font-weight:400; font-style:italic;\">Example:</span> Say, we are reading a ones pulse and the tolerance value was set to 5. Then 5 errors (which must follow sequentially) are accepted.</p><p>Tune this value if you have <span style=\" font-weight:600;\">spiky data</span> after demodulation.</p></body></html>")) self.lErrorTolerance.setText(_translate("SignalFrame", "Error Tolerance:")) self.lSignalViewText.setText(_translate("SignalFrame", "Signal View:")) self.lCenterOffset.setToolTip(_translate("SignalFrame", "<html><head/><body><p>This is the threshold used for determining if a <span style=\" font-weight:600;\">bit is one or zero</span>. You can set it here or grab the middle of the area in <span style=\" font-style:italic;\">Quadrature Demod View.</span></p></body></html>")) self.lCenterOffset.setText(_translate("SignalFrame", "Center:")) self.spinBoxCenterOffset.setToolTip(_translate("SignalFrame", "<html><head/><body><p>This is the threshold used for determining if a <span style=\" font-weight:600;\">bit is one or zero</span>. You can set it here or grab the middle of the area in <span style=\" font-style:italic;\">Quadrature Demod View</span>.</p></body></html>")) self.btnAutoDetect.setToolTip(_translate("SignalFrame", "<html><head/><body><p>Automatically detect Center and Bit Length, when you change the demodulation type. You can disable this behaviour for faster switching between demodulations.</p></body></html>")) self.btnAutoDetect.setText(_translate("SignalFrame", "Autodetect parameters")) self.cbSignalView.setToolTip(_translate("SignalFrame", "<html><head/><body><p>Choose the view of your signal. Analog, Demodulated or Spectrogram.</p><p>The quadrature demodulation uses a <span style=\" font-weight:600;\">treshold of magnitude,</span> to <span style=\" font-weight:600;\">supress noise</span>. 
All samples with a magnitude lower than this treshold will be eliminated (set to <span style=\" font-style:italic;\">-127</span>) after demod.</p><p>Tune this value by selecting a <span style=\" font-style:italic;\">noisy area</span> and mark it as noise using <span style=\" font-weight:600;\">context menu</span>.</p><p>Current noise treshold is: </p></body></html>")) self.cbSignalView.setItemText(0, _translate("SignalFrame", "Analog")) self.cbSignalView.setItemText(1, _translate("SignalFrame", "Demodulated")) self.cbSignalView.setItemText(2, _translate("SignalFrame", "Spectrogram")) self.btnSaveSignal.setText(_translate("SignalFrame", "...")) self.btnCloseSignal.setText(_translate("SignalFrame", "X")) self.lSignalTyp.setText(_translate("SignalFrame", "<Signaltyp>")) self.lSignalNr.setText(_translate("SignalFrame", "1:")) self.btnInfo.setText(_translate("SignalFrame", "...")) self.btnReplay.setToolTip(_translate("SignalFrame", "Replay signal")) self.labelFFTWindowSize.setText(_translate("SignalFrame", "FFT Window Size:")) self.labelSpectrogramMin.setText(_translate("SignalFrame", "Data<sub>min</sub>:")) self.labelSpectrogramMax.setText(_translate("SignalFrame", "Data<sub>max</sub>:")) self.lYScale.setText(_translate("SignalFrame", "Y-Scale")) self.btnShowHideStartEnd.setText(_translate("SignalFrame", "-")) self.lNumSelectedSamples.setToolTip(_translate("SignalFrame", "Number of currently selected samples.")) self.lNumSelectedSamples.setText(_translate("SignalFrame", "0")) self.lTextSelectedSamples.setToolTip(_translate("SignalFrame", "Number of currently selected samples.")) self.lTextSelectedSamples.setText(_translate("SignalFrame", "selected")) self.lDuration.setText(_translate("SignalFrame", "42 µs")) self.labelRSSI.setText(_translate("SignalFrame", "RSSI: 0,434")) self.btnFilter.setText(_translate("SignalFrame", "Filter (moving average)")) self.lStart.setText(_translate("SignalFrame", "Start:")) self.lEnd.setText(_translate("SignalFrame", "End:")) self.lZoomText.setToolTip(_translate("SignalFrame", "<html><head/><body><p>Current (relative) Zoom. Standard is 100%, if you zoom in, this factor increases. You can directly set a value in the spinbox or use the <span style=\" font-weight:600;\">mousewheel to zoom</span>.</p></body></html>")) self.lZoomText.setText(_translate("SignalFrame", "X-Zoom:")) self.spinBoxXZoom.setToolTip(_translate("SignalFrame", "<html><head/><body><p>Current (relative) Zoom. Standard is 100%, if you zoom in, this factor increases. You can directly set a value in the spinbox or use the <span style=\" font-weight:600;\">mousewheel to zoom</span>.</p></body></html>")) self.spinBoxXZoom.setSuffix(_translate("SignalFrame", "%")) self.lSamplesInView.setText(_translate("SignalFrame", "0")) self.lStrich.setText(_translate("SignalFrame", "/")) self.lSamplesTotal.setText(_translate("SignalFrame", "0")) self.lSamplesViewText.setText(_translate("SignalFrame", "Samples in view")) from urh.ui.views.EpicGraphicView import EpicGraphicView from urh.ui.views.LegendGraphicView import LegendGraphicView from urh.ui.views.SpectrogramGraphicView import SpectrogramGraphicView from urh.ui.views.TextEditProtocolView import TextEditProtocolView from . import urh_rc
gpl-3.0
-743,312,303,342,755,300
68.472269
688
0.734614
false
drtoful/thrush
doc/source/conf.py
1
7801
# -*- coding: utf-8 -*- # # thrush documentation build configuration file, created by # sphinx-quickstart on Sun May 5 16:31:03 2013. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('./../../')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc'] # Add any paths that contain templates here, relative to this directory. templates_path = [] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'thrush' copyright = u'2013, Tobias Heinzen' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.2' # The full version, including alpha/beta/rc tags. release = '0.2' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'flasky' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = {'index_logo': False, 'github_fork': False} # Add any paths that contain custom themes here, relative to this directory. html_theme_path = ['../theme'] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". 
#html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['style.css'] html_style = 'style.css' # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'thrushdoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'thrush.tex', u'thrush Documentation', u'Tobias Heinzen', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'thrush', u'thrush Documentation', [u'Tobias Heinzen'], 1) ] # If true, show URL addresses after external links. 
#man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'thrush', u'thrush Documentation', u'Tobias Heinzen', 'thrush', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote'
bsd-3-clause
1,619,120,018,686,060,300
31.102881
80
0.702987
false
appsembler/roles
logstash/templates/remove_old_indices.py
1
1444
#!/usr/bin/env python import logging import sys import curator import elasticsearch import certifi HOSTS = ["{{ logstash_output_elasticsearch_hosts | join('\", \"') }}"] USERNAME = '{{ logstash_output_elasticsearch_user }}' PASSWORD = '{{ logstash_output_elasticsearch_password }}' DELETE_OLDER_THAN = {{ logstash_remove_older_than }} def main(): for host in HOSTS: scheme, _, domain = host.rpartition('://') scheme = scheme if scheme else 'http' basic_auth_uri = '{}://{}:{}@{}'.format(scheme, USERNAME, PASSWORD, domain) client = elasticsearch.Elasticsearch([basic_auth_uri], verify_certs=True, ca_certs=certifi.where()) index_list = curator.IndexList(client) index_list.filter_by_regex(kind='prefix', value='logstash-') index_list.filter_by_age(source='name', direction='older', timestring='%Y.%m.%d', unit='days', unit_count=DELETE_OLDER_THAN) if len(index_list.indices): logging.info('Deleting indices: {}' .format(', '.join(index_list.indices))) delete_indices = curator.DeleteIndices(index_list) delete_indices.do_action() else: logging.info('No indices to delete') if __name__ == '__main__': logging.basicConfig(stream=sys.stdout, level=logging.INFO) main()
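A sketch of how the Jinja2 placeholders above might render once Ansible applies the template; every concrete value below is hypothetical and only shows the expected shape of the generated constants (host list, credentials, retention in days).

# hypothetical rendered header (not part of the original template)
HOSTS = ["https://es1.example.org:9200", "https://es2.example.org:9200"]
USERNAME = 'logstash_writer'
PASSWORD = 'example-secret'
DELETE_OLDER_THAN = 30  # logstash-YYYY.MM.DD indices older than 30 days are deleted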
mit
-3,186,540,673,562,931,000
33.380952
83
0.580332
false
cogitare-ai/cogitare
cogitare/data/dataholder.py
1
15342
import torch import math from abc import ABCMeta, abstractmethod from cogitare import utils from six import add_metaclass import numpy from dask import threaded, delayed, compute, multiprocessing def _identity(x): return x @add_metaclass(ABCMeta) class AbsDataHolder(object): """ An abstract object that acts as a data holder. A data holder is a utility to hold datasets, which provide some simple functions to work with the dataset, such as sorting, splitting, dividing it into chunks, loading batches using multi-thread, and so on. It's the recommended way to pass data to Cogitare's models because it already provides a compatible interface to iterate over batches. To improve the performance, the data holder loads batches using multiprocessing and multithreading data loader with `Dask <http://dask.pydata.org/>`_. Usually, this object should not be used directly, only if you are developing a custom data loader. Cogitare already provides the following implementations for the most common data types: - Tensors: :class:`~cogitare.data.TensorHolder` - Numpy: :class:`~cogitare.data.NumpyHolder` - Callable (functions that receive the sample id, and returns its data): :class:`~cogitare.data.CallableHolder` - :class:`~cogitare.data.AutoHolder`: inspect the data to choose one of the available data holders. Args: data (torch.Tensor, numpy.ndarray, callable): the data to be managed by the data holder. batch_size (int): the size of the batch. shuffle (bool): if True, shuffles the dataset after each iteration. drop_last (bool): if True, then skip the batch if its size is lower that **batch_size** (can occur in the last batch). total_samples (int): the number of total samples. If provided, this will limit the number of samples to be accessed in the data. mode (str): must be one of: 'sequential', 'threaded', 'multiprocessing'. Use one of them to choose the batch loading methods. Take a loook here: https://dask.pydata.org/en/latest/scheduler-choice.html for an overview of the advantage of each mode. single (bool): if True, returns only the first element of each batch. Is designed to be used with models where you only use one sample per batch (batch_size == 1). So instead of returning a list with a single sample, with ``single == True``, the sample itself will be returned and not the list. on_sample_loaded (callable): if provided, this function will be called when a new sample is loaded. It must receive one argument, the sample. And return one value that will replace the sample data. This is used to apply pre-processing on single samples while loading. on_batch_loaded (callable): if provided, this function will be called when a new batch is loaded. It must receive one argument, the batch data. And return the batch after applying some operation on the data. This can be used to apply pre-processing functions on a batch of data (such as image filtering, moving the data to GPU, and etc). """ @property def total_samples(self): """Returns the number of individual samples in this dataset. 
""" return self._total_samples @total_samples.setter def total_samples(self, value): if hasattr(self._data, '__len__'): size = len(self._data) else: size = None if size is not None: utils.assert_raise(value <= size, ValueError, 'The value must be lesser or equal to the' 'length of the input data') utils.assert_raise(value >= 1, ValueError, 'number of samples must be greater or equal to 1') self._total_samples = value self._remaining_samples = value self._requires_reset = True @property def indices(self): if self._indices is None: self._indices = numpy.arange(self.total_samples) return self._indices @property def batch_size(self): """The size of the mini-batch used by the iterator. When a new batch_size is set, the iterator will reset. """ return self._batch_size @batch_size.setter def batch_size(self, value): self._batch_size = value self._requires_reset = True def __init__(self, data, batch_size=1, shuffle=True, drop_last=False, total_samples=None, mode='sequential', single=False, on_sample_loaded=None, on_batch_loaded=None): valid_modes = ['threaded', 'multiprocessing', 'sequential'] utils.assert_raise(mode in valid_modes, ValueError, '"mode" must be one of: ' + ', '.join(valid_modes)) if on_sample_loaded is None: on_sample_loaded = _identity if on_batch_loaded is None: on_batch_loaded = _identity self._indices = None self._single = single self._mode = mode self._total_samples = total_samples self._remaining_samples = None self._on_sample_loaded = on_sample_loaded self._on_batch_loaded = on_batch_loaded self._data = data self._batch_size = batch_size self._current_batch = 0 self._drop_last = drop_last self._shuffle = shuffle self._requires_reset = True if mode == 'sequential': self._get = None elif mode == 'threaded': self._get = threaded.get else: self._get = multiprocessing.get def _clone(self): return type(self)(data=self._data, batch_size=self._batch_size, shuffle=self._shuffle, drop_last=self._drop_last, total_samples=self._total_samples, mode=self._mode, single=self._single, on_sample_loaded=self._on_sample_loaded, on_batch_loaded=self._on_batch_loaded) def __repr__(self): """Using repr(data) or str(data), display the shape of the data. """ return '{} with {}x{} samples'.format(type(self).__name__, len(self), self._batch_size) def __getitem__(self, key): """Get a sample in the dataset using its indices. Example:: sample = data[0] sample2 = data[1] """ return self._on_sample_loaded(self.get_sample(self.indices[key])) def _get_batch_size(self): batch_size = min(self._batch_size, self._remaining_samples) if batch_size < self._batch_size and self._drop_last: self._requires_reset = True raise StopIteration if batch_size == 0: self._requires_reset = True raise StopIteration return batch_size def _get_batch(self): if self._requires_reset: self.reset() batch_size = self._get_batch_size() def load(loader): return [loader(self.__getitem__)(self._current_batch * self._batch_size + i) for i in range(batch_size)] if self._get: # use dask jobs = load(lambda x: delayed(x, traverse=False)) results = compute(jobs, scheduler=self._get)[0] else: results = load(_identity) self._current_batch += 1 self._remaining_samples -= batch_size results = self._on_batch_loaded(results) if self._single: return results[0] return results @abstractmethod def get_sample(self, key): pass def __len__(self): """Return the number of batches in the dataset. 
""" if self._drop_last: return self.total_samples // self._batch_size else: return (self.total_samples + self._batch_size - 1) // self._batch_size def __iter__(self): """Creates an iterator to iterate over batches in the dataset. After each iteration over the batches, the dataset will be shuffled if the **shuffle** parameter is True. Example:: for sample in data: print(sample) """ return self def __next__(self): return self._get_batch() next = __next__ def reset(self): """Reset the batch iterator. This method returns the iterator to the first sample, and shuffle the dataset if shuffle is enabled. """ self._requires_reset = False self._current_batch = 0 self._remaining_samples = self.total_samples if self._shuffle: self.shuffle() def shuffle(self): """Shuffle the samples in the dataset. This operation will not affect the original data. """ numpy.random.shuffle(self.indices) def split(self, ratio): """Split the data holder into two data holders. The first one will receive *total_samples * ratio* samples, and the second data holder will receive the remaining samples. Args: ratio (:obj:`float`): ratio of the split. Must be between 0 and 1. Returns: (data1, data2): two data holder, in the same type that the original. Example:: >>> print(data) TensorHolder with 875x64 samples >>> data1, data2 = data.split(0.8) >>> print(data1) TensorHolder with 700x64 samples >>> print(data2) TensorHolder with 175x64 samples """ utils.assert_raise(0 < ratio < 1, ValueError, '"ratio" must be between 0 and 1') pos = int(math.floor(self.total_samples * ratio)) data1 = self._clone() data2 = self._clone() data1._indices = self.indices[:pos] data2._indices = self.indices[pos:] data1._total_samples = pos data2._total_samples = self.total_samples - pos return data1, data2 def split_chunks(self, n): """Split the data holder into N data holders with the sample number of samples each. Args: n (int): number of new splits. Returns: output (list): list of N data holders. Example:: >>> print(data) TensorHolder with 875x64 samples >>> data1, data2, data3 = data.split_chunks(3) >>> print(data1) TensorHolder with 292x64 samples >>> print(data2) TensorHolder with 292x64 samples >>> print(data3) TensorHolder with 292x64 samples """ size = self.total_samples // n data = [] for i in range(n): begin, end = i * size, min((i + 1) * size, self.total_samples) holder = self._clone() holder._indices = self.indices[begin:end] holder._total_samples = end - begin data.append(holder) return data class CallableHolder(AbsDataHolder): """CallableHolder is a data holder for abritary data type. As data input, it uses a callable that receive the sample index as parameter, and must return the sample. It can be used to load non-Tensor or non-numpy datasets, such as texts, dicts, and anything else. You are free to use CallableHolder with any data type. .. note:: When using CallableHolder, you must specify the number of samples in the dataset. The callable will be called asking for samples from 0 to (total_samples - 1). Example:: >>> def load_sample(idx): ... return list(range(idx, idx + 10)) >>> # when using the CallableHolder. you must pass the number of samples to >>> # be loaded. 
>>> # you can set the total_samples using the parameter in the constructor >>> data = CallableHolder(load_sample, batch_size=8, total_samples=20) >>> # or by setting the property >>> data.total_samples = 20 >>> next(data) [[8, 9, 10, 11, 12, 13, 14, 15, 16, 17], [9, 10, 11, 12, 13, 14, 15, 16, 17, 18], [6, 7, 8, 9, 10, 11, 12, 13, 14, 15], [11, 12, 13, 14, 15, 16, 17, 18, 19, 20], [13, 14, 15, 16, 17, 18, 19, 20, 21, 22], [7, 8, 9, 10, 11, 12, 13, 14, 15, 16], [18, 19, 20, 21, 22, 23, 24, 25, 26, 27], [17, 18, 19, 20, 21, 22, 23, 24, 25, 26]] """ @property def total_samples(self): """The number of samples in the dataset. You must set this value before accessing the data. """ if self._total_samples is None: raise ValueError('"total_samples" not defined. Callable objects requires the' ' number of total_samples before being used') return super(CallableHolder, self).total_samples @total_samples.setter def total_samples(self, value): return super(CallableHolder, self.__class__).total_samples.fset(self, value) def __init__(self, *args, **kwargs): super(CallableHolder, self).__init__(*args, **kwargs) def get_sample(self, key): return self._data(key) class TensorHolder(AbsDataHolder): """ A data holder to work with :class:`torch.Tensor` objects. Example:: >>> tensor = torch.Tensor([[1,2,3], [4,5,6], [7,8,9]]) >>> tensor 1 2 3 4 5 6 7 8 9 [torch.FloatTensor of size 3x3] >>> data = TensorHolder(tensor, batch_size=2) >>> for sample in data: ... print('Sample:') ... print(sample) ... print('Sample as tensor:') ... print(utils.to_tensor(sample)) Sample: [ 7 8 9 [torch.FloatTensor of size 3] , 4 5 6 [torch.FloatTensor of size 3] ] Sample as tensor: 7 8 9 4 5 6 [torch.FloatTensor of size 2x3] Sample: [ 1 2 3 [torch.FloatTensor of size 3] ] Sample as tensor: 1 2 3 [torch.FloatTensor of size 1x3] """ def __init__(self, *args, **kwargs): super(TensorHolder, self).__init__(*args, **kwargs) size = len(self._data) if self._total_samples is None: self.total_samples = size def get_sample(self, key): return self._data[key] def NumpyHolder(data, *args, **kwargs): """ When creating the object, it converts the numpy data to Tensor using :func:`torch.from_numpy` and then creates an :class:`~cogitare.data.TensorHolder` instance. """ data = torch.from_numpy(data) return TensorHolder(data, *args, **kwargs) def AutoHolder(data, *args, **kwargs): """Check the data type to infer which data holder to use. """ if torch.is_tensor(data): return TensorHolder(data, *args, **kwargs) elif isinstance(data, numpy.ndarray): return NumpyHolder(data, *args, **kwargs) elif callable(data): return CallableHolder(data, *args, **kwargs) else: raise ValueError('Unable to infer data type!')
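A minimal usage sketch of the holders defined above; it is not part of the original module and assumes only the behaviour documented in the docstrings (batching, splitting, and the on_batch_loaded hook), with the import path the docstrings themselves reference.

import torch
from cogitare.data import TensorHolder

# 100 samples of 8 features each, served as shuffled batches of 10;
# on_batch_loaded stacks each batch (a list of 1-D tensors) into one 2-D tensor
data = TensorHolder(torch.randn(100, 8), batch_size=10, shuffle=True,
                    on_batch_loaded=lambda batch: torch.stack(batch))

train, test = data.split(0.8)    # 80/20 split over the holder's indices
print(len(train), len(test))     # number of batches in each holder

for batch in train:              # each batch is already a stacked 10x8 tensor here
    pass                         # feed it to a model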
mit
-4,970,938,022,273,481,000
32.352174
120
0.584865
false
Aeva/voxelpress
old_stuff/old_python_stuff/arduino/acm_firmwares/acm_kind.py
1
1717
from glob import glob from ..reprap_kind import ReprapKind class ReprapACM(ReprapKind): """Repraps which are controlled by an ACM device of some kind (usually an Arduino).""" def __init__(self, connection, firmware="Unknown", *args, **kargs): self.__serial = connection self.__buffer = False self.info = {} # Set a plausible printer uuid, which may be overridden by the # firmware driver. self.info["uuid"] = self.__serial.make_uuid(firmware) ReprapKind.__init__(self, *args, **kargs) def shutdown(self, disconnected=False): """Callback used to turn off the backend and release any resources.""" self.__serial.disconnect(disconnected) def gcode(self, line): """Send a line of gcode to the printer, and returns data if applicable.""" self.__serial.send(line) return self.__serial.poll() def __stream(self, fobject): """Extracts gcode commands from a file like object, removes comments and blank lines, and then streams the commands to the printer.""" self.hold() for line in fobject: if line.startswith(";"): continue code = line.split(";")[0].strip() self.gcode(code) def run_job(self, target): """Run a print job. Target can be a file path or file-like object.""" fobject = None if type(target) in [unicode, str]: found = glob(target) if found: # FIXME, should cue up multiple jobs, not just do one...? fobject = open(found[0]) if fobject: self.__stream(fobject)
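An illustration, not part of the original file, of the comment-stripping rule __stream() applies before sending a job: everything from the first ';' onward is dropped, and full-line comments are skipped altogether.

# hypothetical job excerpt -> what actually reaches self.gcode()
job = [
    "; generated by a slicer",   # full-line comment: skipped
    "G28 ; home all axes",       # inline comment stripped -> "G28"
    "G1 X10 Y10 F3000",          # sent unchanged
]
for line in job:
    if line.startswith(";"):
        continue
    print(line.split(";")[0].strip())   # -> "G28", then "G1 X10 Y10 F3000"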
gpl-3.0
1,986,246,930,187,726,600
32.666667
73
0.576005
false
Tjorriemorrie/trading
07_reinforcement/signals/sarsa.py
1
7351
''' signals by MA and RSI and Ichimoku ''' import pandas as pd import numpy as np from features import FeatureFactory import pickle from random import random, choice from pprint import pprint import time currencies = [ # 'AUDUSD', # 'EURGBP', # 'EURJPY', 'EURUSD', # 'GBPJPY', # 'GBPUSD', # 'NZDUSD', # 'USDCAD', # 'USDCHF', # 'USDJPY', ] intervals = [ # '60', '1440', ] actions = [ 'stay-out', 'enter-long', 'stay-long', 'exit-long', 'enter-short', 'stay-short', 'exit-short', ] def loadData(currency, interval): # print 'loading dataframe...' df = pd.read_csv( r'../data/' + currency.upper() + interval + '.csv', names=['date', 'time', 'open', 'high', 'low', 'close', 'volume'], dtype={'open': 'float', 'high': 'float', 'low': 'float', 'close': 'float', 'volume': 'int'}, # parse_dates=[[0, 1]], # index_col=0, ) # print df.tail() data = df.as_matrix() opens = data[:, 2].astype(float) highs = data[:, 3].astype(float) lows = data[:, 4].astype(float) closes = data[:, 5].astype(float) volumes = data[:, 6].astype(int) # print 'dataframe loaded' return opens, highs, lows, closes, volumes def loadThetas(currency, interval, cntFeatures): # print 'loading thetas' try: with open('models/{0}_{1}.thts'.format(currency, interval), 'r') as f: thetas = pickle.load(f) except IOError: thetas = [np.random.rand(cntFeatures) for a in actions] # pprint(thetas) # print 'thetas loaded' return thetas def saveThetas(currency, interval, thetas): # print 'saving thetas' with open('models/{0}_{1}.thts'.format(currency, interval), 'w') as f: pickle.dump(thetas, f) # print 'thetas saved' def getReward(rewards, s, a): ''' if action is stay-out: obviously no reward: we will then only enter trades if we expect positive returns if action is exiting: no reward as well: we will not enforce exiting positions, we will only exit when we expect negative returns. 
we get rewards only for entering and keeping positions (as long as positive returns are expected) ''' if a == 0: r = 0 elif a in [3, 6]: r = 0 else: r = rewards[s] return r def getActionStateValue(thetas, Fsa, a): # pprint(Fsa) # pprint(thetas[a]) Qsa = sum(f * t for f, t in zip(Fsa, thetas[a])) return float(Qsa) def getActionsAvailable(a): # stay-out: stay-out & enter-long & enter-short if a == 0: return [0, 1, 4] elif a == 1: return [2] elif a == 2: return [2, 3] elif a == 4: return [5] elif a == 5: return [5, 6] else: raise Exception('no available actions for {0}'.format(a)) def getAction(thetas, features, a): # exploration actionsAvailable = getActionsAvailable(a) # print 'actions available', actionsAvailable if random() < epsilon: a = choice(actionsAvailable) # exploitation else: aMax = None QsaHighest = -1000 for a in actionsAvailable: Qsa = getActionStateValue(thetas, features[a], a) if Qsa > QsaHighest: QsaHighest = Qsa aMax = a a = aMax return a ff = FeatureFactory() alpha = 0.1 epsilon = 0.1 gamma = 0.9 if __name__ == '__main__': interval = '1440' # interval = choice(intervals) for currency in currencies: print '\n', currency, interval # load data opens, highs, lows, closes, volumes = loadData(currency, interval) print 'data loaded' dataSize = len(closes) # extract features features = ff.getFeatures(opens, highs, lows, closes, volumes) print 'get features' cntFeatures = len(features) # pprint(features) # get rewards print 'get rewards' rewards = ff.getRewardsCycle(closes) # load thetas print 'load thetas' thetas = loadThetas(currency, interval, cntFeatures) # train outcomes = [] durations = [] print 'start' for i in xrange(100): # initialize state and action a = actions.index('stay-out') # print 'a start', a, actions[a] # print 'len closes', len(closes) # pprint(range(len(closes))) s = choice(range(len(closes))) # print 's start', s iniS = s # keep going until we hit an exit (that will be 1 episode/trade) while a not in [3, 6]: # set of features at state/index and action/noeffect Fsa = features[s] # take action a # observe r r = getReward(rewards, s, a) # print s, 'r of', r, 'for', actions[a], 'from', iniS, 'till', s # next state ss = s + 1 if ss >= dataSize: break # Qsa (action-state-values) Qsa = getActionStateValue(thetas, Fsa, a) # print s, 'Qsa', Qsa # start delta delta = r - Qsa # print s, 'delta start', delta # get next action aa = getAction(thetas, features, a) # print s, 'a', aa, actions[aa] # get features and Qsa Fsa = features[aa] Qsa = getActionStateValue(thetas, Fsa, aa) # end delta delta += gamma * Qsa # print s, 'delta end', delta # update thetas thetas[a] = [theta + alpha * delta for theta in thetas[a]] # pprint(thetas[a]) # normalize thetas # pprint(thetas[a]) mmin = min(thetas[a]) mmax = max(thetas[a]) rrange = mmax - mmin # print 'N', 'min', mmin, 'max', mmax, 'range', rrange thetas[a] = [(mmax - t) / rrange for t in thetas[a]] # print s, 'normalized', min(thetas[a]), max(thetas[a]) # until s is terminal if aa in [3, 6]: outcomes.append(closes[s] - closes[iniS] if aa == 3 else closes[iniS] - closes[s]) durations.append(s - iniS) print '\n', '#', len(outcomes), actions[a], r print 'Net outcomes', sum(outcomes) print 'Avg durations', int(sum(durations) / len(durations)) wins = sum([1. 
for o in outcomes if o > 0]) print currency, 'Win ratio', int(wins / len(outcomes) * 100) # time.sleep(0.3) # if iniS not set, then set it if a == 0 and aa in [1, 4]: iniS = s # s <- s' a <- a' s = ss a = aa # save periodically if i % 100 == 99: saveThetas(currency, interval, thetas) # print 'Net outcomes', sum(outcomes) # print currency, 'Win ratio', int(wins / len(outcomes) * 100) saveThetas(currency, interval, thetas)
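A small worked example, not from the original script, of the quantities the training loop manipulates: Q(s, a) is the dot product of the state's feature vector with the per-action weights, and the TD error is delta = r + gamma * Q(s', a') - Q(s, a), exactly as built up in the loop above. The gradient step at the end is the textbook linear-SARSA form theta_i += alpha * delta * f_i; the script itself adds alpha * delta to every weight and then renormalises the vector.

# toy numbers, purely illustrative
alpha, gamma = 0.1, 0.9
f_s  = [0.2, 0.5, 1.0]   # features of the current state for the chosen action
f_ss = [0.1, 0.4, 0.9]   # features of the next state
theta = [0.3, 0.1, 0.2]  # weights of the chosen action (each action has its own vector)

q_sa  = sum(f * t for f, t in zip(f_s, theta))    # 0.31
q_ssa = sum(f * t for f, t in zip(f_ss, theta))   # 0.25 (same weights, for illustration only)
r = 1.0
delta = r + gamma * q_ssa - q_sa                  # 0.915
theta = [t + alpha * delta * f for t, f in zip(theta, f_s)]  # textbook update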
mit
1,470,180,100,407,404,800
27.492248
134
0.513264
false
TougalooCSC/CSC455Spring15Prototypes
prototype01/migrations/versions/47f6450771a6_.py
1
2742
"""empty message Revision ID: 47f6450771a6 Revises: None Create Date: 2015-04-15 16:44:40.764749 """ # revision identifiers, used by Alembic. revision = '47f6450771a6' down_revision = None from alembic import op import sqlalchemy as sa def upgrade(): ### commands auto generated by Alembic - please adjust! ### op.drop_table('decks') op.drop_table('users') op.drop_table('flashcard_responses') op.drop_table('flashcards') ### end Alembic commands ### def downgrade(): ### commands auto generated by Alembic - please adjust! ### op.create_table('flashcards', sa.Column('created_at', sa.DATETIME(), nullable=True), sa.Column('updated_at', sa.DATETIME(), nullable=True), sa.Column('is_active', sa.BOOLEAN(), nullable=True), sa.Column('id', sa.INTEGER(), nullable=False), sa.Column('question_text', sa.VARCHAR(length=256), nullable=True), sa.Column('question_answer', sa.VARCHAR(length=127), nullable=True), sa.Column('created_by', sa.INTEGER(), nullable=True), sa.ForeignKeyConstraint(['created_by'], [u'users.id'], ), sa.PrimaryKeyConstraint('id') ) op.create_table('flashcard_responses', sa.Column('created_at', sa.DATETIME(), nullable=True), sa.Column('updated_at', sa.DATETIME(), nullable=True), sa.Column('is_active', sa.BOOLEAN(), nullable=True), sa.Column('id', sa.INTEGER(), nullable=False), sa.Column('response', sa.VARCHAR(length=127), nullable=True), sa.Column('flashcard_id', sa.INTEGER(), nullable=True), sa.Column('user_id', sa.INTEGER(), nullable=True), sa.ForeignKeyConstraint(['flashcard_id'], [u'flashcards.id'], ), sa.ForeignKeyConstraint(['user_id'], [u'users.id'], ), sa.PrimaryKeyConstraint('id') ) op.create_table('users', sa.Column('created_at', sa.DATETIME(), nullable=True), sa.Column('updated_at', sa.DATETIME(), nullable=True), sa.Column('is_active', sa.BOOLEAN(), nullable=True), sa.Column('id', sa.INTEGER(), nullable=False), sa.Column('name', sa.VARCHAR(length=120), nullable=True), sa.Column('email', sa.VARCHAR(length=120), nullable=True), sa.Column('password', sa.VARCHAR(length=30), nullable=True), sa.PrimaryKeyConstraint('id') ) op.create_table('decks', sa.Column('created_at', sa.DATETIME(), nullable=True), sa.Column('updated_at', sa.DATETIME(), nullable=True), sa.Column('is_active', sa.BOOLEAN(), nullable=True), sa.Column('id', sa.INTEGER(), nullable=False), sa.Column('title', sa.VARCHAR(length=127), nullable=True), sa.Column('created_by', sa.INTEGER(), nullable=True), sa.ForeignKeyConstraint(['created_by'], [u'users.id'], ), sa.PrimaryKeyConstraint('id') ) ### end Alembic commands ###
gpl-3.0
7,827,077,548,879,713,000
37.619718
72
0.660832
false
gitprouser/appengine-bottle-skeleton
lib/ndb/msgprop_test.py
1
17472
#
# Copyright 2008 The ndb Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for msgprop.py."""

from protorpc import messages

from . import model
from . import msgprop
from . import test_utils
from .google_imports import datastore_errors
from .google_test_imports import unittest


class Color(messages.Enum):
  RED = 620
  GREEN = 495
  BLUE = 450


SAMPLE_PB = r"""key <
  app: "ndb-test-app-id"
  path <
    Element {
      type: "Storage"
      id: 1
    }
  >
>
entity_group <
  Element {
    type: "Storage"
    id: 1
  }
>
property <
  name: "greet.text"
  value <
    stringValue: "abc"
  >
  multiple: false
>
raw_property <
  meaning: 14
  name: "greet.__protobuf__"
  value <
    stringValue: "\n\003abc\020{"
  >
  multiple: false
>
"""


class MsgPropTests(test_utils.NDBTest):

  the_module = msgprop

  def setUp(self):
    super(MsgPropTests, self).setUp()
    global Greeting

    class Greeting(messages.Message):
      text = messages.StringField(1, required=True)
      when = messages.IntegerField(2)
      color = messages.EnumField(Color, 3)

  def testBasics(self):
    class Storage(model.Model):
      greet = msgprop.MessageProperty(Greeting, indexed_fields=['text'],
                                      verbose_name='The Greeting')
    self.assertEqual(Storage.greet._verbose_name, 'The Greeting')
    greet = Greeting(text='abc', when=123)
    store = Storage(greet=greet)
    key = store.put()
    result = key.get()
    self.assertFalse(result is store)
    self.assertEqual(result.greet.text, 'abc')
    self.assertEqual(result.greet.when, 123)
    self.assertEqual(result.greet, Greeting(when=123, text='abc'))
    self.assertEqual(result,
                     Storage(greet=Greeting(when=123, text='abc'), key=key))
    self.assertEqual(str(result._to_pb()), SAMPLE_PB)

  def testValidator(self):
    logs = []

    def validator(prop, value):
      logs.append((prop, value))
      return value

    class Storage(model.Model):
      greet = msgprop.MessageProperty(Greeting, indexed_fields=['text'],
                                      validator=validator)
    greet = Greeting(text='abc', when=123)
    store = Storage(greet=greet)
    self.assertEqual(logs, [(Storage.greet, greet)])

  def testReprMessageProperty(self):
    greet1 = msgprop.MessageProperty(Greeting, 'foo')
    self.assertEqual(repr(greet1), "MessageProperty(Greeting, 'foo')")
    greet2 = msgprop.MessageProperty(Greeting, 'foo', protocol='protojson')
    self.assertEqual(repr(greet2),
                     "MessageProperty(Greeting, 'foo', protocol='protojson')")
    greet3 = msgprop.MessageProperty(Greeting, 'foo', indexed_fields=['text'])
    self.assertEqual(
        repr(greet3),
        "MessageProperty(Greeting, 'foo', indexed_fields=('text',))")
    greets = msgprop.MessageProperty(Greeting, 'foo', repeated=True)
    self.assertEqual(repr(greets),
                     "MessageProperty(Greeting, 'foo', repeated=True)")

  def testReprEnumProperty(self):
    color = msgprop.EnumProperty(Color, 'bar')
    self.assertEqual(repr(color), "EnumProperty(Color, 'bar')")
    colors = msgprop.EnumProperty(Color, 'bar', repeated=True)
    self.assertEqual(repr(colors), "EnumProperty(Color, 'bar', repeated=True)")

  def testQuery(self):
    class Storage(model.Model):
      greet = msgprop.MessageProperty(Greeting, indexed_fields=['text'])
    greet1 = Greeting(text='abc', when=123)
    store1 = Storage(greet=greet1)
    store1.put()
    greet2 = Greeting(text='def', when=456)
    store2 = Storage(greet=greet2)
    store2.put()
    q = Storage.query(Storage.greet.text == 'abc')
    self.assertEqual(q.fetch(), [store1])
    self.assertRaises(AttributeError, lambda: Storage.greet.when)

  def testErrors(self):
    class Storage(model.Model):
      greet = msgprop.MessageProperty(Greeting, indexed_fields=['text'])
    # Call MessageProperty(x) where x is not a Message class.
    self.assertRaises(TypeError, msgprop.MessageProperty, Storage)
    self.assertRaises(TypeError, msgprop.MessageProperty, 42)
    self.assertRaises(TypeError, msgprop.MessageProperty, None)
    # Call MessageProperty(Greeting, indexed_fields=x) where x
    # includes invalid field names.
    self.assertRaises(ValueError, msgprop.MessageProperty, Greeting,
                      indexed_fields=['text', 'nope'])
    self.assertRaises(TypeError, msgprop.MessageProperty, Greeting,
                      indexed_fields=['text', 42])
    self.assertRaises(TypeError, msgprop.MessageProperty, Greeting,
                      indexed_fields=['text', None])
    self.assertRaises(ValueError, msgprop.MessageProperty, Greeting,
                      indexed_fields=['text', 'text'])  # Duplicate.
    # Set a MessageProperty value to a non-Message instance.
    self.assertRaises(TypeError, Storage, greet=42)

  def testNothingIndexed(self):
    class Store(model.Model):
      gr = msgprop.MessageProperty(Greeting)
    gr = Greeting(text='abc', when=123)
    st = Store(gr=gr)
    st.put()
    self.assertEqual(Store.query().fetch(), [st])
    self.assertRaises(AttributeError, lambda: Store.gr.when)

  def testForceProtocol(self):
    class Store(model.Model):
      gr = msgprop.MessageProperty(Greeting, protocol='protobuf')
    gr = Greeting(text='abc', when=123)
    st = Store(gr=gr)
    st.put()
    self.assertEqual(Store.query().fetch(), [st])

  def testRepeatedMessageProperty(self):
    class StoreSeveral(model.Model):
      greets = msgprop.MessageProperty(Greeting, repeated=True,
                                       indexed_fields=['text', 'when'])
    ga = Greeting(text='abc', when=123)
    gb = Greeting(text='abc', when=456)
    gc = Greeting(text='def', when=123)
    gd = Greeting(text='def', when=456)
    s1 = StoreSeveral(greets=[ga, gb])
    k1 = s1.put()
    s2 = StoreSeveral(greets=[gc, gd])
    k2 = s2.put()
    res1 = k1.get()
    self.assertEqual(res1, s1)
    self.assertFalse(res1 is s1)
    self.assertEqual(res1.greets, [ga, gb])
    res = StoreSeveral.query(StoreSeveral.greets.text == 'abc').fetch()
    self.assertEqual(res, [s1])
    res = StoreSeveral.query(StoreSeveral.greets.when == 123).fetch()
    self.assertEqual(res, [s1, s2])

  def testIndexedEnumField(self):
    class Storage(model.Model):
      greet = msgprop.MessageProperty(Greeting, indexed_fields=['color'])
    gred = Greeting(text='red', color=Color.RED)
    gblue = Greeting(text='blue', color=Color.BLUE)
    s1 = Storage(greet=gred)
    s1.put()
    s2 = Storage(greet=gblue)
    s2.put()
    self.assertEqual(Storage.query(Storage.greet.color == Color.RED).fetch(),
                     [s1])
    self.assertEqual(Storage.query(Storage.greet.color < Color.RED).fetch(),
                     [s2])

  def testRepeatedIndexedField(self):
    class AltGreeting(messages.Message):
      lines = messages.StringField(1, repeated=True)
      when = messages.IntegerField(2)

    class Store(model.Model):
      altg = msgprop.MessageProperty(AltGreeting, indexed_fields=['lines'])
    s1 = Store(altg=AltGreeting(lines=['foo', 'bar'], when=123))
    s1.put()
    s2 = Store(altg=AltGreeting(lines=['baz', 'bletch'], when=456))
    s2.put()
    res = Store.query(Store.altg.lines == 'foo').fetch()
    self.assertEqual(res, [s1])

  def testRepeatedIndexedFieldInRepeatedMessageProperty(self):
    class AltGreeting(messages.Message):
      lines = messages.StringField(1, repeated=True)
      when = messages.IntegerField(2)
    self.assertRaises(TypeError, msgprop.MessageProperty, AltGreeting,
                      indexed_fields=['lines'], repeated=True)

  def testBytesField(self):
    class BytesGreeting(messages.Message):
      data = messages.BytesField(1)
      when = messages.IntegerField(2)

    class Store(model.Model):
      greet = msgprop.MessageProperty(BytesGreeting, indexed_fields=['data'])
    bg = BytesGreeting(data='\xff', when=123)
    st = Store(greet=bg)
    st.put()
    res = Store.query(Store.greet.data == '\xff').fetch()
    self.assertEqual(res, [st])

  def testNestedMessageField(self):
    class Inner(messages.Message):
      count = messages.IntegerField(1)
      greet = messages.MessageField(Greeting, 2)

    class Outer(messages.Message):
      inner = messages.MessageField(Inner, 1)
      extra = messages.StringField(2)

    class Store(model.Model):
      outer = msgprop.MessageProperty(Outer,
                                      indexed_fields=['inner.greet.text'])
    greet = Greeting(text='abc', when=123)
    inner = Inner(count=42, greet=greet)
    outer = Outer(inner=inner)
    st = Store(outer=outer)
    st.put()
    res = Store.query(Store.outer.inner.greet.text == 'abc').fetch()
    self.assertEqual(res, [st])

  def testNestedMessageFieldIsNone(self):
    class Outer(messages.Message):
      greeting = messages.MessageField(Greeting, 1)

    class Store(model.Model):
      outer = msgprop.MessageProperty(Outer, indexed_fields=['greeting.text'])
    outer1 = Outer(greeting=None)
    store1 = Store(outer=outer1)
    store1.put()
    res = Store.query(Store.outer.greeting.text == 'abc').fetch()
    self.assertEqual(res, [])

  def testRepeatedNestedMessageField(self):
    class Outer(messages.Message):
      greeting = messages.MessageField(Greeting, 1)
      extra = messages.IntegerField(2)

    class Store(model.Model):
      outers = msgprop.MessageProperty(Outer, repeated=True,
                                       indexed_fields=['greeting.text'])
    gr1 = Greeting(text='abc', when=123)
    gr2 = Greeting(text='def', when=456)
    outer1 = Outer(greeting=gr1, extra=1)
    outer2 = Outer(greeting=gr2, extra=2)
    store1 = Store(outers=[outer1])
    store1.put()
    store2 = Store(outers=[outer2])
    store2.put()
    store3 = Store(outers=[outer1, outer2])
    store3.put()
    res = Store.query(Store.outers.greeting.text == 'abc').fetch()
    self.assertEqual(res, [store1, store3])

  def testNestedRepeatedMessageField(self):
    class Outer(messages.Message):
      greetings = messages.MessageField(Greeting, 1, repeated=True)
      extra = messages.IntegerField(2)

    class Store(model.Model):
      outer = msgprop.MessageProperty(Outer,
                                      indexed_fields=['greetings.text',
                                                      'extra'])
    gr1 = Greeting(text='abc', when=123)
    gr2 = Greeting(text='def', when=456)
    outer1 = Outer(greetings=[gr1], extra=1)
    outer2 = Outer(greetings=[gr2], extra=2)
    outer3 = Outer(greetings=[gr1, gr2], extra=3)
    store1 = Store(outer=outer1)
    store1.put()
    store2 = Store(outer=outer2)
    store2.put()
    store3 = Store(outer=outer3)
    store3.put()
    res = Store.query(Store.outer.greetings.text == 'abc').fetch()
    self.assertEqual(res, [store1, store3])

  def testNestedFieldErrors(self):
    class Outer(messages.Message):
      greetings = messages.MessageField(Greeting, 1, repeated=True)
      extra = messages.IntegerField(2)
    # Parent/child conflicts.
    self.assertRaises(ValueError, msgprop.MessageProperty, Outer,
                      indexed_fields=['greetings.text', 'greetings'])
    self.assertRaises(ValueError, msgprop.MessageProperty, Outer,
                      indexed_fields=['greetings', 'greetings.text'])
    # Duplicate inner field.
    self.assertRaises(ValueError, msgprop.MessageProperty, Outer,
                      indexed_fields=['greetings.text', 'greetings.text'])
    # Can't index MessageField.
    self.assertRaises(ValueError, msgprop.MessageProperty, Outer,
                      indexed_fields=['greetings'])
    # Can't specify subfields for non-MessageField.
    self.assertRaises(ValueError, msgprop.MessageProperty, Outer,
                      indexed_fields=['extra.foobar'])
    # Non-existent subfield.
    self.assertRaises(ValueError, msgprop.MessageProperty, Outer,
                      indexed_fields=['greetings.foobar'])

  def testDoubleNestedRepeatErrors(self):
    class Inner(messages.Message):
      greets = messages.MessageField(Greeting, 1, repeated=True)

    class Outer(messages.Message):
      inner = messages.MessageField(Inner, 1)
      inners = messages.MessageField(Inner, 2, repeated=True)
    msgprop.MessageProperty(Inner, repeated=True)  # Should not fail
    msgprop.MessageProperty(Outer, repeated=True)  # Should not fail
    self.assertRaises(TypeError, msgprop.MessageProperty, Inner,
                      repeated=True, indexed_fields=['greets.text'])
    self.assertRaises(TypeError, msgprop.MessageProperty, Outer,
                      indexed_fields=['inners.greets.text'])
    self.assertRaises(TypeError, msgprop.MessageProperty, Outer,
                      repeated=True, indexed_fields=['inner.greets.text'])

  def testEnumProperty(self):
    class Foo(model.Model):
      color = msgprop.EnumProperty(Color, default=Color.RED,
                                   choices=[Color.RED, Color.GREEN])
      colors = msgprop.EnumProperty(Color, repeated=True)
    foo1 = Foo(colors=[Color.RED, Color.GREEN])
    foo1.put()
    foo2 = Foo(color=Color.GREEN, colors=[Color.RED, Color.BLUE])
    foo2.put()
    res = Foo.query(Foo.color == Color.RED).fetch()
    self.assertEqual(res, [foo1])
    res = Foo.query(Foo.colors == Color.RED).fetch()
    self.assertEqual(res, [foo1, foo2])

    class FooBar(model.Model):
      color = msgprop.EnumProperty(Color, indexed=False,
                                   verbose_name='The Color String',
                                   validator=lambda prop, val: Color.BLUE)
    self.assertEqual(FooBar.color._verbose_name, 'The Color String')
    foobar1 = FooBar(color=Color.RED)
    self.assertEqual(foobar1.color, Color.BLUE)  # Tests the validator
    foobar1.put()
    self.assertRaises(datastore_errors.BadFilterError,
                      lambda: FooBar.color == Color.RED)
    # Test some errors.
    self.assertRaises(datastore_errors.BadValueError,
                      Foo, color=Color.BLUE)  # Not in choices
    self.assertRaises(TypeError, Foo, color='RED')  # Not an enum
    self.assertRaises(TypeError, Foo, color=620)  # Not an enum
    # Invalid default
    self.assertRaises(TypeError, msgprop.EnumProperty, Color, default=42)
    # Invalid choice
    self.assertRaises(TypeError, msgprop.EnumProperty, Color, choices=[42])
    foo2.colors.append(42)
    self.ExpectWarnings()
    self.assertRaises(TypeError, foo2.put)  # Late-stage validation

    class Bar(model.Model):
      color = msgprop.EnumProperty(Color, required=True)
    bar1 = Bar()
    self.assertRaises(datastore_errors.BadValueError, bar1.put)  # Missing value

  def testPropertyNameConflict(self):
    class MyMsg(messages.Message):
      blob_ = messages.StringField(1)
    msgprop.MessageProperty(MyMsg)  # Should be okay
    self.assertRaises(ValueError, msgprop.MessageProperty, MyMsg,
                      indexed_fields=['blob_'])

  def testProtocolChange(self):
    class Storage(model.Model):
      greeting = msgprop.MessageProperty(Greeting, protocol='protobuf')
    greet1 = Greeting(text='abc', when=123)
    store1 = Storage(greeting=greet1)
    key1 = store1.put()

    class Storage(model.Model):
      greeting = msgprop.MessageProperty(Greeting, protocol='protojson')
    store2 = key1.get()
    self.assertEqual(store2.greeting, greet1)

  def testProjectionQueries(self):
    class Wrapper(messages.Message):
      greet = messages.MessageField(Greeting, 1)

    class Storage(model.Model):
      wrap = msgprop.MessageProperty(Wrapper,
                                     indexed_fields=['greet.text',
                                                     'greet.when'])
    gr1 = Greeting(text='abc', when=123)
    wr1 = Wrapper(greet=gr1)
    st1 = Storage(wrap=wr1)
    st1.put()
    res1 = Storage.query().get(projection=['wrap.greet.text',
                                           Storage.wrap.greet.when])
    self.assertNotEqual(res1, st1)
    self.assertEqual(res1.wrap, st1.wrap)
    res2 = Storage.query().get(projection=['wrap.greet.text'])
    self.assertEqual(res2.wrap, Wrapper(greet=Greeting(text='abc')))

  def testProjectionQueriesRepeatedField(self):
    class Wrapper(messages.Message):
      greets = messages.MessageField(Greeting, 1, repeated=True)

    class Storage(model.Model):
      wrap = msgprop.MessageProperty(Wrapper,
                                     indexed_fields=['greets.text',
                                                     'greets.when'])
    gr1 = Greeting(text='abc', when=123)
    wr1 = Wrapper(greets=[gr1])
    st1 = Storage(wrap=wr1)
    st1.put()
    res1 = Storage.query().get(projection=['wrap.greets.text',
                                           Storage.wrap.greets.when])
    self.assertNotEqual(res1, st1)
    self.assertEqual(res1.wrap, st1.wrap)
    res2 = Storage.query().get(projection=['wrap.greets.text'])
    self.assertEqual(res2.wrap, Wrapper(greets=[Greeting(text='abc')]))


if __name__ == '__main__':
  unittest.main()
apache-2.0
6,636,100,694,403,759,000
36.655172
80
0.651843
false
akrause2014/dispel4py
dispel4py/new/mpi_process.py
1
4853
# Copyright (c) The University of Edinburgh 2014
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

from processor import GenericWrapper, simpleLogger, STATUS_TERMINATED, STATUS_ACTIVE
import processor
import types
import traceback


def process(workflow, inputs, args):
    processes = {}
    inputmappings = {}
    outputmappings = {}
    success = True
    nodes = [node.getContainedObject() for node in workflow.graph.nodes()]
    if rank == 0 and not args.simple:
        try:
            processes, inputmappings, outputmappings = processor.assign_and_connect(workflow, size)
        except:
            success = False
    success = comm.bcast(success, root=0)

    if args.simple or not success:
        ubergraph = processor.create_partitioned(workflow)
        nodes = [node.getContainedObject() for node in ubergraph.graph.nodes()]
        if rank == 0:
            print 'Partitions: %s' % ', '.join(('[%s]' % ', '.join((pe.id for pe in part)) for part in workflow.partitions))
            for node in ubergraph.graph.nodes():
                wrapperPE = node.getContainedObject()
                print('%s contains %s' % (wrapperPE.id, [n.getContainedObject().id for n in wrapperPE.workflow.graph.nodes()]))
            try:
                processes, inputmappings, outputmappings = processor.assign_and_connect(ubergraph, size)
                inputs = processor.map_inputs_to_partitions(ubergraph, inputs)
                success = True
            except:
                # print traceback.format_exc()
                print 'dispel4py.mpi_process: Not enough processes for execution of graph'
                success = False
        success = comm.bcast(success, root=0)

    if not success:
        return

    try:
        inputs = {pe.id: v for pe, v in inputs.iteritems()}
    except AttributeError:
        pass

    processes = comm.bcast(processes, root=0)
    inputmappings = comm.bcast(inputmappings, root=0)
    outputmappings = comm.bcast(outputmappings, root=0)
    inputs = comm.bcast(inputs, root=0)

    if rank == 0:
        print 'Processes: %s' % processes
        # print 'Inputs: %s' % inputs

    for pe in nodes:
        if rank in processes[pe.id]:
            provided_inputs = processor.get_inputs(pe, inputs)
            wrapper = MPIWrapper(pe, provided_inputs)
            wrapper.targets = outputmappings[rank]
            wrapper.sources = inputmappings[rank]
            wrapper.process()


class MPIWrapper(GenericWrapper):

    def __init__(self, pe, provided_inputs=None):
        GenericWrapper.__init__(self, pe)
        self.pe.log = types.MethodType(simpleLogger, pe)
        self.pe.rank = rank
        self.provided_inputs = provided_inputs
        self.terminated = 0

    def _read(self):
        result = super(MPIWrapper, self)._read()
        if result is not None:
            return result
        status = MPI.Status()
        msg = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
        tag = status.Get_tag()
        while tag == STATUS_TERMINATED:
            self.terminated += 1
            if self.terminated >= self._num_sources:
                break
            else:
                msg = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
                tag = status.Get_tag()
        return msg, tag

    def _write(self, name, data):
        try:
            targets = self.targets[name]
        except KeyError:
            # no targets
            # self.pe.log('Produced output: %s' % {name: data})
            return
        for (inputName, communication) in targets:
            output = {inputName: data}
            dest = communication.getDestination(output)
            for i in dest:
                # self.pe.log('Sending %s to %s' % (output, i))
                request = comm.isend(output, tag=STATUS_ACTIVE, dest=i)
                status = MPI.Status()
                request.Wait(status)

    def _terminate(self):
        for output, targets in self.targets.iteritems():
            for (inputName, communication) in targets:
                for i in communication.destinations:
                    # self.pe.log('Terminating consumer %s' % i)
                    request = comm.isend(None, tag=STATUS_TERMINATED, dest=i)
apache-2.0
4,039,095,439,215,027,000
36.620155
127
0.608902
false
tim-shea/learnability
network_test.py
1
1160
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from scipy.stats import binned_statistic as bin_stat
from lif import *
from syn import *

prefs.codegen.target = 'numpy'
defaultclock.dt = 1*ms
params = LifParams(constant_input=3)
params.update(SynParams())
neurons = LifNeurons(1000, params)
excitatory_synapses = ExcitatorySynapses(neurons, params)
excitatory_synapses.connect('i != j and i < 800', p=0.1)
excitatory_synapses.w = 1.0
inhibitory_synapses = InhibitorySynapses(neurons, params)
inhibitory_synapses.connect('i != j and i >= 800', p=0.1)
inhibitory_synapses.w = -1.0
rate_monitor = PopulationRateMonitor(neurons)
spike_monitor = SpikeMonitor(neurons)
network = Network()
network.add(neurons, excitatory_synapses, inhibitory_synapses, rate_monitor, spike_monitor)
network.run(10*second, report='stdout', report_period=1.0*second, namespace={})

figure()
subplot(211)
suptitle('Network Activity')
binned_rate = bin_stat(rate_monitor.t/second, rate_monitor.rate, bins=100)
plot(binned_rate[1][:-1], binned_rate[0])
ylabel('Firing Rate (Hz)')
subplot(212)
plot(spike_monitor.t/second, spike_monitor.i, '.k')
ylabel('Neuron #')
xlabel('Time (s)')
show()
cc0-1.0
-6,118,570,399,146,441,000
32.142857
91
0.741379
false