Dataset schema (field, type, observed range):

  repo_name       string    lengths 5 to 92
  path            string    lengths 4 to 232
  copies          string    19 classes
  size            string    lengths 4 to 7
  content         string    lengths 721 to 1.04M
  license         string    15 classes
  hash            int64     -9,223,277,421,539,062,000 to 9,223,102,107B
  line_mean       float64   6.51 to 99.9
  line_max        int64     15 to 997
  alpha_frac      float64   0.25 to 0.97
  autogenerated   bool      1 class
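As orientation for working with rows in this layout, here is a minimal loading-and-filtering sketch, assuming the dump comes from a Hugging Face dataset; the dataset identifier and the license set below are placeholders, not part of the dump.

    from datasets import load_dataset

    # Placeholder identifier: substitute the actual dataset repo for this dump.
    ds = load_dataset("example-org/code-corpus", split="train")

    # Keep only human-written files under a permissive license.
    permissive = {"apache-2.0", "bsd-3-clause", "mit"}
    ds = ds.filter(lambda row: not row["autogenerated"] and row["license"] in permissive)

    # Inspect a couple of rows' metadata fields.
    for row in ds.select(range(2)):
        print(row["repo_name"], row["path"], row["size"])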
repo_name: collective/mr.poe
path: raven/handlers/logbook.py
copies: 1
size: 2748
content:

"""
raven.handlers.logbook
~~~~~~~~~~~~~~~~~~~~~~

:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""

import logbook
import sys
import traceback

from raven.base import Client
from raven.utils.encoding import to_string
from raven.utils import all


class SentryHandler(logbook.Handler):
    def __init__(self, *args, **kwargs):
        if len(args) == 1:
            arg = args[0]
            if isinstance(arg, basestring):
                self.client = kwargs.pop('client_cls', Client)(dsn=arg)
            elif isinstance(arg, Client):
                self.client = arg
            else:
                raise ValueError('The first argument to %s must be either a Client instance or a DSN, got %r instead.' % (
                    self.__class__.__name__,
                    arg,
                ))
            args = []
        else:
            try:
                self.client = kwargs.pop('client')
            except KeyError:
                raise TypeError('Expected keyword argument for SentryHandler: client')
        super(SentryHandler, self).__init__(*args, **kwargs)

    def emit(self, record):
        try:
            # Avoid typical config issues by overriding loggers behavior
            if record.channel.startswith('sentry.errors'):
                print >> sys.stderr, to_string(self.format(record))
                return
            return self._emit(record)
        except Exception:
            print >> sys.stderr, "Top level Sentry exception caught - failed creating log record"
            print >> sys.stderr, to_string(record.msg)
            print >> sys.stderr, to_string(traceback.format_exc())
            try:
                self.client.captureException()
            except Exception:
                pass

    def _emit(self, record):
        data = {
            'level': logbook.get_level_name(record.level).lower(),
            'logger': record.channel,
            'message': self.format(record),
        }

        event_type = 'raven.events.Message'
        handler_kwargs = {'message': record.msg, 'params': record.args}

        # If there's no exception being processed, exc_info may be a 3-tuple of None
        # http://docs.python.org/library/sys.html#sys.exc_info
        if record.exc_info is True or (record.exc_info and all(record.exc_info)):
            handler = self.client.get_handler(event_type)
            data.update(handler.capture(**handler_kwargs))
            event_type = 'raven.events.Exception'
            handler_kwargs = {'exc_info': record.exc_info}

        return self.client.capture(event_type, data=data, extra=record.extra,
                                   **handler_kwargs)

license: bsd-3-clause
hash: -5,173,493,003,048,261,000
line_mean: 33.35
line_max: 122
alpha_frac: 0.564774
autogenerated: false
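For orientation, a minimal sketch of how a handler like the one in the row above is typically attached in logbook-based code; this is not part of the dataset row, the DSN string is a placeholder, and it assumes raven and logbook are installed.

    import logbook
    from raven.handlers.logbook import SentryHandler

    # Placeholder DSN: a real Sentry project DSN would go here.
    handler = SentryHandler('https://public:secret@sentry.example.com/1',
                            level=logbook.ERROR)

    log = logbook.Logger('myapp')
    with handler.applicationbound():
        # Records at ERROR and above are forwarded to Sentry via SentryHandler.emit().
        log.error('Something went wrong')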
repo_name: google/fedjax
path: fedjax/aggregators/aggregator.py
copies: 1
size: 2676
content:

# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface definitions for aggregators."""

from typing import Any, Callable, Iterable, Tuple

from fedjax.core import dataclasses
from fedjax.core import tree_util
from fedjax.core.federated_data import ClientId
from fedjax.core.typing import Params

PyTree = Any
AggregatorState = PyTree


@dataclasses.dataclass
class Aggregator:
  """Interface for algorithms to aggregate.

  This interface defines aggregator algorithms that are used at each round.
  Aggregator state contains any round specific parameters (e.g. number of bits)
  that will be passed from round to round. This state is initialized by `init`
  and passed as input into and returned as output from `aggregate`.

  We strongly recommend using fedjax.dataclass to define state as this provides
  immutability, type hinting, and works by default with JAX transformations.

  The expected usage of Aggregator is as follows:

  ```
  aggregator = mean_aggregator()
  state = aggregator.init()
  for i in range(num_rounds):
    clients_params_and_weights = compute_client_outputs(i)
    aggregated_params, state = aggregator.apply(clients_params_and_weights,
                                                state)
  ```

  Attributes:
    init: Returns initial state of aggregator.
    apply: Returns the new aggregator state and aggregated params.
  """
  init: Callable[[], AggregatorState]
  apply: Callable[
      [Iterable[Tuple[ClientId, Params, float]], AggregatorState],
      Tuple[Params, AggregatorState]]


@dataclasses.dataclass
class MeanAggregatorState:
  """Mean aggregator is stateless."""


def mean_aggregator() -> Aggregator:
  """Builds (weighted) mean aggregator."""

  def init():
    return MeanAggregatorState()

  def apply(clients_params_and_weights, state):

    def extract_params_and_weight(clients_params_and_weight):
      _, param, weight = clients_params_and_weight
      return param, weight

    params_and_weights = map(extract_params_and_weight,
                             clients_params_and_weights)
    return tree_util.tree_mean(params_and_weights), state

  return Aggregator(init, apply)

license: apache-2.0
hash: 165,806,188,716,476,260
line_mean: 33.753247
line_max: 79
alpha_frac: 0.727952
autogenerated: false
repo_name: ScottBuchanan/eden
path: modules/s3/s3validators.py
copies: 1
size: 147861
content:
# -*- coding: utf-8 -*- """ Custom Validators @requires: U{B{I{gluon}} <http://web2py.com>} @copyright: (c) 2010-2015 Sahana Software Foundation @license: MIT Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ __all__ = ("single_phone_number_pattern", "multi_phone_number_pattern", "s3_single_phone_requires", "s3_phone_requires", "IS_ACL", "IS_ADD_PERSON_WIDGET", "IS_ADD_PERSON_WIDGET2", "IS_COMBO_BOX", "IS_FLOAT_AMOUNT", "IS_HTML_COLOUR", "IS_INT_AMOUNT", "IS_IN_SET_LAZY", "IS_ISO639_2_LANGUAGE_CODE", "IS_JSONS3", "IS_LAT", "IS_LON", "IS_LAT_LON", "IS_LOCATION", "IS_LOCATION_SELECTOR", "IS_ONE_OF", "IS_ONE_OF_EMPTY", "IS_ONE_OF_EMPTY_SELECT", "IS_NOT_ONE_OF", "IS_PERSON_GENDER", "IS_PHONE_NUMBER", "IS_PROCESSED_IMAGE", "IS_SITE_SELECTOR", "IS_UTC_DATETIME", "IS_UTC_OFFSET", "QUANTITY_INV_ITEM", ) import re import time from datetime import datetime, timedelta JSONErrors = (NameError, TypeError, ValueError, AttributeError, KeyError) try: import json # try stdlib (Python 2.6) except ImportError: try: import simplejson as json # try external module except: import gluon.contrib.simplejson as json # fallback to pure-Python module from gluon.contrib.simplejson.decoder import JSONDecodeError JSONErrors += (JSONDecodeError,) from gluon import * #from gluon import current #from gluon.validators import IS_DATE_IN_RANGE, IS_MATCH, IS_NOT_IN_DB, IS_IN_SET, IS_INT_IN_RANGE, IS_FLOAT_IN_RANGE, IS_EMAIL from gluon.storage import Storage from gluon.validators import Validator from s3datetime import S3DateTime from s3utils import s3_orderby_fields, s3_unicode, s3_validate def translate(text): if text is None: return None elif isinstance(text, (str, unicode)): if hasattr(current, "T"): return str(current.T(text)) return str(text) def options_sorter(x, y): return (s3_unicode(x[1]).upper() > s3_unicode(y[1]).upper() and 1) or -1 DEFAULT = lambda: None # ----------------------------------------------------------------------------- # Phone number requires # Multiple phone numbers can be separated by comma, slash, semi-colon. # (Semi-colon appears in Brazil OSM data.) # @ToDo: Need to beware of separators used inside phone numbers # (e.g. 555-1212, ext 9), so may need fancier validation if we see that. # @ToDo: Add tooltip giving list syntax, and warning against above. # (Current use is in importing OSM files, so isn't interactive.) # @ToDo: Code that should only have a single # should use # s3_single_phone_requires. Check what messaging assumes. phone_number_pattern = "\+?\s*[\s\-\.\(\)\d]+(?:(?: x| ext)\s?\d{1,5})?" 
single_phone_number_pattern = "%s$" % phone_number_pattern multi_phone_number_pattern = "%s(\s*(,|/|;)\s*%s)*$" % (phone_number_pattern, phone_number_pattern) s3_single_phone_requires = IS_MATCH(single_phone_number_pattern) s3_phone_requires = IS_MATCH(multi_phone_number_pattern, error_message="Invalid phone number!") # ============================================================================= class IS_JSONS3(Validator): """ Example: Used as:: INPUT(_type='text', _name='name', requires=IS_JSON(error_message="This is not a valid json input") >>> IS_JSON()('{"a": 100}') ({u'a': 100}, None) >>> IS_JSON()('spam1234') ('spam1234', 'invalid json') """ def __init__(self, error_message="Invalid JSON"): try: self.driver_auto_json = current.db._adapter.driver_auto_json except: current.log.warning("Update Web2Py to 2.9.11 to get native JSON support") self.driver_auto_json = [] self.error_message = error_message # ------------------------------------------------------------------------- def __call__(self, value): # Convert CSV import format to valid JSON value = value.replace("'", "\"") try: if "dumps" in self.driver_auto_json: json.loads(value) # raises error in case of malformed JSON return (value, None) # the serialized value is not passed else: return (json.loads(value), None) except JSONErrors, e: return (value, "%s: %s" % (current.T(self.error_message), e)) # ------------------------------------------------------------------------- def formatter(self, value): if value is None: return None if "loads" in self.driver_auto_json: return value else: return json.dumps(value) # ============================================================================= class IS_LAT(Validator): """ example: INPUT(_type="text", _name="name", requires=IS_LAT()) Latitude has to be in decimal degrees between -90 & 90 - we attempt to convert DMS format into decimal degrees """ def __init__(self, error_message = "Latitude/Northing should be between -90 & 90!" ): self.minimum = -90 self.maximum = 90 self.error_message = error_message # Tell s3_mark_required that this validator doesn't accept NULL values self.mark_required = True # ------------------------------------------------------------------------- def __call__(self, value): try: value = float(value) if self.minimum <= value <= self.maximum: return (value, None) else: return (value, self.error_message) except: pattern = re.compile("^[0-9]{,3}[\D\W][0-9]{,3}[\D\W][0-9]+$") if not pattern.match(value): return (value, self.error_message) else: val = [] val.append(value) sep = [] count = 0 for i in val[0]: try: int(i) count += 1 except: sep.append(count) count += 1 sec = "" posn = sep[1] while posn != (count-1): sec = sec + val[0][posn+1]#to join the numbers for seconds posn += 1 posn2 = sep[0] mins = "" while posn2 != (sep[1]-1): mins = mins + val[0][posn2+1]# to join the numbers for minutes posn2 += 1 deg = "" posn3 = 0 while posn3 != (sep[0]): deg = deg + val[0][posn3] # to join the numbers for degree posn3 += 1 e = int(sec)/60 #formula to get back decimal degree f = int(mins) + e #formula g = int(f) / 60 #formula value = int(deg) + g return (value, None) # ============================================================================= class IS_LON(Validator): """ example: INPUT(_type="text", _name="name", requires=IS_LON()) Longitude has to be in decimal degrees between -180 & 180 - we attempt to convert DMS format into decimal degrees """ def __init__(self, error_message = "Longitude/Easting should be between -180 & 180!" 
): self.minimum = -180 self.maximum = 180 self.error_message = error_message # Tell s3_mark_required that this validator doesn't accept NULL values self.mark_required = True # ------------------------------------------------------------------------- def __call__(self, value): try: value = float(value) if self.minimum <= value <= self.maximum: return (value, None) else: return (value, self.error_message) except: pattern = re.compile("^[0-9]{,3}[\D\W][0-9]{,3}[\D\W][0-9]+$") if not pattern.match(value): return (value, self.error_message) else: val = [] val.append(value) sep = [] count = 0 for i in val[0]: try: int(i) count += 1 except: sep.append(count) count += 1 sec = "" posn = sep[1] while posn != (count-1): # join the numbers for seconds sec = sec + val[0][posn+1] posn += 1 posn2 = sep[0] mins = "" while posn2 != (sep[1]-1): # join the numbers for minutes mins = mins + val[0][posn2+1] posn2 += 1 deg = "" posn3 = 0 while posn3 != (sep[0]): # join the numbers for degree deg = deg + val[0][posn3] posn3 += 1 e = int(sec) / 60 #formula to get back decimal degree f = int(mins) + e #formula g = int(f) / 60 #formula value = int(deg) + g return (value, None) # ============================================================================= class IS_LAT_LON(Validator): """ Designed for use within the S3LocationLatLonWidget. For Create forms, this will create a new location from the additional fields For Update forms, this will check that we have a valid location_id FK and update any changes @ToDo: Audit """ def __init__(self, field, ): self.field = field # Tell s3_mark_required that this validator doesn't accept NULL values self.mark_required = True # ------------------------------------------------------------------------- def __call__(self, value): if current.response.s3.bulk: # Pointless in imports return (value, None) selector = str(self.field).replace(".", "_") post_vars = current.request.post_vars lat = post_vars.get("%s_lat" % selector, None) if lat == "": lat = None lon = post_vars.get("%s_lon" % selector, None) if lon == "": lon = None if lat is None or lon is None: # We don't accept None return (value, current.T("Latitude and Longitude are required")) # Check Lat lat, error = IS_LAT()(lat) if error: return (value, error) # Check Lon lon, error = IS_LON()(lon) if error: return (value, error) if value: # update db = current.db db(db.gis_location.id == value).update(lat=lat, lon=lon) else: # create value = current.db.gis_location.insert(lat=lat, lon=lon) # OK return (value, None) # ============================================================================= class IS_NUMBER(object): """ Used by s3data.py to wrap IS_INT_AMOUNT & IS_FLOAT_AMOUNT """ # ------------------------------------------------------------------------- @staticmethod def represent(number, precision=2): if number is None: return "" if isinstance(number, int): return IS_INT_AMOUNT.represent(number) elif isinstance(number, float): return IS_FLOAT_AMOUNT.represent(number, precision) else: return number # ============================================================================= class IS_INT_AMOUNT(IS_INT_IN_RANGE): """ Validation, widget and representation of integer-values with thousands-separators """ def __init__(self, minimum=None, maximum=None, error_message=None): IS_INT_IN_RANGE.__init__(self, minimum=minimum, maximum=maximum, error_message=error_message) # ------------------------------------------------------------------------- def __call__(self, value): thousands_sep = "," value = str(value).replace(thousands_sep, 
"") return IS_INT_IN_RANGE.__call__(self, value) # ------------------------------------------------------------------------- @staticmethod def represent(number): """ Change the format of the number depending on the language Based on https://code.djangoproject.com/browser/django/trunk/django/utils/numberformat.py """ if number is None: return "" try: intnumber = int(number) except: intnumber = number settings = current.deployment_settings THOUSAND_SEPARATOR = settings.get_L10n_thousands_separator() NUMBER_GROUPING = settings.get_L10n_thousands_grouping() # The negative/positive sign for the number if float(number) < 0: sign = "-" else: sign = "" str_number = unicode(intnumber) if str_number[0] == "-": str_number = str_number[1:] # Walk backwards over the integer part, inserting the separator as we go int_part_gd = "" for cnt, digit in enumerate(str_number[::-1]): if cnt and not cnt % NUMBER_GROUPING: int_part_gd += THOUSAND_SEPARATOR int_part_gd += digit int_part = int_part_gd[::-1] return sign + int_part # ------------------------------------------------------------------------- @staticmethod def widget(f, v, **attributes): from gluon.sqlhtml import StringWidget attr = Storage(attributes) classes = attr.get("_class", "").split(" ") classes = " ".join([c for c in classes if c != "integer"]) _class = "%s int_amount" % classes attr.update(_class=_class) return StringWidget.widget(f, v, **attr) # ============================================================================= class IS_FLOAT_AMOUNT(IS_FLOAT_IN_RANGE): """ Validation, widget and representation of float-values with thousands-separators """ def __init__(self, minimum=None, maximum=None, error_message=None, dot="."): IS_FLOAT_IN_RANGE.__init__(self, minimum=minimum, maximum=maximum, error_message=error_message, dot=dot) # ------------------------------------------------------------------------- def __call__(self, value): thousands_sep = "," value = str(value).replace(thousands_sep, "") return IS_FLOAT_IN_RANGE.__call__(self, value) # ------------------------------------------------------------------------- @staticmethod def represent(number, precision=None): """ Change the format of the number depending on the language Based on https://code.djangoproject.com/browser/django/trunk/django/utils/numberformat.py """ if number is None: return "" DECIMAL_SEPARATOR = current.deployment_settings.get_L10n_decimal_separator() str_number = unicode(number) if "." in str_number: int_part, dec_part = str_number.split(".") if precision is not None: dec_part = dec_part[:precision] else: int_part, dec_part = str_number, "" if int(dec_part) == 0: dec_part = "" elif precision is not None: dec_part = dec_part + ("0" * (precision - len(dec_part))) if dec_part: dec_part = DECIMAL_SEPARATOR + dec_part int_part = IS_INT_AMOUNT.represent(int(int_part)) return int_part + dec_part # ------------------------------------------------------------------------- @staticmethod def widget(f, v, **attributes): from gluon.sqlhtml import StringWidget attr = Storage(attributes) classes = attr.get("_class", "").split(" ") classes = " ".join([c for c in classes if c != "double"]) _class = "%s float_amount" % classes attr.update(_class=_class) return StringWidget.widget(f, v, **attr) # ============================================================================= class IS_HTML_COLOUR(IS_MATCH): """ example:: INPUT(_type="text", _name="name", requires=IS_HTML_COLOUR()) """ def __init__(self, error_message="must be a 6 digit hex code! 
(format: rrggbb)" ): IS_MATCH.__init__(self, "^[0-9a-fA-F]{6}$", error_message) # ============================================================================= regex1 = re.compile("[\w_]+\.[\w_]+") regex2 = re.compile("%\((?P<name>[^\)]+)\)s") class IS_ONE_OF_EMPTY(Validator): """ Filtered version of IS_IN_DB(): validates a given value as key of another table, filtered by the 'filterby' field for one of the 'filter_opts' options (=a selective IS_IN_DB()) NB Filtering isn't active in GQL. For the dropdown representation: 'label' can be a string template for the record, or a set of field names of the fields to be used as option labels, or a function or lambda to create an option label from the respective record (which has to return a string, of course). The function will take the record as an argument. No 'options' method as designed to be called next to an Autocomplete field so don't download a large dropdown unnecessarily. """ def __init__(self, dbset, field, label=None, filterby=None, filter_opts=None, not_filterby=None, not_filter_opts=None, realms=None, updateable=False, instance_types=None, error_message="invalid value!", orderby=None, groupby=None, left=None, multiple=False, zero="", sort=True, _and=None, ): """ Validator for foreign keys. @param dbset: a Set of records like db(query), or db itself @param field: the field in the referenced table @param label: lookup method for the label corresponding a value, alternatively a string template to be filled with values from the record @param filterby: a field in the referenced table to filter by @param filter_opts: values for the filterby field which indicate records to include @param not_filterby: a field in the referenced table to filter by @param not_filter_opts: values for not_filterby field which indicate records to exclude @param realms: only include records belonging to the listed realms (if None, all readable records will be included) @param updateable: only include records in the referenced table which can be updated by the user (if False, all readable records will be included) @param instance_types: if the referenced table is a super-entity, then only include these instance types (this parameter is required for super entity lookups!) 
@param error_message: the error message to return for failed validation @param orderby: orderby for the options @param groupby: groupby for the options @param left: additional left joins required for the options lookup (super-entity instance left joins will be included automatically) @param multiple: allow multiple values (for list:reference types) @param zero: add this as label for the None-option (allow selection of "None") @param sort: sort options alphabetically by their label @param _and: internal use """ if hasattr(dbset, "define_table"): self.dbset = dbset() else: self.dbset = dbset (ktable, kfield) = str(field).split(".") if not label: label = "%%(%s)s" % kfield if isinstance(label, str): if regex1.match(str(label)): label = "%%(%s)s" % str(label).split(".")[-1] ks = regex2.findall(label) if not kfield in ks: ks += [kfield] fields = ["%s.%s" % (ktable, k) for k in ks] elif hasattr(label, "bulk"): # S3Represent ks = [kfield] if label.custom_lookup: # Represent uses a custom lookup, so we only # retrieve the keys here fields = [kfield] orderby = field else: # Represent uses a standard field lookup, so # we can do that right here label._setup() fields = list(label.fields) if kfield not in fields: fields.insert(0, kfield) # Unlikely, but possible: represent and validator # using different keys - commented for now for # performance reasons (re-enable if ever necessary) #key = label.key #if key and key not in fields: #fields.insert(0, key) else: ks = [kfield] try: table = current.s3db[ktable] fields =[str(f) for f in table if f.name not in ("wkt", "the_geom")] except RuntimeError: fields = "all" self.fields = fields self.label = label self.ktable = ktable if not kfield or not len(kfield): self.kfield = "id" else: self.kfield = kfield self.ks = ks self.error_message = error_message self.theset = None self.orderby = orderby self.groupby = groupby self.left = left self.multiple = multiple self.zero = zero self.sort = sort self._and = _and self.filterby = filterby self.filter_opts = filter_opts self.not_filterby = not_filterby self.not_filter_opts = not_filter_opts self.realms = realms self.updateable = updateable self.instance_types = instance_types # ------------------------------------------------------------------------- def set_self_id(self, id): if self._and: self._and.record_id = id # ------------------------------------------------------------------------- def set_filter(self, filterby = None, filter_opts = None, not_filterby = None, not_filter_opts = None): """ This can be called from prep to apply a filter based on data in the record or the primary resource id. """ if filterby: self.filterby = filterby if filter_opts: self.filter_opts = filter_opts if not_filterby: self.not_filterby = not_filterby if not_filter_opts: self.not_filter_opts = not_filter_opts # ------------------------------------------------------------------------- def build_set(self): dbset = self.dbset db = dbset._db ktablename = self.ktable if ktablename not in db: table = current.s3db.table(ktablename, db_only=True) else: table = db[ktablename] if table: if self.fields == "all": fields = [table[f] for f in table.fields if f not in ("wkt", "the_geom")] else: fieldnames = [f.split(".")[1] if "." 
in f else f for f in self.fields] fields = [table[k] for k in fieldnames if k in table.fields] if db._dbname not in ("gql", "gae"): orderby = self.orderby or reduce(lambda a, b: a|b, fields) groupby = self.groupby dd = dict(orderby=orderby, groupby=groupby) query, left = self.query(table, fields=fields, dd=dd) if left is not None: if self.left is not None: if not isinstance(left, list): left = [left] ljoins = [str(join) for join in self.left] for join in left: ljoin = str(join) if ljoin not in ljoins: self.left.append(join) ljoins.append(ljoin) else: self.left = left if self.left is not None: dd.update(left=self.left) # Make sure we have all ORDERBY fields in the query # (otherwise postgresql will complain) fieldnames = [str(f) for f in fields] for f in s3_orderby_fields(table, dd.get("orderby")): if str(f) not in fieldnames: fields.append(f) fieldnames.append(str(f)) records = dbset(query).select(distinct=True, *fields, **dd) else: # Note this does not support filtering. orderby = self.orderby or \ reduce(lambda a, b: a|b, (f for f in fields if f.type != "id")) # Caching breaks Colorbox dropdown refreshes #dd = dict(orderby=orderby, cache=(current.cache.ram, 60)) dd = dict(orderby=orderby) records = dbset.select(db[self.ktable].ALL, **dd) self.theset = [str(r[self.kfield]) for r in records] label = self.label try: # Is callable if hasattr(label, "bulk"): # S3Represent => use bulk option d = label.bulk(None, rows=records, list_type=False, show_link=False) labels = [d.get(r[self.kfield], d[None]) for r in records] else: # Standard representation function labels = map(label, records) except TypeError: if isinstance(label, str): labels = map(lambda r: label % dict(r), records) elif isinstance(label, (list, tuple)): labels = map(lambda r: \ " ".join([r[l] for l in label if l in r]), records) elif "name" in table: labels = map(lambda r: r.name, records) else: labels = map(lambda r: r[self.kfield], records) self.labels = labels if labels and self.sort: items = zip(self.theset, self.labels) # Alternative variant that handles generator objects, # doesn't seem necessary, retained here just in case: #orig_labels = self.labels #orig_theset = self.theset #items = [] #for i in xrange(len(orig_theset)): #label = orig_labels[i] ##if hasattr(label, "flatten"): ##try: ##label = label.flatten() ##except: ##pass #items.append((orig_theset[i], label)) items.sort(key=lambda item: s3_unicode(item[1]).lower()) self.theset, self.labels = zip(*items) else: self.theset = None self.labels = None # ------------------------------------------------------------------------- def query(self, table, fields=None, dd=None): """ Construct the query to lookup the options (separated from build_set so the query can be extracted and used in other lookups, e.g. filter options). @param table: the lookup table @param fields: fields (updatable list) @param dd: additional query options (updatable dict) """ # Accessible-query method = "update" if self.updateable else "read" query, left = self.accessible_query(method, table, instance_types=self.instance_types) # Available-query if "deleted" in table: query &= (table["deleted"] != True) # Realms filter? 
if self.realms: auth = current.auth if auth.is_logged_in() and \ auth.get_system_roles().ADMIN in auth.user.realms: # Admin doesn't filter pass else: query &= auth.permission.realm_query(table, self.realms) all_fields = [str(f) for f in fields] if fields is not None else [] filterby = self.filterby if filterby and filterby in table: filter_opts = self.filter_opts if filter_opts: if None in filter_opts: # Needs special handling (doesn't show up in 'belongs') _query = (table[filterby] == None) filter_opts = [f for f in filter_opts if f is not None] if filter_opts: _query = _query | (table[filterby].belongs(filter_opts)) query &= _query else: query &= (table[filterby].belongs(filter_opts)) if not self.orderby and \ fields is not None and dd is not None: filterby_field = table[filterby] if dd is not None: dd.update(orderby=filterby_field) if str(filterby_field) not in all_fields: fields.append(filterby_field) all_fields.append(str(filterby_field)) not_filterby = self.not_filterby if not_filterby and not_filterby in table: not_filter_opts = self.not_filter_opts if not_filter_opts: if None in not_filter_opts: # Needs special handling (doesn't show up in 'belongs') _query = (table[not_filterby] == None) not_filter_opts = [f for f in not_filter_opts if f is not None] if not_filter_opts: _query = _query | (table[not_filterby].belongs(not_filter_opts)) query &= (~_query) else: query &= (~(table[not_filterby].belongs(not_filter_opts))) if not self.orderby and \ fields is not None and dd is not None: filterby_field = table[not_filterby] if dd is not None: dd.update(orderby=filterby_field) if str(filterby_field) not in all_fields: fields.append(filterby_field) all_fields.append(str(filterby_field)) return query, left # ------------------------------------------------------------------------- @classmethod def accessible_query(cls, method, table, instance_types=None): """ Returns an accessible query (and left joins, if necessary) for records in table the user is permitted to access with method @param method: the method (e.g. "read" or "update") @param table: the table @param instance_types: list of instance tablenames, if table is a super-entity (required in this case!) @return: tuple (query, left) where query is the query and left joins is the list of left joins required for the query @note: for higher security policies and super-entities with many instance types this can give a very complex query. 
Try to always limit the instance types to what is really needed """ DEFAULT = (table._id > 0) left = None if "instance_type" in table: # Super-entity if not instance_types: return DEFAULT, left query = None auth = current.auth s3db = current.s3db for instance_type in instance_types: itable = s3db.table(instance_type) if itable is None: continue join = itable.on(itable[table._id.name] == table._id) if left is None: left = [join] else: left.append(join) q = (itable._id != None) & \ auth.s3_accessible_query(method, itable) if "deleted" in itable: q &= itable.deleted != True if query is None: query = q else: query |= q if query is None: query = DEFAULT else: query = current.auth.s3_accessible_query(method, table) return query, left # ------------------------------------------------------------------------- # Removed as we don't want any options downloaded unnecessarily #def options(self): # ------------------------------------------------------------------------- def __call__(self, value): try: dbset = self.dbset table = dbset._db[self.ktable] deleted_q = ("deleted" in table) and (table["deleted"] == False) or False filter_opts_q = False filterby = self.filterby if filterby and filterby in table: filter_opts = self.filter_opts if filter_opts: if None in filter_opts: # Needs special handling (doesn't show up in 'belongs') filter_opts_q = (table[filterby] == None) filter_opts = [f for f in filter_opts if f is not None] if filter_opts: filter_opts_q |= (table[filterby].belongs(filter_opts)) else: filter_opts_q = (table[filterby].belongs(filter_opts)) if self.multiple: if isinstance(value, list): values = [str(v) for v in value] elif isinstance(value, basestring) and \ value[0] == "|" and value[-1] == "|": values = value[1:-1].split("|") elif value: values = [value] else: values = [] if self.theset: if not [x for x in values if not x in self.theset]: return (values, None) else: return (value, self.error_message) else: field = table[self.kfield] query = None for v in values: q = (field == v) query = query is not None and query | q or q if filter_opts_q != False: query = query is not None and \ (filter_opts_q & (query)) or filter_opts_q if deleted_q != False: query = query is not None and \ (deleted_q & (query)) or deleted_q if dbset(query).count() < 1: return (value, self.error_message) return (values, None) elif self.theset: if str(value) in self.theset: if self._and: return self._and(value) else: return (value, None) else: values = [value] query = None for v in values: q = (table[self.kfield] == v) query = query is not None and query | q or q if filter_opts_q != False: query = query is not None and \ (filter_opts_q & (query)) or filter_opts_q if deleted_q != False: query = query is not None and \ (deleted_q & (query)) or deleted_q if dbset(query).count(): if self._and: return self._and(value) else: return (value, None) except: pass return (value, self.error_message) # ============================================================================= class IS_ONE_OF(IS_ONE_OF_EMPTY): """ Extends IS_ONE_OF_EMPTY by restoring the 'options' method. 
""" def options(self, zero=True): self.build_set() theset, labels = self.theset, self.labels if theset is None or labels is None: items = [] else: items = zip(theset, labels) if zero and self.zero is not None and not self.multiple: items.insert(0, ("", self.zero)) return items # ============================================================================= class IS_ONE_OF_EMPTY_SELECT(IS_ONE_OF_EMPTY): """ Extends IS_ONE_OF_EMPTY by displaying an empty SELECT (instead of INPUT) """ def options(self, zero=True): return [("", "")] # ============================================================================= class IS_NOT_ONE_OF(IS_NOT_IN_DB): """ Filtered version of IS_NOT_IN_DB() - understands the 'deleted' field. - makes the field unique (amongst non-deleted field) Example: - INPUT(_type="text", _name="name", requires=IS_NOT_ONE_OF(db, db.table)) """ def __call__(self, value): value = str(value) if not value.strip(): return (value, translate(self.error_message)) if value in self.allowed_override: return (value, None) (tablename, fieldname) = str(self.field).split(".") dbset = self.dbset table = dbset.db[tablename] field = table[fieldname] query = (field == value) if "deleted" in table: query = (table["deleted"] == False) & query rows = dbset(query).select(limitby=(0, 1)) if len(rows) > 0: if isinstance(self.record_id, dict): for f in self.record_id: if str(getattr(rows[0], f)) != str(self.record_id[f]): return (value, translate(self.error_message)) elif str(rows[0][table._id.name]) != str(self.record_id): return (value, translate(self.error_message)) return (value, None) # ============================================================================= class IS_LOCATION(Validator): """ Allow all locations, or locations by level. """ def __init__(self, level = None, error_message = None ): self.level = level # can be a List or a single element self.error_message = error_message # Make it like IS_ONE_OF to support AddResourceLink self.ktable = "gis_location" self.kfield = "id" # Tell s3_mark_required that this validator doesn't accept NULL values self.mark_required = True # ------------------------------------------------------------------------- def __call__(self, value): level = self.level if level == "L0": # Use cached countries. This returns name if id is for a country. try: location_id = int(value) except ValueError: ok = False else: ok = current.gis.get_country(location_id) else: db = current.db table = db.gis_location query = (table.id == value) & (table.deleted == False) if level: if isinstance(level, (tuple, list)): if None in level: # None needs special handling level = [l for l in level if l is not None] query &= ((table.level.belongs(level)) | \ (table.level == None)) else: query &= (table.level.belongs(level)) else: query &= (table.level == level) ok = db(query).select(table.id, limitby=(0, 1)).first() if ok: return (value, None) else: return (value, self.error_message or current.T("Invalid Location!")) # ============================================================================= class IS_LOCATION_SELECTOR(Validator): """ Designed for use within the S3LocationSelectorWidget. 
For Create forms, this will create a new location from the additional fields For Update forms, this will check that we have a valid location_id FK and update any changes @ToDo: Audit """ def __init__(self, error_message = None, ): self.error_message = error_message self.errors = Storage() self.id = None # Tell s3_mark_required that this validator doesn't accept NULL values self.mark_required = True # ------------------------------------------------------------------------- def __call__(self, value): if current.response.s3.bulk: # Pointless in imports return (value, None) db = current.db table = db.gis_location if value == "dummy": # Create form if not current.auth.s3_has_permission("create", table): return (None, current.auth.messages.access_denied) location = self._process_values() if self.errors: errors = self.errors error = "" for e in errors: error = "%s\n%s" % (error, errors[e]) if error else errors[e] return (None, error) if location.name or location.lat or location.lon or location.wkt or \ location.street or location.postcode or location.parent: vars = dict(name = location.name, lat = location.lat, lon = location.lon, wkt = location.wkt, gis_feature_type = location.gis_feature_type, addr_street = location.street, addr_postcode = location.postcode, parent = location.parent, lon_min = location.lon_min, lon_max = location.lon_max, lat_min = location.lat_min, lat_max = location.lat_max ) if vars["wkt"] and current.deployment_settings.get_gis_spatialdb(): # Also populate the spatial field vars["the_geom"] = vars["wkt"] value = table.insert(**vars) # onaccept vars["id"] = value current.gis.update_location_tree(vars) return (value, None) else: return (None, None) else: # This must be an Update form if not current.auth.s3_has_permission("update", table, record_id=value): return (value, current.auth.messages.access_denied) # Check that this is a valid location_id query = (table.id == value) & \ (table.deleted == False) & \ (table.level == None) # NB Specific Locations only location = db(query).select(table.id, limitby=(0, 1)).first() if location: # Update the record, in case changes have been made self.id = value location = self._process_values() if self.errors: errors = self.errors error = "" for e in errors: error = "%s\n%s" % (error, errors[e]) if error else errors[e] return (value, error) vars = dict(name = location.name, lat = location.lat, lon = location.lon, inherited = location.inherited, addr_street = location.street, addr_postcode = location.postcode, parent = location.parent, wkt = location.wkt, lon_min = location.lon_min, lon_max = location.lon_max, lat_min = location.lat_min, lat_max = location.lat_max ) if vars["wkt"] and current.deployment_settings.get_gis_spatialdb(): # Also populate the spatial field vars["the_geom"] = vars["wkt"] db(table.id == value).update(**vars) # onaccept vars["id"] = value current.gis.update_location_tree(vars) return (value, None) else: return (value, self.error_message or current.T("Invalid Location!")) # ------------------------------------------------------------------------- def _process_values(self): """ Read the request.vars & prepare for a record insert/update Note: This is also used by IS_SITE_SELECTOR() """ # Rough check for valid Lat/Lon (detailed later) post_vars = current.request.post_vars lat = post_vars.get("gis_location_lat", None) lon = post_vars.get("gis_location_lon", None) if lat: try: lat = float(lat) except ValueError: self.errors["lat"] = current.T("Latitude is Invalid!") if lon: try: lon = float(lon) except ValueError: 
self.errors["lon"] = current.T("Longitude is Invalid!") if self.errors: return None L0 = post_vars.get("gis_location_L0", None) db = current.db table = db.gis_location # Are we allowed to create Locations? auth = current.auth def permitted_to_create(): if not auth.s3_has_permission("create", table): self.errors["location_id"] = auth.messages.access_denied return False return True # What level of hierarchy are we allowed to edit? s3db = current.s3db if auth.s3_has_role(current.session.s3.system_roles.MAP_ADMIN): # 'MapAdmin' always has permission to edit hierarchy locations L1_allowed = True L2_allowed = True L3_allowed = True L4_allowed = True L5_allowed = True else: if L0: htable = s3db.gis_hierarchy query = (htable.location_id == L0) config = db(query).select(htable.edit_L1, htable.edit_L2, htable.edit_L3, htable.edit_L4, htable.edit_L5, limitby=(0, 1)).first() if L0 and config: # Lookup each level individually L1_allowed = config.edit_L1 L2_allowed = config.edit_L2 L3_allowed = config.edit_L3 L4_allowed = config.edit_L4 L5_allowed = config.edit_L5 else: # default is True L1_allowed = True L2_allowed = True L3_allowed = True L4_allowed = True L5_allowed = True # We don't need to do onvalidation of the Location Hierarchy records # separately as we don't have anything extra to validate than we have # done already onaccept = current.gis.update_location_tree L1 = post_vars.get("gis_location_L1", None) L2 = post_vars.get("gis_location_L2", None) L3 = post_vars.get("gis_location_L3", None) L4 = post_vars.get("gis_location_L4", None) L5 = post_vars.get("gis_location_L5", None) # Check if we have parents to create # L1 if L1: try: # Is this an ID? L1 = int(L1) # Do we need to update it's parent? if L0: location = db(table.id == L1).select(table.name, table.parent, limitby=(0, 1) ).first() if location and (location.parent != int(L0)): db(query).update(parent = L0) location["level"] = "L1" location["id"] = L1 onaccept(location) except: # Name # Test for duplicates query = (table.name == L1) & (table.level == "L1") if L0: query &= (table.parent == L0) location = db(query).select(table.id, limitby=(0, 1)).first() if location: # Use Existing record L1 = location.id elif L1_allowed: if permitted_to_create(): if L0: f = dict(name = L1, level = "L1", parent = L0, ) L1 = table.insert(**f) f["id"] = L1 onaccept(f) else: f = dict(name=L1, level="L1", ) L1 = table.insert(**f) f["id"] = L1 onaccept(f) else: return None else: L1 = None # L2 if L2: try: # Is this an ID? L2 = int(L2) # Do we need to update it's parent? if L1: location = db(table.id == L2).select(table.name, table.parent, limitby=(0, 1)).first() if location and (location.parent != L1): db(query).update(parent=L1) location["level"] = "L2" location["id"] = L2 onaccept(location) except: # Name # Test for duplicates # @ToDo: Also check for L2 parenting direct to L0 query = (table.name == L2) & (table.level == "L2") if L1: query &= (table.parent == L1) location = db(query).select(table.id, limitby=(0, 1)).first() if location: # Use Existing record L2 = location.id elif L2_allowed: if permitted_to_create(): if L1: f = dict(name=L2, level="L2", parent=L1, ) L2 = table.insert(**f) f["id"] = L2 onaccept(f) elif L0: f = dict(name=L2, level="L2", parent=L0, ) L2 = table.insert(**f) f["id"] = L2 onaccept(f) else: f = dict(name=L2, level="L2", ) L2 = table.insert(**f) f["id"] = L2 onaccept(f) else: return None else: L2 = None # L3 if L3: try: # Is this an ID? L3 = int(L3) # Do we need to update it's parent? 
if L2: location = db(table.id == L3).select(table.name, table.parent, limitby=(0, 1)).first() if location and (location.parent != L2): db(query).update(parent=L2) location["level"] = "L3" location["id"] = L3 onaccept(location) except: # Name # Test for duplicates # @ToDo: Also check for L3 parenting direct to L0/1 query = (table.name == L3) & (table.level == "L3") if L2: query &= (table.parent == L2) location = db(query).select(table.id, limitby=(0, 1)).first() if location: # Use Existing record L3 = location.id elif L3_allowed: if permitted_to_create(): if L2: f = dict(name=L3, level="L3", parent=L2, ) L3 = table.insert(**f) f["id"] = L3 onaccept(f) elif L1: f = dict(name=L3, level="L3", parent=L1, ) L3 = table.insert(**f) f["id"] = L3 onaccept(f) elif L0: f = dict(name=L3, level="L3", parent=L0, ) L3 = table.insert(**f) f["id"] = L3 onaccept(f) else: f = dict(name=L3, level="L3", ) L3 = table.insert(**f) f["id"] = L3 onaccept(f) else: return None else: L3 = None # L4 if L4: try: # Is this an ID? L4 = int(L4) # Do we need to update it's parent? if L3: location = db(table.id == L4).select(table.name, table.parent, limitby=(0, 1)).first() if location and (location.parent != L3): db(query).update(parent=L3) location["level"] = "L4" location["id"] = L4 onaccept(location) except: # Name # Test for duplicates # @ToDo: Also check for L4 parenting direct to L0/1/2 query = (table.name == L4) & (table.level == "L4") if L3: query &= (table.parent == L3) location = db(query).select(table.id, limitby=(0, 1)).first() if location: # Use Existing record L4 = location.id elif L4_allowed: if permitted_to_create(): if L3: f = dict(name=L4, level="L4", parent=L3, ) L4 = table.insert(**f) f["id"] = L4 onaccept(f) elif L2: f = dict(name=L4, level="L4", parent=L2, ) L4 = table.insert(**f) f["id"] = L4 onaccept(f) elif L1: f = dict(name=L4, level="L4", parent=L1, ) L4 = table.insert(**f) f["id"] = L4 onaccept(f) elif L0: f = dict(name=L4, level="L4", parent=L0, ) L4 = table.insert(**f) f["id"] = L4 onaccept(f) else: f = dict(name=L4, level="L4", ) L4 = table.insert(**f) f["id"] = L4 onaccept(f) else: return None else: L4 = None # L5 if L5: try: # Is this an ID? L5 = int(L5) # Do we need to update it's parent? 
if L4: location = db(table.id == L5).select(table.name, table.parent, limitby=(0, 1)).first() if location and (location.parent != L4): db(query).update(parent=L4) location["level"] = "L5" location["id"] = L5 onaccept(location) except: # Name # Test for duplicates # @ToDo: Also check for L5 parenting direct to L0/1/2/3 query = (table.name == L5) & (table.level == "L5") if L4: query &= (table.parent == L4) location = db(query).select(table.id, limitby=(0, 1)).first() if location: # Use Existing record L5 = location.id elif L5_allowed: if permitted_to_create(): if L4: f = dict(name=L5, level="L5", parent=L4, ) L5 = table.insert(**f) f["id"] = L5 onaccept(f) elif L3: f = dict(name=L5, level="L5", parent=L3, ) L5 = table.insert(**f) f["id"] = L5 onaccept(f) elif L2: f = dict(name=L5, level="L5", parent=L2, ) L5 = table.insert(**f) f["id"] = L5 onaccept(f) elif L1: f = dict(name=L5, level="L5", parent=L1, ) L5 = table.insert(**f) f["id"] = L5 onaccept(f) elif L0: f = dict(name=L5, level="L5", parent=L1, ) L5 = table.insert(**f) f["id"] = L5 onaccept(f) else: f = dict(name=L5, level="L5", ) L5 = table.insert(**f) f["id"] = L5 onaccept(f) else: return None else: L5 = None # Check if we have a specific location to create name = post_vars.get("gis_location_name", None) wkt = post_vars.get("gis_location_wkt", None) street = post_vars.get("gis_location_street", None) postcode = post_vars.get("gis_location_postcode", None) parent = L5 or L4 or L3 or L2 or L1 or L0 or None # Move vars into form. form = Storage() form.errors = dict() form.vars = Storage() form_vars = form.vars form_vars.lat = lat form_vars.lon = lon form_vars.wkt = wkt if wkt: # Polygon (will be corrected as-required by wkt_centroid) form_vars.gis_feature_type = "3" else: # Point form_vars.gis_feature_type = "1" form_vars.parent = parent if self.id: # Provide the old record to check inherited form.record = db(table.id == self.id).select(table.inherited, table.lat, table.lon, limitby=(0, 1)).first() # onvalidation s3db.gis_location_onvalidation(form) if form.errors: self.errors = form.errors return None location = Storage(name=name, lat=form_vars.lat, lon=form_vars.lon, inherited=form_vars.inherited, street=street, postcode=postcode, parent=parent, wkt = form_vars.wkt, gis_feature_type = form_vars.gis_feature_type, lon_min = form_vars.lon_min, lon_max = form_vars.lon_max, lat_min = form_vars.lat_min, lat_max = form_vars.lat_max ) return location # ============================================================================= class IS_SITE_SELECTOR(IS_LOCATION_SELECTOR): """ Extends the IS_LOCATION_SELECTOR() validator to transparently support Sites of the specified type. Note that these cannot include any other mandatory fields other than Name & location_id Designed for use within the S3LocationSelectorWidget with sites. For Create forms, this will create a new site & location from the additional fields For Update forms, this will normally just check that we have a valid site_id FK - although there is the option to create a new location there too, in which case it acts as-above. 
@ToDo: Audit """ def __init__(self, site_type = "project_site", error_message = None, ): self.error_message = error_message self.errors = Storage() self.id = None self.site_type = site_type # Tell s3_mark_required that this validator doesn't accept NULL values self.mark_required = True # ------------------------------------------------------------------------- def __call__(self, value): if current.response.s3.bulk: # Pointless in imports return (value, None) db = current.db auth = current.auth gis = current.gis table = db.gis_location stable = db[self.site_type] if value == "dummy": # Create form if not auth.s3_has_permission("create", stable): return (None, auth.messages.access_denied) location = self._process_values() if self.errors: errors = self.errors error = "" for e in errors: error = "%s\n%s" % (error, errors[e]) if error else errors[e] return (None, error) if location.name or location.lat or location.lon or \ location.street or location.postcode or location.parent: # Location creation vars = dict(name = location.name, lat = location.lat, lon = location.lon, addr_street = location.street, addr_postcode = location.postcode, parent = location.parent, wkt = form.vars.wkt, lon_min = form.vars.lon_min, lon_max = form.vars.lon_max, lat_min = form.vars.lat_min, lat_max = form.vars.lat_max ) location_id = table.insert(**vars) # Location onaccept vars["id"] = location_id gis.update_location_tree(vars) # Site creation value = stable.insert(name = location.name, location_id = location_id) return (value, None) else: return (None, None) else: # This must be an Update form if not auth.s3_has_permission("update", stable, record_id=value): return (value, auth.messages.access_denied) # Check that this is a valid site_id query = (stable.id == value) & \ (stable.deleted == False) site = db(query).select(stable.id, stable.name, stable.location_id, limitby=(0, 1)).first() location_id = site.location_id if site else None if location_id: # Update the location, in case changes have been made self.id = value location = self._process_values() if self.errors: errors = self.errors error = "" for e in errors: error = "%s\n%s" % (error, errors[e]) if error else errors[e] return (value, error) # Location update name = location.name vars = dict(name = name, lat = location.lat, lon = location.lon, addr_street = location.street, addr_postcode = location.postcode, parent = location.parent ) lquery = (table.id == location_id) db(lquery).update(**vars) # Location onaccept vars["id"] = location_id gis.update_location_tree(vars) if stable.name != name: # Site Name has changed db(query).update(name = name) return (value, None) return (value, self.error_message or current.T("Invalid Site!")) # ============================================================================= class IS_ADD_PERSON_WIDGET(Validator): """ Validator for S3AddPersonWidget """ def __init__(self, error_message=None): self.error_message = error_message # Tell s3_mark_required that this validator doesn't accept NULL values self.mark_required = True # ------------------------------------------------------------------------- def __call__(self, value): if current.response.s3.bulk: # Pointless in imports return (value, None) person_id = None if value: try: person_id = int(value) except: pass request = current.request if request.env.request_method == "POST": if "import" in request.args: # Widget Validator not appropriate for this context return (person_id, None) T = current.T db = current.db s3db = current.s3db ptable = db.pr_person ctable = db.pr_contact def 
email_validate(value, person_id): """ Validate the email address """ error_message = T("Please enter a valid email address") if value is not None: value = value.strip() # No email? if not value: email_required = \ current.deployment_settings.get_hrm_email_required() if email_required: return (value, error_message) return (value, None) # Valid email? value, error = IS_EMAIL()(value) if error: return value, error_message # Unique email? query = (ctable.deleted != True) & \ (ctable.contact_method == "EMAIL") & \ (ctable.value == value) if person_id: query &= (ctable.pe_id == ptable.pe_id) & \ (ptable.id != person_id) email = db(query).select(ctable.id, limitby=(0, 1)).first() if email: error_message = T("This email-address is already registered.") return value, error_message # Ok! return value, None _vars = request.post_vars mobile = _vars["mobile_phone"] if mobile: # Validate the phone number regex = re.compile(single_phone_number_pattern) if not regex.match(mobile): error = T("Invalid phone number") return (person_id, error) if person_id: # Filter out location_id (location selector form values # being processed only after this widget has been validated) _vars = Storage([(k, _vars[k]) for k in _vars if k != "location_id"]) # Validate and update the person record query = (ptable.id == person_id) data = Storage() for f in ptable._filter_fields(_vars): value, error = s3_validate(ptable, f, _vars[f]) if error: return (person_id, error) if value: if f == "date_of_birth": data[f] = value.isoformat() else: data[f] = value if data: db(query).update(**data) # Update the contact information & details record = db(query).select(ptable.pe_id, limitby=(0, 1)).first() if record: pe_id = record.pe_id r = ctable(pe_id=pe_id, contact_method="EMAIL") email = _vars["email"] if email: query = (ctable.pe_id == pe_id) & \ (ctable.contact_method == "EMAIL") &\ (ctable.deleted != True) r = db(query).select(ctable.value, limitby=(0, 1)).first() if r: # update if email != r.value: db(query).update(value=email) else: # insert ctable.insert(pe_id=pe_id, contact_method="EMAIL", value=email) if mobile: query = (ctable.pe_id == pe_id) & \ (ctable.contact_method == "SMS") &\ (ctable.deleted != True) r = db(query).select(ctable.value, limitby=(0, 1)).first() if r: # update if mobile != r.value: db(query).update(value=mobile) else: # insert ctable.insert(pe_id=pe_id, contact_method="SMS", value=mobile) occupation = _vars["occupation"] if occupation: pdtable = s3db.pr_person_details query = (pdtable.person_id == person_id) & \ (pdtable.deleted != True) r = db(query).select(pdtable.occupation, limitby=(0, 1)).first() if r: # update if occupation != r.occupation: db(query).update(occupation=occupation) else: # insert pdtable.insert(person_id=person_id, occupation=occupation) else: # Create a new person record # Filter out location_id (location selector form values # being processed only after this widget has been validated) _vars = Storage([(k, _vars[k]) for k in _vars if k != "location_id"]) # Validate the email email, error = email_validate(_vars.email, None) if error: return (None, error) # Validate and add the person record for f in ptable._filter_fields(_vars): value, error = s3_validate(ptable, f, _vars[f]) if error: return (None, error) elif f == "date_of_birth" and \ value: _vars[f] = value.isoformat() person_id = ptable.insert(**ptable._filter_fields(_vars)) # Need to update post_vars here, # for some reason this doesn't happen through validation alone request.post_vars.update(person_id=str(person_id)) if person_id: # 
Update the super-entities s3db.update_super(ptable, dict(id=person_id)) # Read the created pe_id query = (ptable.id == person_id) person = db(query).select(ptable.pe_id, limitby=(0, 1)).first() # Add contact information as provided if _vars.email: ctable.insert(pe_id=person.pe_id, contact_method="EMAIL", value=_vars.email) if mobile: ctable.insert(pe_id=person.pe_id, contact_method="SMS", value=_vars.mobile_phone) if _vars.occupation: s3db.pr_person_details.insert(person_id = person_id, occupation = _vars.occupation) else: # Something went wrong return (None, self.error_message or \ T("Could not add person record")) return (person_id, None) # ============================================================================= class IS_ADD_PERSON_WIDGET2(Validator): """ Validator for S3AddPersonWidget2 @ToDo: get working human_resource_id """ def __init__(self, error_message=None, allow_empty=False): """ Constructor @param error_message: alternative error message @param allow_empty: allow the selector to be left empty @note: This validator can *not* be used together with IS_EMPTY_OR, because when a new person gets entered, the submitted value for person_id would be None and hence satisfy IS_EMPTY_OR, and then this validator would never be reached and no new person record would be created => instead of IS_EMPTY_OR, use IS_ADD_PERSON_WIDGET2(allow_empty=True). """ self.error_message = error_message self.allow_empty = allow_empty # Tell s3_mark_required that this validator doesn't accept NULL values self.mark_required = not allow_empty # ------------------------------------------------------------------------- def __call__(self, value): if current.response.s3.bulk: # Pointless in imports return (value, None) person_id = None if value: try: person_id = int(value) except: pass if person_id: # Nothing to do here - we can't change values within this widget return (person_id, None) request = current.request if request.env.request_method == "POST": if "import" in request.args: # Widget Validator not appropriate for this context return (person_id, None) T = current.T db = current.db s3db = current.s3db ptable = db.pr_person ctable = s3db.pr_contact def name_split(name): """ Split a full name into First Middle Last NB This *will* cause issues as people often have multi-word firstnames and surnames http://stackoverflow.com/questions/259634/splitting-a-persons-name-into-forename-and-surname http://stackoverflow.com/questions/159567/how-can-i-parse-the-first-middle-and-last-name-from-a-full-name-field-in-sql """ #names = name.split(" ") # Remove prefixes & suffixes #bad = ("mr", "mrs", "ms", "dr", "eng", # "jr", "sr", "esq", "junior", "senior", # "ii", "iii", "iv", "v", # "2nd", "3rd", "4th", "5th", # ) #names = filter(lambda x: x.lower() not in bad, names) # Assume First Name is a single word #first_name = names[0] # Assume Last Name is a single word! #if len(names) > 1: # last_name = names[-1] #else: # last_name = None # Assume all other names go into the Middle Name #if len(names) > 2: # middle_name = " ".join(names[1:-1]) #else: # middle_name = None #return first_name, middle_name, last_name # https://github.com/derek73/python-nameparser from nameparser import HumanName name = HumanName(name) # @ToDo?: name.nickname return name.first, name.middle, name.last def email_validate(value, person_id): """ Validate the email address """ error_message = T("Please enter a valid email address") if value is not None: value = value.strip() # No email? 
if not value: email_required = \ current.deployment_settings.get_hrm_email_required() if email_required: return (value, error_message) return (value, None) # Valid email? value, error = IS_EMAIL()(value) if error: return value, error_message # Unique email? query = (ctable.deleted != True) & \ (ctable.contact_method == "EMAIL") & \ (ctable.value == value) if person_id: query &= (ctable.pe_id == ptable.pe_id) & \ (ptable.id != person_id) email = db(query).select(ctable.id, limitby=(0, 1)).first() if email: error_message = T("This email-address is already registered.") return value, error_message # Ok! return value, None post_vars = request.post_vars mobile = post_vars["mobile_phone"] if mobile: # Validate the mobile phone number validator = IS_PHONE_NUMBER(international = True) mobile, error = validator(mobile) if error: return (person_id, error) home_phone = post_vars.get("home_phone", None) if home_phone: # Validate the home phone number validator = IS_PHONE_NUMBER() mobile, error = validator(mobile) if error: return (person_id, error) #if person_id: # # Filter out location_id (location selector form values # # being processed only after this widget has been validated) # post_vars = Storage([(k, post_vars[k]) # for k in post_vars if k != "location_id"]) # # Separate the Name into components # first_name, middle_name, last_name = name_split(post_vars["full_name"]) # post_vars["first_name"] = first_name # post_vars["middle_name"] = middle_name # post_vars["last_name"] = last_name # # Validate and update the person record # query = (ptable.id == person_id) # data = Storage() # for f in ptable._filter_fields(post_vars): # value, error = s3_validate(ptable, f, post_vars[f]) # if error: # return (person_id, error) # if value: # if f == "date_of_birth": # data[f] = value.isoformat() # else: # data[f] = value # if data: # db(query).update(**data) # # Update the contact information & details # record = db(query).select(ptable.pe_id, # limitby=(0, 1)).first() # if record: # pe_id = record.pe_id # r = ctable(pe_id=pe_id, contact_method="EMAIL") # email = post_vars["email"] # if email: # query = (ctable.pe_id == pe_id) & \ # (ctable.contact_method == "EMAIL") &\ # (ctable.deleted != True) # r = db(query).select(ctable.value, # limitby=(0, 1)).first() # if r: # update # if email != r.value: # db(query).update(value=email) # else: # insert # ctable.insert(pe_id=pe_id, # contact_method="EMAIL", # value=email) # if mobile: # query = (ctable.pe_id == pe_id) & \ # (ctable.contact_method == "SMS") &\ # (ctable.deleted != True) # r = db(query).select(ctable.value, # limitby=(0, 1)).first() # if r: # update # if mobile != r.value: # db(query).update(value=mobile) # else: # insert # ctable.insert(pe_id=pe_id, # contact_method="SMS", # value=mobile) # if home_phone: # query = (ctable.pe_id == pe_id) & \ # (ctable.contact_method == "HOME_PHONE") &\ # (ctable.deleted != True) # r = db(query).select(ctable.value, # limitby=(0, 1)).first() # if r: # update # if home_phone != r.value: # db(query).update(value=home_phone) # else: # insert # ctable.insert(pe_id=pe_id, # contact_method="HOME_PHONE", # value=home_phone) # occupation = post_vars.get("occupation", None) # if occupation: # pdtable = s3db.pr_person_details # query = (pdtable.person_id == person_id) & \ # (pdtable.deleted != True) # r = db(query).select(pdtable.occupation, # limitby=(0, 1)).first() # if r: # update # if occupation != r.occupation: # db(query).update(occupation=occupation) # else: # insert # pdtable.insert(person_id=person_id, # 
occupation=occupation) #else: # Create a new person record # Filter out location_id (location selector form values # being processed only after this widget has been validated) post_vars = Storage([(k, post_vars[k]) for k in post_vars if k != "location_id"]) fullname = post_vars["full_name"] if not fullname and self.allow_empty: return None, None # Validate the email email, error = email_validate(post_vars.email, None) if error: return (None, error) # Separate the Name into components first_name, middle_name, last_name = name_split(fullname) post_vars["first_name"] = first_name post_vars["middle_name"] = middle_name post_vars["last_name"] = last_name # Validate and add the person record for f in ptable._filter_fields(post_vars): value, error = s3_validate(ptable, f, post_vars[f]) if error: return (None, error) elif f == "date_of_birth" and \ value: post_vars[f] = value.isoformat() person_id = ptable.insert(**ptable._filter_fields(post_vars)) # Need to update post_vars here, # for some reason this doesn't happen through validation alone request.post_vars.update(person_id=str(person_id)) if person_id: # Update the super-entities s3db.update_super(ptable, dict(id=person_id)) # Update realm current.auth.s3_set_record_owner(ptable, person_id) # Read the created pe_id query = (ptable.id == person_id) person = db(query).select(ptable.pe_id, limitby=(0, 1)).first() # Add contact information as provided if post_vars.email: ctable.insert(pe_id=person.pe_id, contact_method="EMAIL", value=post_vars.email) if mobile: ctable.insert(pe_id=person.pe_id, contact_method="SMS", value=post_vars.mobile_phone) if home_phone: ctable.insert(pe_id=person.pe_id, contact_method="HOME_PHONE", value=post_vars.home_phone) details = {} if post_vars.occupation: details["occupation"] = post_vars.occupation if post_vars.father_name: details["father_name"] = post_vars.father_name if post_vars.grandfather_name: details["grandfather_name"] = post_vars.grandfather_name if details: details["person_id"] = person_id s3db.pr_person_details.insert(**details) else: # Something went wrong return (person_id, self.error_message or \ T("Could not add person record")) return (person_id, None) # ============================================================================= class IS_PROCESSED_IMAGE(Validator): """ Uses an S3ImageCropWidget to allow the user to crop/scale images and processes the results sent by the browser. @param file_cb: callback that returns the file for this field @param error_message: the error message to be returned @param image_bounds: the boundaries for the processed image @param upload_path: upload path for the image """ def __init__(self, field_name, file_cb, error_message="No image was specified!", image_bounds=(300, 300), upload_path=None, ): self.field_name = field_name self.file_cb = file_cb self.error_message = error_message self.image_bounds = image_bounds self.upload_path = upload_path def __call__(self, value): if current.response.s3.bulk: # Pointless in imports return (value, None) r = current.request vars = r.post_vars if r.env.request_method == "GET": return (value, None) # If there's a newly uploaded file, accept it. It'll be processed in # the update form. # NOTE: A FieldStorage with data evaluates as False (odd!) 
file = vars.get(self.field_name)
        if file not in ("", None):
            return (file, None)

        encoded_file = vars.get("imagecrop-data")
        file = self.file_cb()

        if not (encoded_file or file):
            return value, current.T(self.error_message)

        # Decode the base64-encoded image from the client-side image crop
        # process, if that worked.
        if encoded_file:
            import base64
            import uuid
            try:
                from cStringIO import StringIO
            except ImportError:
                from StringIO import StringIO
            metadata, encoded_file = encoded_file.split(",")
            filename, datatype, enctype = metadata.split(";")
            f = Storage()
            f.filename = uuid.uuid4().hex + filename
            f.file = StringIO(base64.decodestring(encoded_file))

            return (f, None)

        # Crop the image, if we've got the crop points.
        points = vars.get("imagecrop-points")
        if points and file:
            import os
            points = map(float, points.split(","))

            if not self.upload_path:
                path = os.path.join(r.folder, "uploads", "images", file)
            else:
                path = os.path.join(self.upload_path, file)

            current.s3task.async("crop_image",
                                 args=[path] + points + [self.image_bounds[0]])

        return (None, None)

# =============================================================================
class IS_UTC_OFFSET(Validator):
    """
        Validates a given string value as UTC offset in the format +/-HHMM

        @param error_message:   the error message to be returned

        @note:
            all leading parts of the string (before the trailing offset
            specification) will be ignored; if the string passes through,
            the return value is normalised to +/-HHMM.
    """

    def __init__(self,
                 error_message="invalid UTC offset!"
                ):
        self.error_message = error_message

    # -------------------------------------------------------------------------
    def __call__(self, value):

        if value and isinstance(value, str):
            offset = S3DateTime.get_offset_value(value)
            if offset is not None:
                hours, seconds = divmod(abs(offset), 3600)
                minutes = int(seconds / 60)
                sign = "-" if offset < 0 else "+"
                return ("%s%02d%02d" % (sign, hours, minutes), None)

        return (value, self.error_message)

# =============================================================================
class IS_UTC_DATETIME(Validator):
    """
        Validates a given value as datetime string and returns the
        corresponding UTC datetime.
Example: - INPUT(_type="text", _name="name", requires=IS_UTC_DATETIME()) @param format: strptime/strftime format template string, for directives refer to your strptime implementation @param error_message: error message to be returned @param utc_offset: offset to UTC in seconds, if not specified, the value is considered to be UTC @param minimum: the minimum acceptable datetime @param maximum: the maximum acceptable datetime @note: datetime has to be in the ISO8960 format YYYY-MM-DD hh:mm:ss, with an optional trailing UTC offset specified as +/-HHMM (+ for eastern, - for western timezones) """ def __init__(self, format=None, error_message=None, utc_offset=None, minimum=None, maximum=None): if format is None: self.format = format = str(current.deployment_settings.get_L10n_datetime_format()) else: self.format = format = str(format) self.utc_offset = utc_offset self.minimum = minimum self.maximum = maximum delta = timedelta(seconds=self.delta()) min_local = minimum and minimum + delta or None max_local = maximum and maximum + delta or None if error_message is None: if minimum is None and maximum is None: error_message = current.T("enter date and time") elif minimum is None: error_message = current.T("enter date and time on or before %(max)s") elif maximum is None: error_message = current.T("enter date and time on or after %(min)s") else: error_message = current.T("enter date and time in range %(min)s %(max)s") if min_local: min = min_local.strftime(format) else: min = "" if max_local: max = max_local.strftime(format) else: max = "" self.error_message = error_message % dict(min = min, max = max) # ------------------------------------------------------------------------- def delta(self, utc_offset=None): if utc_offset is not None: self.utc_offset = utc_offset if self.utc_offset is None: self.utc_offset = current.session.s3.utc_offset offset, error = IS_UTC_OFFSET()(self.utc_offset) if error: self.utc_offset = "+0000" # fallback to UTC else: self.utc_offset = offset delta = S3DateTime.get_offset_value(self.utc_offset) return delta # ------------------------------------------------------------------------- def __call__(self, value): val = value.strip() # Get UTC offset if len(val) > 5 and val[-5] in ("+", "-") and val[-4:].isdigit(): # UTC offset specified in dtstr dtstr = val[0:-5].strip() utc_offset = val[-5:] else: # use default UTC offset dtstr = val utc_offset = self.utc_offset # Offset must be in range -2359 to +2359 offset = self.delta(utc_offset=utc_offset) if offset < -86340 or offset > 86340: return (val, self.error_message) # Convert into datetime object try: (y, m, d, hh, mm, ss, t0, t1, t2) = \ time.strptime(dtstr, self.format) dt = datetime(y, m, d, hh, mm, ss) except: try: (y, m, d, hh, mm, ss, t0, t1, t2) = \ time.strptime(dtstr + ":00", self.format) dt = datetime(y, m, d, hh, mm, ss) except: return(value, self.error_message) # Validate dt_utc = dt - timedelta(seconds=offset) if self.minimum and dt_utc < self.minimum or \ self.maximum and dt_utc > self.maximum: return (dt_utc, self.error_message) else: return (dt_utc, None) # ------------------------------------------------------------------------- def formatter(self, value): format = self.format offset = self.delta() if not value: return "-" elif offset: dt = value + timedelta(seconds=offset) return dt.strftime(format) else: dt = value return dt.strftime(format) + "+0000" # ============================================================================= class IS_ACL(IS_IN_SET): """ Validator for ACLs @attention: Incomplete! 
Does not validate yet, but just convert. """ def __call__(self, value): """ Validation @param value: the value to validate """ if not isinstance(value, (list, tuple)): value = [value] acl = 0x0000 for v in value: try: flag = int(v) except (ValueError, TypeError): flag = 0x0000 else: acl |= flag return (acl, None) # ============================================================================= class IS_COMBO_BOX(Validator): """ Designed for use with an Autocomplete. - catches any new entries & creates the appropriate record @ToDo: Audit """ def __init__(self, tablename, requires, # The normal validator error_message = None, ): self.tablename = tablename self.requires = requires self.error_message = error_message # ------------------------------------------------------------------------- def __call__(self, value): if not value: # Do the normal validation return self.requires(value) elif isinstance(value, int): # If this is an ID then this is an update form # @ToDo: Can we assume that? # Do the normal validation return self.requires(value) else: # Name => create form tablename = self.tablename db = current.db table = db[tablename] # Test for duplicates query = (table.name == value) r = db(query).select(table.id, limitby=(0, 1)).first() if r: # Use Existing record value = r.id return (value, None) if not current.auth.s3_has_permission("create", table): return (None, current.auth.messages.access_denied) value = table.insert(name=value) # onaccept onaccept = current.s3db.get_config(tablename, "onaccept") if onaccept: onaccept(form=Storage(vars=Storage(id=value))) return (value, None) # ============================================================================= class QUANTITY_INV_ITEM(Validator): """ For Inventory module """ def __init__(self, db, inv_item_id, item_pack_id ): self.inv_item_id = inv_item_id self.item_pack_id = item_pack_id current.db = db # ------------------------------------------------------------------------- def __call__(self, value): db = current.db args = current.request.args track_quantity = 0 if args[1] == "track_item" and len(args) > 2: # look to see if we already have a quantity stored in the track item id = args[2] track_record = current.s3db.inv_track_item[id] track_quantity = track_record.quantity if track_quantity >= float(value): # value reduced or unchanged return (value, None) error = "Invalid Quantity" # @todo: better error catching query = (db.inv_inv_item.id == self.inv_item_id) & \ (db.inv_inv_item.item_pack_id == db.supply_item_pack.id) inv_item_record = db(query).select(db.inv_inv_item.quantity, db.supply_item_pack.quantity, db.supply_item_pack.name, limitby = (0, 1)).first() # @todo: this should be a virtual field if inv_item_record and value: query = (db.supply_item_pack.id == self.item_pack_id) send_record = db(query).select(db.supply_item_pack.quantity, limitby=(0, 1)).first() send_quantity = (float(value) - track_quantity) * send_record.quantity inv_quantity = inv_item_record.inv_inv_item.quantity * \ inv_item_record.supply_item_pack.quantity if send_quantity > inv_quantity: return (value, "Only %s %s (%s) in the Warehouse Stock." % (inv_quantity, inv_item_record.supply_item_pack.name, inv_item_record.supply_item_pack.quantity) ) else: return (value, None) else: return (value, error) # ============================================================================= class IS_IN_SET_LAZY(Validator): """ Like IS_IN_SET but with options obtained from a supplied function. 
Options are instantiated when the validator or its options() method is called, so don't need to be generated until it's used. Useful if the field is not needed on every request, and does significant processing to construct its options, or generates a large collection. If the options are just from a database query, one can use IS_ONE_OF instead. Raises an exception if an options collection is passed rather than a callable as this is a programming error, e.g. accidentally *calling* the options function in the constructor instead of passing the function. That would not get lazy options instantiation. The options collection (theset) and labels collection parameters to IS_IN_SET are replaced by: @param theset_fn: Function of no arguments that returns a collection of options and (optionally) labels. Both options and labels can be supplied via a dict or OrderedDict (options are keys, values are labels), list (or tuple) of two-element lists (or tuples) (element 0 in each pair is an option, element 1 is it's label). Otherwise, labels are obtained either by calling the supplied represent function on each item produced by theset_fn, or (if no represent is supplied), the items themselves are used as labels. @param represent: Function of one argument that returns the label for a given option. If there is a function call that returns the collection, just put "lambda:" in front of the call. E.g.: Field("nationality", requires = IS_EMPTY_OR(IS_IN_SET_LAZY( lambda: gis.get_countries(key_type="code"))), label = T("Nationality"), represent = lambda code: gis.get_country(code, key_type="code") or UNKNOWN_OPT) Keyword parameters are same as for IS_IN_SET, except for labels, which is not replaced by a function that parallels theset_fn, since ordering is problematic if theset_fn returns a dict. """ def __init__( self, theset_fn, represent=None, error_message="value not allowed", multiple=False, zero="", sort=False, ): self.multiple = multiple if not callable(theset_fn): raise TypeError("Argument must be a callable.") self.theset_fn = theset_fn self.theset = None self.labels = None self.error_message = error_message self.zero = zero self.sort = sort # ------------------------------------------------------------------------- def _make_theset(self): theset = self.theset_fn() if theset: if isinstance(theset, dict): self.theset = [str(item) for item in theset] self.labels = theset.values() elif isinstance(theset, (tuple,list)): # @ToDo: Can this be a Rows? 
if isinstance(theset[0], (tuple,list)) and len(theset[0])==2: self.theset = [str(item) for item,label in theset] self.labels = [str(label) for item,label in theset] else: self.theset = [str(item) for item in theset] if represent: self.labels = [represent(item) for item in theset] else: self.theset = theset else: self.theset = [] # ------------------------------------------------------------------------- def options(self, zero=True): if not self.theset: self._make_theset() if not self.labels: items = [(k, k) for (i, k) in enumerate(self.theset)] else: items = [(k, self.labels[i]) for (i, k) in enumerate(self.theset)] if self.sort: items.sort(options_sorter) if zero and not self.zero is None and not self.multiple: items.insert(0, ("", self.zero)) return items # ------------------------------------------------------------------------- def __call__(self, value): if not self.theset: self._make_theset() if self.multiple: ### if below was values = re.compile("[\w\-:]+").findall(str(value)) if isinstance(value, (str,unicode)): values = [value] elif isinstance(value, (tuple, list)): values = value elif not value: values = [] else: values = [value] failures = [x for x in values if not x in self.theset] if failures and self.theset: if self.multiple and (value == None or value == ""): return ([], None) return (value, self.error_message) if self.multiple: if isinstance(self.multiple,(tuple,list)) and \ not self.multiple[0]<=len(values)<self.multiple[1]: return (values, self.error_message) return (values, None) return (value, None) # ============================================================================= class IS_TIME_INTERVAL_WIDGET(Validator): """ Simple validator for the S3TimeIntervalWidget, returns the selected time interval in seconds """ def __init__(self, field): self.field = field # ------------------------------------------------------------------------- def __call__(self, value): try: val = int(value) except ValueError: return (0, None) request = current.request _vars = request.post_vars try: mul = int(_vars[("%s_multiplier" % self.field).replace(".", "_")]) except ValueError: return (0, None) seconds = val * mul return (seconds, None) # ============================================================================= class IS_PERSON_GENDER(IS_IN_SET): """ Special validator for pr_person.gender and derivates, accepts the "O" option even if it's not in the set. """ def __call__(self, value): if value == 4: # 4 = other, always accepted even if hidden return value, None else: return super(IS_PERSON_GENDER, self).__call__(value) # ============================================================================= class IS_PHONE_NUMBER(Validator): """ Validator for single phone numbers with option to enforce E.123 international notation (with leading + and no punctuation or spaces). """ def __init__(self, international = False, error_message = None): """ Constructor @param international: enforce E.123 international notation, no effect if turned off globally in deployment settings @param error_message: alternative error message """ self.international = international self.error_message = error_message def __call__(self, value): """ Validation of a value @param value: the value @return: tuple (value, error), where error is None if value is valid. With international=True, the value returned is converted into E.123 international notation. 
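
                Illustrative example (assuming the input passes the basic
                single-phone-number check): with international=True and the
                corresponding deployment setting enabled, "+46 73 375 4957"
                would be returned as "+46733754957", while the same number
                without the leading "+" would fail with the international-format
                error message.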
""" T = current.T error_message = self.error_message number = str(value).strip() number, error = s3_single_phone_requires(number) if not error: if self.international and \ current.deployment_settings \ .get_msg_require_international_phone_numbers(): if not error_message: error_message = T("Enter phone number in international format like +46783754957") # Require E.123 international format number = "".join(re.findall("[\d+]+", number)) match = re.match("(\+)([1-9]\d+)$", number) #match = re.match("(\+|00|\+00)([1-9]\d+)$", number) if match: number = "+%s" % match.groups()[1] return (number, None) else: return (number, None) if not error_message: error_message = T("Enter a valid phone number") return (value, error_message) # ============================================================================= class IS_ISO639_2_LANGUAGE_CODE(IS_IN_SET): """ Validate ISO639-2 Alpha-2/Alpha-3 language codes """ def __init__(self, error_message = "Invalid language code", multiple = False, select = DEFAULT, sort = False, zero = ""): """ Constructor @param error_message: alternative error message @param multiple: allow selection of multiple options @param select: dict of options for the selector, defaults to settings.L10n.languages, set explicitly to None to allow all languages @param sort: sort options in selector @param zero: use this label for the empty-option (default="") """ super(IS_ISO639_2_LANGUAGE_CODE, self).__init__( self.language_codes(), error_message = error_message, multiple = multiple, zero = zero, sort = sort, ) if select is DEFAULT: self._select = current.deployment_settings.get_L10n_languages() else: self._select = select # ------------------------------------------------------------------------- def options(self, zero=True): """ Get the options for the selector. This could be only a subset of all valid options (self._select), therefore overriding superclass function here. 
""" language_codes = self.language_codes() if self._select: language_codes_dict = dict(language_codes) items = [(k, v) for k, v in self._select.items() if k in language_codes_dict] else: items = self.language_codes() if self.sort: items.sort(options_sorter) if zero and not self.zero is None and not self.multiple: items.insert(0, ("", self.zero)) return items # ------------------------------------------------------------------------- @classmethod def represent(cls, code): """ Represent a language code by language name @param code: the language code """ l10n_languages = current.deployment_settings.get_L10n_languages() if code in l10n_languages: name = l10n_languages[code] else: all_languages = dict(cls.language_codes()) name = all_languages.get(code) if name is None: name = current.messages.UNKNOWN_OPT return name # ------------------------------------------------------------------------- @staticmethod def language_codes(): """ Returns a list of tuples of ISO639-1 alpha-2 language codes, can also be used to look up the language name Just the subset which are useful for Translations - 2 letter code preferred, 3-letter code where none exists, no 'families' or Old """ return [#("aar", "Afar"), ("aa", "Afar"), #("abk", "Abkhazian"), ("ab", "Abkhazian"), ("ace", "Achinese"), ("ach", "Acoli"), ("ada", "Adangme"), ("ady", "Adyghe; Adygei"), #("afa", "Afro-Asiatic languages"), ("afh", "Afrihili"), #("afr", "Afrikaans"), ("af", "Afrikaans"), ("ain", "Ainu"), #("aka", "Akan"), ("ak", "Akan"), ("akk", "Akkadian"), #("alb", "Albanian"), ("sq", "Albanian"), ("ale", "Aleut"), #("alg", "Algonquian languages"), ("alt", "Southern Altai"), #("amh", "Amharic"), ("am", "Amharic"), #("ang", "English, Old (ca.450-1100)"), ("anp", "Angika"), #("apa", "Apache languages"), #("ara", "Arabic"), ("ar", "Arabic"), #("arc", "Official Aramaic (700-300 BCE); Imperial Aramaic (700-300 BCE)"), #("arg", "Aragonese"), ("an", "Aragonese"), #("arm", "Armenian"), ("hy", "Armenian"), ("arn", "Mapudungun; Mapuche"), ("arp", "Arapaho"), #("art", "Artificial languages"), ("arw", "Arawak"), #("asm", "Assamese"), ("as", "Assamese"), ("ast", "Asturian; Bable; Leonese; Asturleonese"), #("ath", "Athapascan languages"), #("aus", "Australian languages"), #("ava", "Avaric"), ("av", "Avaric"), #("ave", "Avestan"), ("ae", "Avestan"), ("awa", "Awadhi"), #("aym", "Aymara"), ("ay", "Aymara"), #("aze", "Azerbaijani"), ("az", "Azerbaijani"), #("bad", "Banda languages"), #("bai", "Bamileke languages"), #("bak", "Bashkir"), ("ba", "Bashkir"), ("bal", "Baluchi"), #("bam", "Bambara"), ("bm", "Bambara"), ("ban", "Balinese"), #("baq", "Basque"), ("eu", "Basque"), ("bas", "Basa"), #("bat", "Baltic languages"), ("bej", "Beja; Bedawiyet"), #("bel", "Belarusian"), ("be", "Belarusian"), ("bem", "Bemba"), #("ben", "Bengali"), ("bn", "Bengali"), #("ber", "Berber languages"), ("bho", "Bhojpuri"), #("bih", "Bihari languages"), #("bh", "Bihari languages"), ("bik", "Bikol"), ("bin", "Bini; Edo"), #("bis", "Bislama"), ("bi", "Bislama"), ("bla", "Siksika"), #("bnt", "Bantu (Other)"), #("bos", "Bosnian"), ("bs", "Bosnian"), ("bra", "Braj"), #("bre", "Breton"), ("br", "Breton"), #("btk", "Batak languages"), ("bua", "Buriat"), ("bug", "Buginese"), #("bul", "Bulgarian"), ("bg", "Bulgarian"), #("bur", "Burmese"), ("my", "Burmese"), ("byn", "Blin; Bilin"), ("cad", "Caddo"), #("cai", "Central American Indian languages"), ("car", "Galibi Carib"), #("cat", "Catalan; Valencian"), ("ca", "Catalan; Valencian"), #("cau", "Caucasian languages"), ("ceb", "Cebuano"), #("cel", 
"Celtic languages"), #("cha", "Chamorro"), ("ch", "Chamorro"), ("chb", "Chibcha"), #("che", "Chechen"), ("ce", "Chechen"), ("chg", "Chagatai"), #("chi", "Chinese"), ("zh", "Chinese"), ("chk", "Chuukese"), ("chm", "Mari"), ("chn", "Chinook jargon"), ("cho", "Choctaw"), ("chp", "Chipewyan; Dene Suline"), ("chr", "Cherokee"), #("chu", "Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic"), ("cu", "Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic"), #("chv", "Chuvash"), ("cv", "Chuvash"), ("chy", "Cheyenne"), #("cmc", "Chamic languages"), ("cop", "Coptic"), #("cor", "Cornish"), ("kw", "Cornish"), #("cos", "Corsican"), ("co", "Corsican"), #("cpe", "Creoles and pidgins, English based"), #("cpf", "Creoles and pidgins, French-based"), #("cpp", "Creoles and pidgins, Portuguese-based"), #("cre", "Cree"), ("cr", "Cree"), ("crh", "Crimean Tatar; Crimean Turkish"), #("crp", "Creoles and pidgins"), ("csb", "Kashubian"), ("cus", "Cushitic languages"), #("cze", "Czech"), ("cs", "Czech"), ("dak", "Dakota"), #("dan", "Danish"), ("da", "Danish"), ("dar", "Dargwa"), #("day", "Land Dayak languages"), ("del", "Delaware"), ("den", "Slave (Athapascan)"), ("dgr", "Dogrib"), ("din", "Dinka"), #("div", "Divehi; Dhivehi; Maldivian"), ("dv", "Divehi; Dhivehi; Maldivian"), ("doi", "Dogri"), #("dra", "Dravidian languages"), ("dsb", "Lower Sorbian"), ("dua", "Duala"), #("dum", "Dutch, Middle (ca.1050-1350)"), #("dut", "Dutch; Flemish"), ("nl", "Dutch; Flemish"), ("dyu", "Dyula"), #("dzo", "Dzongkha"), ("dz", "Dzongkha"), ("efi", "Efik"), #("egy", "Egyptian (Ancient)"), ("eka", "Ekajuk"), ("elx", "Elamite"), #("eng", "English"), ("en", "English"), #("enm", "English, Middle (1100-1500)"), #("epo", "Esperanto"), ("eo", "Esperanto"), #("est", "Estonian"), ("et", "Estonian"), #("ewe", "Ewe"), ("ee", "Ewe"), ("ewo", "Ewondo"), ("fan", "Fang"), #("fao", "Faroese"), ("fo", "Faroese"), ("fat", "Fanti"), #("fij", "Fijian"), ("fj", "Fijian"), ("fil", "Filipino; Pilipino"), #("fin", "Finnish"), ("fi", "Finnish"), #("fiu", "Finno-Ugrian languages"), ("fon", "Fon"), #("fre", "French"), ("fr", "French"), #("frm", "French, Middle (ca.1400-1600)"), #("fro", "French, Old (842-ca.1400)"), ("frr", "Northern Frisian"), ("frs", "Eastern Frisian"), #("fry", "Western Frisian"), ("fy", "Western Frisian"), #("ful", "Fulah"), ("ff", "Fulah"), ("fur", "Friulian"), ("gaa", "Ga"), ("gay", "Gayo"), ("gba", "Gbaya"), #("gem", "Germanic languages"), #("geo", "Georgian"), ("ka", "Georgian"), #("ger", "German"), ("de", "German"), ("gez", "Geez"), ("gil", "Gilbertese"), #("gla", "Gaelic; Scottish Gaelic"), ("gd", "Gaelic; Scottish Gaelic"), #("gle", "Irish"), ("ga", "Irish"), #("glg", "Galician"), ("gl", "Galician"), #("glv", "Manx"), ("gv", "Manx"), #("gmh", "German, Middle High (ca.1050-1500)"), #("goh", "German, Old High (ca.750-1050)"), ("gon", "Gondi"), ("gor", "Gorontalo"), ("got", "Gothic"), ("grb", "Grebo"), #("grc", "Greek, Ancient (to 1453)"), #("gre", "Greek, Modern (1453-)"), ("el", "Greek, Modern (1453-)"), #("grn", "Guarani"), ("gn", "Guarani"), ("gsw", "Swiss German; Alemannic; Alsatian"), #("guj", "Gujarati"), ("gu", "Gujarati"), ("gwi", "Gwich'in"), ("hai", "Haida"), #("hat", "Haitian; Haitian Creole"), ("ht", "Haitian; Haitian Creole"), #("hau", "Hausa"), ("ha", "Hausa"), ("haw", "Hawaiian"), #("heb", "Hebrew"), ("he", "Hebrew"), #("her", "Herero"), ("hz", "Herero"), ("hil", "Hiligaynon"), #("him", "Himachali languages; Western Pahari languages"), #("hin", "Hindi"), ("hi", 
"Hindi"), ("hit", "Hittite"), ("hmn", "Hmong; Mong"), #("hmo", "Hiri Motu"), ("ho", "Hiri Motu"), #("hrv", "Croatian"), ("hr", "Croatian"), ("hsb", "Upper Sorbian"), #("hun", "Hungarian"), ("hu", "Hungarian"), ("hup", "Hupa"), ("iba", "Iban"), #("ibo", "Igbo"), ("ig", "Igbo"), #("ice", "Icelandic"), ("is", "Icelandic"), #("ido", "Ido"), ("io", "Ido"), #("iii", "Sichuan Yi; Nuosu"), ("ii", "Sichuan Yi; Nuosu"), #("ijo", "Ijo languages"), #("iku", "Inuktitut"), ("iu", "Inuktitut"), #("ile", "Interlingue; Occidental"), ("ie", "Interlingue; Occidental"), ("ilo", "Iloko"), #("ina", "Interlingua (International Auxiliary Language Association)"), ("ia", "Interlingua (International Auxiliary Language Association)"), #("inc", "Indic languages"), #("ind", "Indonesian"), ("id", "Indonesian"), #("ine", "Indo-European languages"), ("inh", "Ingush"), #("ipk", "Inupiaq"), ("ik", "Inupiaq"), #("ira", "Iranian languages"), #("iro", "Iroquoian languages"), #("ita", "Italian"), ("it", "Italian"), #("jav", "Javanese"), ("jv", "Javanese"), ("jbo", "Lojban"), #("jpn", "Japanese"), ("ja", "Japanese"), #("jpr", "Judeo-Persian"), #("jrb", "Judeo-Arabic"), ("kaa", "Kara-Kalpak"), ("kab", "Kabyle"), ("kac", "Kachin; Jingpho"), #("kal", "Kalaallisut; Greenlandic"), ("kl", "Kalaallisut; Greenlandic"), ("kam", "Kamba"), #("kan", "Kannada"), ("kn", "Kannada"), #("kar", "Karen languages"), #("kas", "Kashmiri"), ("ks", "Kashmiri"), #("kau", "Kanuri"), ("kr", "Kanuri"), ("kaw", "Kawi"), #("kaz", "Kazakh"), ("kk", "Kazakh"), ("kbd", "Kabardian"), ("kha", "Khasi"), #("khi", "Khoisan languages"), #("khm", "Central Khmer"), ("km", "Central Khmer"), ("kho", "Khotanese; Sakan"), #("kik", "Kikuyu; Gikuyu"), ("ki", "Kikuyu; Gikuyu"), #("kin", "Kinyarwanda"), ("rw", "Kinyarwanda"), #("kir", "Kirghiz; Kyrgyz"), ("ky", "Kirghiz; Kyrgyz"), ("kmb", "Kimbundu"), ("kok", "Konkani"), #("kom", "Komi"), ("kv", "Komi"), #("kon", "Kongo"), ("kg", "Kongo"), #("kor", "Korean"), ("ko", "Korean"), ("kos", "Kosraean"), ("kpe", "Kpelle"), ("krc", "Karachay-Balkar"), ("krl", "Karelian"), #("kro", "Kru languages"), ("kru", "Kurukh"), #("kua", "Kuanyama; Kwanyama"), ("kj", "Kuanyama; Kwanyama"), ("kum", "Kumyk"), #("kur", "Kurdish"), ("ku", "Kurdish"), ("kut", "Kutenai"), ("lad", "Ladino"), ("lah", "Lahnda"), ("lam", "Lamba"), #("lao", "Lao"), ("lo", "Lao"), #("lat", "Latin"), ("la", "Latin"), #("lav", "Latvian"), ("lv", "Latvian"), ("lez", "Lezghian"), #("lim", "Limburgan; Limburger; Limburgish"), ("li", "Limburgan; Limburger; Limburgish"), #("lin", "Lingala"), ("ln", "Lingala"), #("lit", "Lithuanian"), ("lt", "Lithuanian"), ("lol", "Mongo"), ("loz", "Lozi"), #("ltz", "Luxembourgish; Letzeburgesch"), ("lb", "Luxembourgish; Letzeburgesch"), ("lua", "Luba-Lulua"), #("lub", "Luba-Katanga"), ("lu", "Luba-Katanga"), #("lug", "Ganda"), ("lg", "Ganda"), ("lui", "Luiseno"), ("lun", "Lunda"), ("luo", "Luo (Kenya and Tanzania)"), ("lus", "Lushai"), #("mac", "Macedonian"), ("mk", "Macedonian"), ("mad", "Madurese"), ("mag", "Magahi"), #("mah", "Marshallese"), ("mh", "Marshallese"), ("mai", "Maithili"), ("mak", "Makasar"), #("mal", "Malayalam"), ("ml", "Malayalam"), ("man", "Mandingo"), #("mao", "Maori"), ("mi", "Maori"), #("map", "Austronesian languages"), #("mar", "Marathi"), ("mr", "Marathi"), ("mas", "Masai"), #("may", "Malay"), ("ms", "Malay"), ("mdf", "Moksha"), ("mdr", "Mandar"), ("men", "Mende"), #("mga", "Irish, Middle (900-1200)"), ("mic", "Mi'kmaq; Micmac"), ("min", "Minangkabau"), #("mis", "Uncoded languages"), #("mkh", "Mon-Khmer languages"), #("mlg", 
"Malagasy"), ("mg", "Malagasy"), ("mlt", "Maltese"), ("mt", "Maltese"), ("mnc", "Manchu"), ("mni", "Manipuri"), #("mno", "Manobo languages"), ("moh", "Mohawk"), #("mon", "Mongolian"), ("mn", "Mongolian"), ("mos", "Mossi"), #("mul", "Multiple languages"), #("mun", "Munda languages"), ("mus", "Creek"), ("mwl", "Mirandese"), ("mwr", "Marwari"), #("myn", "Mayan languages"), ("myv", "Erzya"), #("nah", "Nahuatl languages"), #("nai", "North American Indian languages"), ("nap", "Neapolitan"), #("nau", "Nauru"), ("na", "Nauru"), #("nav", "Navajo; Navaho"), ("nv", "Navajo; Navaho"), #("nbl", "Ndebele, South; South Ndebele"), ("nr", "Ndebele, South; South Ndebele"), #("nde", "Ndebele, North; North Ndebele"), ("nd", "Ndebele, North; North Ndebele"), #("ndo", "Ndonga"), ("ng", "Ndonga"), ("nds", "Low German; Low Saxon; German, Low; Saxon, Low"), #("nep", "Nepali"), ("ne", "Nepali"), ("new", "Nepal Bhasa; Newari"), ("nia", "Nias"), #("nic", "Niger-Kordofanian languages"), ("niu", "Niuean"), #("nno", "Norwegian Nynorsk; Nynorsk, Norwegian"), ("nn", "Norwegian Nynorsk; Nynorsk, Norwegian"), #("nob", "Bokmål, Norwegian; Norwegian Bokmål"), ("nb", "Bokmål, Norwegian; Norwegian Bokmål"), ("nog", "Nogai"), #("non", "Norse, Old"), #("nor", "Norwegian"), ("no", "Norwegian"), ("nqo", "N'Ko"), ("nso", "Pedi; Sepedi; Northern Sotho"), #("nub", "Nubian languages"), #("nwc", "Classical Newari; Old Newari; Classical Nepal Bhasa"), #("nya", "Chichewa; Chewa; Nyanja"), ("ny", "Chichewa; Chewa; Nyanja"), ("nym", "Nyamwezi"), ("nyn", "Nyankole"), ("nyo", "Nyoro"), ("nzi", "Nzima"), #("oci", "Occitan (post 1500); Provençal"), ("oc", "Occitan (post 1500); Provençal"), #("oji", "Ojibwa"), ("oj", "Ojibwa"), #("ori", "Oriya"), ("or", "Oriya"), #("orm", "Oromo"), ("om", "Oromo"), ("osa", "Osage"), #("oss", "Ossetian; Ossetic"), ("os", "Ossetian; Ossetic"), #("ota", "Turkish, Ottoman (1500-1928)"), #("oto", "Otomian languages"), #("paa", "Papuan languages"), ("pag", "Pangasinan"), ("pal", "Pahlavi"), ("pam", "Pampanga; Kapampangan"), #("pan", "Panjabi; Punjabi"), ("pa", "Panjabi; Punjabi"), ("pap", "Papiamento"), ("pau", "Palauan"), #("peo", "Persian, Old (ca.600-400 B.C.)"), #("per", "Persian"), ("fa", "Persian"), #("phi", "Philippine languages"), ("phn", "Phoenician"), #("pli", "Pali"), ("pi", "Pali"), #("pol", "Polish"), ("pl", "Polish"), ("pon", "Pohnpeian"), #("por", "Portuguese"), ("pt", "Portuguese"), #("pra", "Prakrit languages"), #("pro", "Provençal, Old (to 1500)"), #("pus", "Pushto; Pashto"), ("ps", "Pushto; Pashto"), #("qaa-qtz", "Reserved for local use"), #("que", "Quechua"), ("qu", "Quechua"), ("raj", "Rajasthani"), ("rap", "Rapanui"), ("rar", "Rarotongan; Cook Islands Maori"), #("roa", "Romance languages"), #("roh", "Romansh"), ("rm", "Romansh"), ("rom", "Romany"), #("rum", "Romanian; Moldavian; Moldovan"), ("ro", "Romanian; Moldavian; Moldovan"), #("run", "Rundi"), ("rn", "Rundi"), ("rup", "Aromanian; Arumanian; Macedo-Romanian"), #("rus", "Russian"), ("ru", "Russian"), ("sad", "Sandawe"), #("sag", "Sango"), ("sg", "Sango"), ("sah", "Yakut"), #("sai", "South American Indian (Other)"), #("sal", "Salishan languages"), ("sam", "Samaritan Aramaic"), #("san", "Sanskrit"), ("sa", "Sanskrit"), ("sas", "Sasak"), ("sat", "Santali"), ("scn", "Sicilian"), ("sco", "Scots"), ("sel", "Selkup"), #("sem", "Semitic languages"), #("sga", "Irish, Old (to 900)"), #("sgn", "Sign Languages"), ("shn", "Shan"), ("sid", "Sidamo"), #("sin", "Sinhala; Sinhalese"), ("si", "Sinhala; Sinhalese"), #("sio", "Siouan languages"), #("sit", 
"Sino-Tibetan languages"), #("sla", "Slavic languages"), #("slo", "Slovak"), ("sk", "Slovak"), #("slv", "Slovenian"), ("sl", "Slovenian"), ("sma", "Southern Sami"), #("sme", "Northern Sami"), ("se", "Northern Sami"), #("smi", "Sami languages"), ("smj", "Lule Sami"), ("smn", "Inari Sami"), #("smo", "Samoan"), ("sm", "Samoan"), ("sms", "Skolt Sami"), #("sna", "Shona"), ("sn", "Shona"), #("snd", "Sindhi"), ("sd", "Sindhi"), ("snk", "Soninke"), ("sog", "Sogdian"), #("som", "Somali"), ("so", "Somali"), #("son", "Songhai languages"), #("sot", "Sotho, Southern"), ("st", "Sotho, Southern"), #("spa", "Spanish; Castilian"), ("es", "Spanish; Castilian"), #("srd", "Sardinian"), ("sc", "Sardinian"), ("srn", "Sranan Tongo"), #("srp", "Serbian"), ("sr", "Serbian"), ("srr", "Serer"), #("ssa", "Nilo-Saharan languages"), #("ssw", "Swati"), ("ss", "Swati"), ("suk", "Sukuma"), #("sun", "Sundanese"), ("su", "Sundanese"), ("sus", "Susu"), ("sux", "Sumerian"), #("swa", "Swahili"), ("sw", "Swahili"), #("swe", "Swedish"), ("sv", "Swedish"), #("syc", "Classical Syriac"), ("syr", "Syriac"), #("tah", "Tahitian"), ("ty", "Tahitian"), #("tai", "Tai languages"), #("tam", "Tamil"), ("ta", "Tamil"), #("tat", "Tatar"), ("tt", "Tatar"), #("tel", "Telugu"), ("te", "Telugu"), ("tem", "Timne"), ("ter", "Tereno"), ("tet", "Tetum"), #("tgk", "Tajik"), ("tg", "Tajik"), #("tgl", "Tagalog"), ("tl", "Tagalog"), #("tha", "Thai"), ("th", "Thai"), #("tib", "Tibetan"), ("bo", "Tibetan"), ("tig", "Tigre"), #("tir", "Tigrinya"), ("ti", "Tigrinya"), ("tiv", "Tiv"), ("tkl", "Tokelau"), #("tlh", "Klingon; tlhIngan-Hol"), ("tli", "Tlingit"), ("tmh", "Tamashek"), ("tog", "Tonga (Nyasa)"), #("ton", "Tonga (Tonga Islands)"), ("to", "Tonga (Tonga Islands)"), ("tpi", "Tok Pisin"), ("tsi", "Tsimshian"), #("tsn", "Tswana"), ("tn", "Tswana"), #("tso", "Tsonga"), ("ts", "Tsonga"), #("tuk", "Turkmen"), ("tk", "Turkmen"), ("tum", "Tumbuka"), #("tup", "Tupi languages"), #("tur", "Turkish"), ("tr", "Turkish"), #("tut", "Altaic languages"), ("tvl", "Tuvalu"), #("twi", "Twi"), ("tw", "Twi"), ("tyv", "Tuvinian"), ("udm", "Udmurt"), ("uga", "Ugaritic"), #("uig", "Uighur; Uyghur"), ("ug", "Uighur; Uyghur"), #("ukr", "Ukrainian"), ("uk", "Ukrainian"), ("umb", "Umbundu"), #("und", "Undetermined"), #("urd", "Urdu"), ("ur", "Urdu"), #("uzb", "Uzbek"), ("uz", "Uzbek"), ("vai", "Vai"), #("ven", "Venda"), ("ve", "Venda"), #("vie", "Vietnamese"), ("vi", "Vietnamese"), #("vol", "Volapük"), ("vo", "Volapük"), ("vot", "Votic"), #("wak", "Wakashan languages"), ("wal", "Walamo"), ("war", "Waray"), ("was", "Washo"), #("wel", "Welsh"), ("cy", "Welsh"), #("wen", "Sorbian languages"), #("wln", "Walloon"), ("wa", "Walloon"), #("wol", "Wolof"), ("wo", "Wolof"), ("xal", "Kalmyk; Oirat"), #("xho", "Xhosa"), ("xh", "Xhosa"), ("yao", "Yao"), ("yap", "Yapese"), #("yid", "Yiddish"), ("yi", "Yiddish"), #("yor", "Yoruba"), ("yo", "Yoruba"), #("ypk", "Yupik languages"), ("zap", "Zapotec"), #("zbl", "Blissymbols; Blissymbolics; Bliss"), ("zen", "Zenaga"), ("zgh", "Standard Moroccan Tamazight"), #("zha", "Zhuang; Chuang"), ("za", "Zhuang; Chuang"), #("znd", "Zande languages"), #("zul", "Zulu"), ("zu", "Zulu"), ("zun", "Zuni"), #("zxx", "No linguistic content; Not applicable"), ("zza", "Zaza; Dimili; Dimli; Kirdki; Kirmanjki; Zazaki"), ] # END =========================================================================
mit
4,414,899,895,666,440,000
38.259692
138
0.418878
false
jredrejo/web2pyreactpoc
languages/es.py
1
21332
# -*- coding: utf-8 -*- { '!langcode!': 'es', '!langname!': 'Español', '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"actualice" es una expresión opcional como "campo1=\'nuevo_valor\'". No se puede actualizar o eliminar resultados de un JOIN', '%(nrows)s records found': '%(nrows)s registros encontrados', '%s %%{position}': '%s %%{posición}', '%s %%{row} deleted': '%s %%{fila} %%{eliminada}', '%s %%{row} updated': '%s %%{fila} %%{actualizada}', '%s selected': '%s %%{seleccionado}', '%Y-%m-%d': '%d/%m/%Y', '%Y-%m-%d %H:%M:%S': '%d/%m/%Y %H:%M:%S', '(something like "it-it")': '(algo como "eso-eso")', '@markmin\x01An error occured, please [[reload %s]] the page': 'Ha ocurrido un error, por favor [[recargar %s]] la página', '@markmin\x01Number of entries: **%s**': 'Número de entradas: **%s**', 'A new version of web2py is available': 'Hay una nueva versión de web2py disponible', 'A new version of web2py is available: %s': 'Hay una nueva versión de web2py disponible: %s', 'About': 'Acerca de', 'about': 'acerca de', 'About application': 'Acerca de la aplicación', 'Access Control': 'Control de Acceso', 'Add': 'Añadir', 'additional code for your application': 'código adicional para su aplicación', 'admin disabled because no admin password': 'admin deshabilitado por falta de contraseña', 'admin disabled because not supported on google app engine': 'admin deshabilitado, no es soportado en GAE', 'admin disabled because unable to access password file': 'admin deshabilitado, imposible acceder al archivo con la contraseña', 'Admin is disabled because insecure channel': 'Admin deshabilitado, el canal no es seguro', 'Admin is disabled because unsecure channel': 'Admin deshabilitado, el canal no es seguro', 'Administrative interface': 'Interfaz administrativa', 'Administrative Interface': 'Interfaz Administrativa', 'Administrator Password:': 'Contraseña del Administrador:', 'Advanced': 'Advanced', 'Ajax Recipes': 'Recetas AJAX', 'An error occured, please %s the page': 'Ha ocurrido un error, por favor %s la página', 'And': 'Y', 'and rename it (required):': 'y renómbrela (requerido):', 'and rename it:': ' y renómbrelo:', 'appadmin': 'appadmin', 'appadmin is disabled because insecure channel': 'admin deshabilitado, el canal no es seguro', 'application "%s" uninstalled': 'aplicación "%s" desinstalada', 'application compiled': 'aplicación compilada', 'application is compiled and cannot be designed': 'la aplicación está compilada y no puede ser modificada', 'Apply changes': 'Aplicar cambios', 'Appointment': 'Nombramiento', 'Are you sure you want to delete file "%s"?': '¿Está seguro que desea eliminar el archivo "%s"?', 'Are you sure you want to delete this object?': '¿Está seguro que desea borrar este objeto?', 'Are you sure you want to uninstall application "%s"': '¿Está seguro que desea desinstalar la aplicación "%s"', 'Are you sure you want to uninstall application "%s"?': '¿Está seguro que desea desinstalar la aplicación "%s"?', 'at': 'en', 'ATTENTION: Login requires a secure (HTTPS) connection or running on localhost.': 'ATENCION: Inicio de sesión requiere una conexión segura (HTTPS) o localhost.', 'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.': 'ATENCION: NO EJECUTE VARIAS PRUEBAS SIMULTANEAMENTE, NO SON THREAD SAFE.', 'ATTENTION: you cannot edit the running application!': 'ATENCION: no puede modificar la aplicación que está ejecutandose!', 'Authentication': 'Autenticación', 'Authentication failed 
at client DB!': '¡La autenticación ha fallado en la BDD cliente!', 'Authentication failed at main DB!': '¡La autenticación ha fallado en la BDD principal!', 'Available Databases and Tables': 'Bases de datos y tablas disponibles', 'Back': 'Atrás', 'Buy this book': 'Compra este libro', "Buy web2py's book": "Buy web2py's book", 'Cache': 'Caché', 'cache': 'caché', 'Cache Keys': 'Llaves de la Caché', 'cache, errors and sessions cleaned': 'caché, errores y sesiones eliminados', 'Cannot be empty': 'No puede estar vacío', 'Cannot compile: there are errors in your app. Debug it, correct errors and try again.': 'No se puede compilar: hay errores en su aplicación. Depure, corrija errores y vuelva a intentarlo.', 'cannot create file': 'no es posible crear archivo', 'cannot upload file "%(filename)s"': 'no es posible subir archivo "%(filename)s"', 'Change Password': 'Cambie la Contraseña', 'Change password': 'Cambie la contraseña', 'change password': 'cambie la contraseña', 'check all': 'marcar todos', 'Check to delete': 'Marque para eliminar', 'choose one': 'escoja uno', 'clean': 'limpiar', 'Clear': 'Limpiar', 'Clear CACHE?': '¿Limpiar CACHÉ?', 'Clear DISK': 'Limpiar DISCO', 'Clear RAM': 'Limpiar RAM', 'Click on the link %(link)s to reset your password': 'Pulse en el enlace %(link)s para reiniciar su contraseña', 'click to check for upgrades': 'haga clic para buscar actualizaciones', 'client': 'cliente', 'Client IP': 'IP del Cliente', 'Close': 'Cerrar', 'Community': 'Comunidad', 'compile': 'compilar', 'compiled application removed': 'aplicación compilada eliminada', 'Components and Plugins': 'Componentes y Plugins', 'Config.ini': 'Config.ini', 'contains': 'contiene', 'Controller': 'Controlador', 'Controllers': 'Controladores', 'controllers': 'controladores', 'Copyright': 'Copyright', 'create file with filename:': 'cree archivo con nombre:', 'Create new application': 'Cree una nueva aplicación', 'create new application:': 'nombre de la nueva aplicación:', 'Created By': 'Creado Por', 'Created On': 'Creado En', 'CSV (hidden cols)': 'CSV (columnas ocultas)', 'Current request': 'Solicitud en curso', 'Current response': 'Respuesta en curso', 'Current session': 'Sesión en curso', 'currently saved or': 'actualmente guardado o', 'customize me!': '¡Adáptame!', 'data uploaded': 'datos subidos', 'Database': 'Base de datos', 'Database %s select': 'selección en base de datos %s', 'database administration': 'administración de base de datos', 'Database Administration (appadmin)': 'Administración de Base de Datos (appadmin)', 'Date and Time': 'Fecha y Hora', 'DB': 'BDD', 'db': 'bdd', 'DB Model': 'Modelo BDD', 'defines tables': 'define tablas', 'Delete': 'Eliminar', 'delete': 'eliminar', 'delete all checked': 'eliminar marcados', 'Delete:': 'Eliminar:', 'Demo': 'Demostración', 'Deploy on Google App Engine': 'Despliegue en Google App Engine', 'Deployment Recipes': 'Recetas de despliegue', 'Description': 'Descripción', 'design': 'diseño', 'DESIGN': 'DISEÑO', 'Design': 'Design', 'Design for': 'Diseño por', 'detecting': 'detectando', 'DISK': 'DISCO', 'Disk Cache Keys': 'Llaves de Caché en Disco', 'Disk Cleared': 'Disco limpiado', 'Documentation': 'Documentación', "Don't know what to do?": '¿No sabe que hacer?', 'done!': '¡hecho!', 'Download': 'Descargas', 'E-mail': 'Correo electrónico', 'edit': 'editar', 'EDIT': 'EDITAR', 'Edit': 'Editar', 'Edit application': 'Editar aplicación', 'edit controller': 'editar controlador', 'Edit current record': 'Edite el registro actual', 'Edit Profile': 'Editar Perfil', 'edit profile': 
'editar perfil', 'Edit This App': 'Edite esta App', 'Editing file': 'Editando archivo', 'Editing file "%s"': 'Editando archivo "%s"', 'Email and SMS': 'Correo electrónico y SMS', 'Email sent': 'Correo electrónico enviado', 'End of impersonation': 'Fin de suplantación', 'enter a number between %(min)g and %(max)g': 'introduzca un número entre %(min)g y %(max)g', 'enter a value': 'introduzca un valor', 'Enter an integer between %(min)g and %(max)g': 'Enter an integer between %(min)g and %(max)g', 'enter an integer between %(min)g and %(max)g': 'introduzca un entero entre %(min)g y %(max)g', 'enter date and time as %(format)s': 'introduzca fecha y hora como %(format)s', 'Error logs for "%(app)s"': 'Bitácora de errores en "%(app)s"', 'errors': 'errores', 'Errors': 'Errores', 'Errors in form, please check it out.': 'Hay errores en el formulario, por favor comprúebelo.', 'export as csv file': 'exportar como archivo CSV', 'Export:': 'Exportar:', 'exposes': 'expone', 'extends': 'extiende', 'failed to reload module': 'la recarga del módulo ha fallado', 'FAQ': 'FAQ', 'file "%(filename)s" created': 'archivo "%(filename)s" creado', 'file "%(filename)s" deleted': 'archivo "%(filename)s" eliminado', 'file "%(filename)s" uploaded': 'archivo "%(filename)s" subido', 'file "%(filename)s" was not deleted': 'archivo "%(filename)s" no fué eliminado', 'file "%s" of %s restored': 'archivo "%s" de %s restaurado', 'file changed on disk': 'archivo modificado en el disco', 'file does not exist': 'archivo no existe', 'file saved on %(time)s': 'archivo guardado %(time)s', 'file saved on %s': 'archivo guardado %s', 'First name': 'Nombre', 'Forgot username?': '¿Olvidó el nombre de usuario?', 'Forms': 'Forms', 'Forms and Validators': 'Formularios y validadores', 'Free Applications': 'Aplicaciones Libres', 'Functions with no doctests will result in [passed] tests.': 'Funciones sin doctests equivalen a pruebas [aceptadas].', 'Group %(group_id)s created': 'Grupo %(group_id)s creado', 'Group ID': 'ID de Grupo', 'Group uniquely assigned to user %(id)s': 'Grupo asignado únicamente al usuario %(id)s', 'Groups': 'Grupos', 'Hello World': 'Hola Mundo', 'help': 'ayuda', 'Helping web2py': 'Helping web2py', 'Home': 'Inicio', 'How did you get here?': '¿Cómo llegaste aquí?', 'htmledit': 'htmledit', 'Impersonate': 'Suplantar', 'import': 'importar', 'Import/Export': 'Importar/Exportar', 'in': 'en', 'includes': 'incluye', 'Index': 'Índice', 'insert new': 'inserte nuevo', 'insert new %s': 'inserte nuevo %s', 'Installed applications': 'Aplicaciones instaladas', 'Insufficient privileges': 'Privilegios insuficientes', 'internal error': 'error interno', 'Internal State': 'Estado Interno', 'Introduction': 'Introducción', 'Invalid action': 'Acción inválida', 'Invalid email': 'Correo electrónico inválido', 'invalid expression': 'expresión inválida', 'Invalid login': 'Inicio de sesión inválido', 'invalid password': 'contraseña inválida', 'Invalid Query': 'Consulta inválida', 'invalid request': 'solicitud inválida', 'Invalid reset password': 'Reinicio de contraseña inválido', 'invalid ticket': 'tiquete inválido', 'Is Active': 'Está Activo', 'Key': 'Llave', 'language file "%(filename)s" created/updated': 'archivo de lenguaje "%(filename)s" creado/actualizado', 'Language files (static strings) updated': 'Archivos de lenguaje (cadenas estáticas) actualizados', 'languages': 'lenguajes', 'Languages': 'Lenguajes', 'languages updated': 'lenguajes actualizados', 'Last name': 'Apellido', 'Last saved on:': 'Guardado en:', 'Layout': 'Diseño de página', 
'Layout Plugins': 'Plugins de diseño', 'Layouts': 'Diseños de páginas', 'License for': 'Licencia para', 'Live Chat': 'Chat en vivo', 'loading...': 'cargando...', 'Log In': 'Log In', 'Logged in': 'Sesión iniciada', 'Logged out': 'Sesión finalizada', 'Login': 'Inicio de sesión', 'login': 'inicio de sesión', 'Login disabled by administrator': 'Inicio de sesión deshabilitado por el administrador', 'Login to the Administrative Interface': 'Inicio de sesión para la Interfaz Administrativa', 'logout': 'fin de sesión', 'Logout': 'Fin de sesión', 'Lost Password': 'Contraseña perdida', 'Lost password?': '¿Olvidó la contraseña?', 'lost password?': '¿olvidó la contraseña?', 'Main Menu': 'Menú principal', 'Manage Cache': 'Gestionar la Caché', 'Menu Model': 'Modelo "menu"', 'merge': 'combinar', 'Models': 'Modelos', 'models': 'modelos', 'Modified By': 'Modificado Por', 'Modified On': 'Modificado En', 'Modules': 'Módulos', 'modules': 'módulos', 'must be YYYY-MM-DD HH:MM:SS!': '¡debe ser DD/MM/YYYY HH:MM:SS!', 'must be YYYY-MM-DD!': '¡debe ser DD/MM/YYYY!', 'My Sites': 'Mis Sitios', 'Name': 'Nombre', 'New': 'Nuevo', 'New %(entity)s': 'Nuevo %(entity)s', 'new application "%s" created': 'nueva aplicación "%s" creada', 'New password': 'Contraseña nueva', 'New Record': 'Registro nuevo', 'new record inserted': 'nuevo registro insertado', 'next 100 rows': '100 filas siguientes', 'NO': 'NO', 'No databases in this application': 'No hay bases de datos en esta aplicación', 'No records found': 'No se han encontrado registros', 'Not authorized': 'No autorizado', 'not in': 'no en', 'Object or table name': 'Nombre del objeto o tabla', 'Old password': 'Contraseña vieja', 'Online book': 'Online book', 'Online examples': 'Ejemplos en línea', 'Or': 'O', 'or import from csv file': 'o importar desde archivo CSV', 'or provide application url:': 'o provea URL de la aplicación:', 'Origin': 'Origen', 'Original/Translation': 'Original/Traducción', 'Other Plugins': 'Otros Plugins', 'Other Recipes': 'Otras Recetas', 'Overview': 'Resumen', 'pack all': 'empaquetar todo', 'pack compiled': 'empaquetar compilados', 'Password': 'Contraseña', 'Password changed': 'Contraseña cambiada', "Password fields don't match": 'Los campos de contraseña no coinciden', 'Password reset': 'Reinicio de contraseña', 'Peeking at file': 'Visualizando archivo', 'Phone': 'Teléfono', 'please input your password again': 'por favor introduzca su contraseña otra vez', 'Plugins': 'Plugins', 'Powered by': 'Este sitio usa', 'Preface': 'Prefacio', 'previous 100 rows': '100 filas anteriores', 'Profile': 'Perfil', 'Profile updated': 'Perfil actualizado', 'Python': 'Python', 'Query Not Supported: %s': 'Consulta No Soportada: %s', 'Query:': 'Consulta:', 'Quick Examples': 'Ejemplos Rápidos', 'RAM': 'RAM', 'RAM Cache Keys': 'Llaves de la Caché en RAM', 'Ram Cleared': 'Ram Limpiada', 'Recipes': 'Recetas', 'Record': 'Registro', 'Record %(id)s created': 'Registro %(id)s creado', 'Record Created': 'Registro Creado', 'record does not exist': 'el registro no existe', 'Record ID': 'ID de Registro', 'Record id': 'Id de registro', 'register': 'regístrese', 'Register': 'Regístrese', 'Registration identifier': 'Identificador de Registro', 'Registration key': 'Llave de registro', 'Registration successful': 'Registro con éxito', 'reload': 'recargar', 'Remember me (for 30 days)': 'Recuérdame (durante 30 días)', 'remove compiled': 'eliminar compiladas', 'Request reset password': 'Solicitar reinicio de contraseña', 'Reset password': 'Reiniciar contraseña', 'Reset Password key': 'Restaurar 
Llave de la Contraseña', 'Resolve Conflict file': 'archivo Resolución de Conflicto', 'restore': 'restaurar', 'Retrieve username': 'Recuperar nombre de usuario', 'revert': 'revertir', 'Role': 'Rol', 'Rows in Table': 'Filas en la tabla', 'Rows selected': 'Filas seleccionadas', 'save': 'guardar', 'Saved file hash:': 'Hash del archivo guardado:', 'Search': 'Buscar', 'Semantic': 'Semántica', 'Services': 'Servicios', 'session expired': 'sesión expirada', 'shell': 'terminal', 'Sign Up': 'Sign Up', 'site': 'sitio', 'Size of cache:': 'Tamaño de la Caché:', 'some files could not be removed': 'algunos archivos no pudieron ser removidos', 'start': 'inicio', 'starts with': 'comienza por', 'state': 'estado', 'static': 'estáticos', 'Static files': 'Archivos estáticos', 'Statistics': 'Estadísticas', 'Stylesheet': 'Hoja de estilo', 'Submit': 'Enviar', 'submit': 'enviar', 'Success!': '¡Correcto!', 'Support': 'Soporte', 'Sure you want to delete this object?': '¿Está seguro que desea eliminar este objeto?', 'Table': 'tabla', 'Table name': 'Nombre de la tabla', 'test': 'probar', 'Testing application': 'Probando aplicación', 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'La "consulta" es una condición como "db.tabla1.campo1==\'valor\'". Algo como "db.tabla1.campo1==db.tabla2.campo2" resulta en un JOIN SQL.', 'the application logic, each URL path is mapped in one exposed function in the controller': 'la lógica de la aplicación, cada ruta URL se mapea en una función expuesta en el controlador', 'The Core': 'El Núcleo', 'the data representation, define database tables and sets': 'la representación de datos, define tablas y conjuntos de base de datos', 'The output of the file is a dictionary that was rendered by the view %s': 'La salida de dicha función es un diccionario que es desplegado por la vista %s', 'the presentations layer, views are also known as templates': 'la capa de presentación, las vistas también son llamadas plantillas', 'The Views': 'Las Vistas', 'There are no controllers': 'No hay controladores', 'There are no models': 'No hay modelos', 'There are no modules': 'No hay módulos', 'There are no static files': 'No hay archivos estáticos', 'There are no translators, only default language is supported': 'No hay traductores, sólo el lenguaje por defecto es soportado', 'There are no views': 'No hay vistas', 'these files are served without processing, your images go here': 'estos archivos son servidos sin procesar, sus imágenes van aquí', 'This App': 'Esta Aplicación', 'This email already has an account': 'Este correo electrónico ya tiene una cuenta', 'This is a copy of the scaffolding application': 'Esta es una copia de la aplicación de andamiaje', 'This is the %(filename)s template': 'Esta es la plantilla %(filename)s', 'Ticket': 'Tiquete', 'Time in Cache (h:m:s)': 'Tiempo en Caché (h:m:s)', 'Timestamp': 'Marca de tiempo', 'to previous version.': 'a la versión previa.', 'To emulate a breakpoint programatically, write:': 'Emular un punto de ruptura programáticamente, escribir:', 'to use the debugger!': '¡usar el depurador!', 'toggle breakpoint': 'alternar punto de ruptura', 'Toggle comment': 'Alternar comentario', 'Toggle Fullscreen': 'Alternar pantalla completa', 'too short': 'demasiado corto', 'translation strings for the application': 'cadenas de caracteres de traducción para la aplicación', 'try': 'intente', 'try something like': 'intente algo como', 'TSV (Excel compatible)': 'TSV (compatible Excel)', 'TSV 
(Excel compatible, hidden cols)': 'TSV (compatible Excel, columnas ocultas)', 'Twitter': 'Twitter', 'Unable to check for upgrades': 'No es posible verificar la existencia de actualizaciones', 'unable to create application "%s"': 'no es posible crear la aplicación "%s"', 'unable to delete file "%(filename)s"': 'no es posible eliminar el archivo "%(filename)s"', 'Unable to download': 'No es posible la descarga', 'Unable to download app': 'No es posible descarga la aplicación', 'unable to parse csv file': 'no es posible analizar el archivo CSV', 'unable to uninstall "%s"': 'no es posible instalar "%s"', 'uncheck all': 'desmarcar todos', 'uninstall': 'desinstalar', 'unknown': 'desconocido', 'update': 'actualizar', 'update all languages': 'actualizar todos los lenguajes', 'Update:': 'Actualice:', 'upload application:': 'subir aplicación:', 'Upload existing application': 'Suba esta aplicación', 'upload file:': 'suba archivo:', 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) para AND, (...)|(...) para OR, y ~(...) para NOT, para crear consultas más complejas.', 'User %(id)s is impersonating %(other_id)s': 'El usuario %(id)s está suplantando %(other_id)s', 'User %(id)s Logged-in': 'El usuario %(id)s inició la sesión', 'User %(id)s Logged-out': 'El usuario %(id)s finalizó la sesión', 'User %(id)s Password changed': 'Contraseña del usuario %(id)s cambiada', 'User %(id)s Password reset': 'Contraseña del usuario %(id)s reiniciada', 'User %(id)s Profile updated': 'Actualizado el perfil del usuario %(id)s', 'User %(id)s Registered': 'Usuario %(id)s Registrado', 'User %(id)s Username retrieved': 'Se ha recuperado el nombre de usuario del usuario %(id)s', 'User %(username)s Logged-in': 'El usuario %(username)s inició la sesión', "User '%(username)s' Logged-in": "El usuario '%(username)s' inició la sesión", "User '%(username)s' Logged-out": "El usuario '%(username)s' finalizó la sesión", 'User Id': 'Id de Usuario', 'User ID': 'ID de Usuario', 'User Logged-out': 'El usuario finalizó la sesión', 'Username': 'Nombre de usuario', 'Username retrieve': 'Recuperar nombre de usuario', 'value already in database or empty': 'el valor ya existe en la base de datos o está vacío', 'value not allowed': 'valor no permitido', 'value not in database': 'el valor no está en la base de datos', 'Verify Password': 'Verificar Contraseña', 'Version': 'Versión', 'versioning': 'versiones', 'Videos': 'Vídeos', 'View': 'Vista', 'view': 'vista', 'View %(entity)s': 'Ver %(entity)s', 'Views': 'Vistas', 'views': 'vistas', 'web2py is up to date': 'web2py está actualizado', 'web2py Recent Tweets': 'Tweets Recientes de web2py', 'Welcome': 'Bienvenido', 'Welcome %s': 'Bienvenido %s', 'Welcome to web2py': 'Bienvenido a web2py', 'Welcome to web2py!': '¡Bienvenido a web2py!', 'Which called the function %s located in the file %s': 'La cual llamó la función %s localizada en el archivo %s', 'Working...': 'Trabajando...', 'YES': 'SÍ', 'You are successfully running web2py': 'Usted está ejecutando web2py exitosamente', 'You can modify this application and adapt it to your needs': 'Usted puede modificar esta aplicación y adaptarla a sus necesidades', 'You visited the url %s': 'Usted visitó la url %s', 'Your username is: %(username)s': 'Su nombre de usuario es: %(username)s', }
gpl-3.0
8,112,073,307,084,498,000
46.647856
281
0.705941
false
sspickle/assessdb
webapp/assessdb/views/appViews.py
1
2866
from pyramid.httpexceptions import HTTPFound

from pyramid.security import (
    remember,
    forget,
    )

from pyramid.response import Response
from sqlalchemy.exc import DBAPIError

from ..models import Course

from pyramid.view import (
    view_config,
    view_defaults
    )

from ..security import (
    USERS,
    check_password
    )


@view_defaults(renderer='home.pt')
class AssessDBViews:
    def __init__(self, request):
        self.request = request
        self.logged_in = request.authenticated_userid

    @view_config(route_name='home', renderer='../templates/home.pt')
    def my_view(self):
        try:
            courses = self.request.dbsession.query(Course).all()
        except DBAPIError:
            return Response(db_err_msg, content_type='text/plain', status=500)
        return {'name':'default view', 'courses': courses, 'project': 'assessdb'}

    """
    @view_config(route_name='login', renderer='../templates/login.pt')
    def login(self):
        request = self.request
        login_url = request.route_url('login')
        referrer = request.url
        if referrer == login_url:
            referrer = '/'  # never use login form itself as came_from
        came_from = request.params.get('came_from', referrer)
        message = ''
        login = ''
        password = ''
        if 'form.submitted' in request.params:
            login = request.params['login']
            password = request.params['password']
            if check_password(password, USERS.get(login)):
                headers = remember(request, login)
                return HTTPFound(location=came_from, headers=headers)
            message = 'Failed login'
        return dict(
            name='Login',
            message=message,
            url=request.application_url + '/login',
            came_from=came_from,
            login=login,
            password=password,
            )

    @view_config(route_name='logout', renderer='../templates/logout.pt')
    def logout(self):
        request = self.request
        headers = forget(request)
        url = request.route_url('home')
        return HTTPFound(location=url, headers=headers)
    """


db_err_msg = """\
Pyramid is having a problem using your SQL database.  The problem
might be caused by one of the following things:

1.  You may need to run the "initialize_assessdb_db" script
    to initialize your database tables.  Check your virtual
    environment's "bin" directory for this script and try to run it.

2.  Your database server may not be running.  Check that the
    database server referred to by the "sqlalchemy.url" setting in
    your "development.ini" file is running.

After you fix the problem, please restart the Pyramid application to
try it again.
"""
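# --- Illustrative sketch, not part of the original file ---
# The views above assume routes named 'home', 'login' and 'logout'
# (via route_name= and request.route_url). A minimal route setup satisfying
# those assumptions would normally live in the project's configuration
# module; the function name and URL paths below are assumptions for
# illustration only, not taken from this repository.
#
#     def includeme(config):
#         config.add_route('home', '/')
#         config.add_route('login', '/login')
#         config.add_route('logout', '/logout')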
bsd-2-clause
4,265,857,922,636,594,700
29.489362
81
0.606071
false
cirocosta/avisenchente
src/endpoints_proto_datastore/ndb/model.py
1
60098
# Copyright 2012 Google Inc. All Rights Reserved. """EndpointsModel definition and accompanying definitions. This model can be used to replace an existing NDB model and allow simple conversion of model classes into ProtoRPC message classes. These classes can be used to simplify endpoints API methods so that only entities need be used rather than converting between ProtoRPC messages and entities and then back again. """ import functools import itertools try: import json except ImportError: import simplejson as json import pickle from . import properties from . import utils as ndb_utils from .. import utils from protorpc import messages from google.appengine.api import datastore_types from google.appengine.datastore import datastore_query from google.appengine.ext import endpoints from google.appengine.ext import ndb __all__ = ['EndpointsModel'] QUERY_LIMIT_DEFAULT = 10 QUERY_LIMIT_MAX = 100 QUERY_MAX_EXCEEDED_TEMPLATE = '%s results requested. Exceeds limit of %s.' PROPERTY_COLLISION_TEMPLATE = ('Name conflict: %s set as an NDB property and ' 'an Endpoints alias property.') BAD_FIELDS_SCHEMA_TEMPLATE = ( 'Model %s has bad message fields schema type: %s. Only a ' 'list, tuple, dictionary or MessageFieldsSchema are allowed.') NO_MSG_FIELD_TEMPLATE = ('Tried to use a ProtoRPC message field: %s. Only ' 'simple fields can be used when allow message fields ' 'is turned off.') REQUEST_MESSAGE = 'request_message' RESPONSE_MESSAGE = 'response_message' HTTP_METHOD = 'http_method' QUERY_HTTP_METHOD = 'GET' # This global will be updated after EndpointsModel is defined and is used by # the metaclass EndpointsMetaModel BASE_MODEL_CLASS = None EndpointsAliasProperty = properties.EndpointsAliasProperty MessageFieldsSchema = utils.MessageFieldsSchema def _VerifyProperty(modelclass, attr_name): """Return a property if set on a model class, otherwise raises an exception. Args: modelclass: A subclass of EndpointsModel which has a _GetEndpointsProperty method. attr_name: String; the name of the property. Returns: The property set at the attribute name. Raises: AttributeError: if the property is not set on the class. """ prop = modelclass._GetEndpointsProperty(attr_name) if prop is None: error_msg = ('The attribute %s is not an accepted field. Accepted fields ' 'are limited to NDB properties and Endpoints alias ' 'properties.' % (attr_name,)) raise AttributeError(error_msg) return prop def ToValue(prop, value): """Serializes a value from a property to a ProtoRPC message type. Args: prop: The NDB or alias property to be converted. value: The value to be serialized. Returns: The serialized version of the value to be set on a ProtoRPC message. """ if value is None: return value elif isinstance(value, EndpointsModel): return value.ToMessage() elif hasattr(prop, 'ToValue') and callable(prop.ToValue): return prop.ToValue(value) elif isinstance(prop, ndb.JsonProperty): return json.dumps(value) elif isinstance(prop, ndb.PickleProperty): return pickle.dumps(value) elif isinstance(prop, ndb.UserProperty): return utils.UserMessageFromUser(value) elif isinstance(prop, ndb.GeoPtProperty): return utils.GeoPtMessage(lat=value.lat, lon=value.lon) elif isinstance(prop, ndb.KeyProperty): return value.urlsafe() elif isinstance(prop, ndb.BlobKeyProperty): return str(value) elif isinstance(prop, (ndb.TimeProperty, ndb.DateProperty, ndb.DateTimeProperty)): return utils.DatetimeValueToString(value) else: return value def FromValue(prop, value): """Deserializes a value from a ProtoRPC message type to a property value. 
Args: prop: The NDB or alias property to be set. value: The value to be deserialized. Returns: The deserialized version of the ProtoRPC value to be set on a property. Raises: TypeError: if a StructuredProperty has a model class that is not an EndpointsModel. """ if value is None: return value if isinstance(prop, (ndb.StructuredProperty, ndb.LocalStructuredProperty)): modelclass = prop._modelclass if not utils.IsSubclass(modelclass, EndpointsModel): error_msg = ('Structured properties should refer to models which ' 'inherit from EndpointsModel. Received an instance ' 'of %s.' % (modelclass.__class__.__name__,)) raise TypeError(error_msg) return modelclass.FromMessage(value) if hasattr(prop, 'FromValue') and callable(prop.FromValue): return prop.FromValue(value) elif isinstance(prop, ndb.JsonProperty): return json.loads(value) elif isinstance(prop, ndb.PickleProperty): return pickle.loads(value) elif isinstance(prop, ndb.UserProperty): return utils.UserMessageToUser(value) elif isinstance(prop, ndb.GeoPtProperty): return datastore_types.GeoPt(lat=value.lat, lon=value.lon) elif isinstance(prop, ndb.KeyProperty): return ndb.Key(urlsafe=value) elif isinstance(prop, ndb.BlobKeyProperty): return datastore_types.BlobKey(value) elif isinstance(prop, (ndb.TimeProperty, ndb.DateProperty, ndb.DateTimeProperty)): return utils.DatetimeValueFromString(value) else: return value class _EndpointsQueryInfo(object): """A custom container for query information. This will be set on an EndpointsModel (or subclass) instance, and can be used in conjunction with alias properties to store query information, simple filters, ordering and ancestor. Uses an entity to construct simple filters, to validate ordering, to validate ancestor and finally to construct a query from these filters, ordering and/or ancestor. Attributes: _entity: An instance of EndpointsModel or a subclass. The values from this will be used to create filters for a query. _filters: A set of simple equality filters (ndb.FilterNode). Utilizes the fact that FilterNodes are hashable and respect equality. _ancestor: An ndb Key to be used as an ancestor for a query. _cursor: A datastore_query.Cursor, to be used for resuming a query. _limit: A positive integer, to be used in a fetch. _order: String; comma separated list of property names or property names preceded by a minus sign. Used to define an order of query results. _order_attrs: The attributes (or negation of attributes) parsed from _order. If these can't be parsed from the attributes in _entity, will throw an exception. _query_final: A final query created using the orders (_order_attrs), filters (_filters) and class definition (_entity) in the query info. If this is not null, setting attributes on the query info object will fail. """ def __init__(self, entity): """Sets all internal variables to the default values and verifies entity. Args: entity: An instance of EndpointsModel or a subclass. Raises: TypeError: if entity is not an instance of EndpointsModel or a subclass. """ if not isinstance(entity, EndpointsModel): raise TypeError('Query info can only be used with an instance of an ' 'EndpointsModel subclass. Received: instance of %s.' 
% (entity.__class__.__name__,)) self._entity = entity self._filters = set() self._ancestor = None self._cursor = None self._limit = None self._order = None self._order_attrs = () self._query_final = None def _PopulateFilters(self): """Populates filters in query info by using values set on the entity.""" entity = self._entity for prop in entity._properties.itervalues(): # The name of the attr on the model/object, may differ from the name # of the NDB property in the datastore attr_name = prop._code_name current_value = getattr(entity, attr_name) # Only filter for non-null values if current_value is not None: self._AddFilter(prop == current_value) def SetQuery(self): """Sets the final query on the query info object. Uses the filters and orders in the query info to refine the query. If the final query is already set, does nothing. """ if self._query_final is not None: return self._PopulateFilters() # _entity.query calls the classmethod for the entity if self.ancestor is not None: query = self._entity.query(ancestor=self.ancestor) else: query = self._entity.query() for simple_filter in self._filters: query = query.filter(simple_filter) for order_attr in self._order_attrs: query = query.order(order_attr) self._query_final = query def _AddFilter(self, candidate_filter): """Checks a filter and sets it in the filter set. Args: candidate_filter: An NDB filter which may be added to the query info. Raises: AttributeError: if query on the object is already final. TypeError: if the filter is not a simple filter (FilterNode). ValueError: if the operator symbol in the filter is not equality. """ if self._query_final is not None: raise AttributeError('Can\'t add more filters. Query info is final.') if not isinstance(candidate_filter, ndb.FilterNode): raise TypeError('Only simple filters can be used. Received: %s.' % (candidate_filter,)) opsymbol = candidate_filter._FilterNode__opsymbol if opsymbol != '=': raise ValueError('Only equality filters allowed. Received: %s.' % (opsymbol,)) self._filters.add(candidate_filter) @property def query(self): """Public getter for the final query on query info.""" return self._query_final def _GetAncestor(self): """Getter to be used for public ancestor property on query info.""" return self._ancestor def _SetAncestor(self, value): """Setter to be used for public ancestor property on query info. Args: value: A potential value for an ancestor. Raises: AttributeError: if query on the object is already final. AttributeError: if the ancestor has already been set. TypeError: if the value to be set is not an instance of ndb.Key. """ if self._query_final is not None: raise AttributeError('Can\'t set ancestor. Query info is final.') if self._ancestor is not None: raise AttributeError('Ancestor can\'t be set twice.') if not isinstance(value, ndb.Key): raise TypeError('Ancestor must be an instance of ndb.Key.') self._ancestor = value ancestor = property(fget=_GetAncestor, fset=_SetAncestor) def _GetCursor(self): """Getter to be used for public cursor property on query info.""" return self._cursor def _SetCursor(self, value): """Setter to be used for public cursor property on query info. Args: value: A potential value for a cursor. Raises: AttributeError: if query on the object is already final. AttributeError: if the cursor has already been set. TypeError: if the value to be set is not an instance of datastore_query.Cursor. """ if self._query_final is not None: raise AttributeError('Can\'t set cursor. 
Query info is final.') if self._cursor is not None: raise AttributeError('Cursor can\'t be set twice.') if not isinstance(value, datastore_query.Cursor): raise TypeError('Cursor must be an instance of datastore_query.Cursor.') self._cursor = value cursor = property(fget=_GetCursor, fset=_SetCursor) def _GetLimit(self): """Getter to be used for public limit property on query info.""" return self._limit def _SetLimit(self, value): """Setter to be used for public limit property on query info. Args: value: A potential value for a limit. Raises: AttributeError: if query on the object is already final. AttributeError: if the limit has already been set. TypeError: if the value to be set is not a positive integer. """ if self._query_final is not None: raise AttributeError('Can\'t set limit. Query info is final.') if self._limit is not None: raise AttributeError('Limit can\'t be set twice.') if not isinstance(value, (int, long)) or value < 1: raise TypeError('Limit must be a positive integer.') self._limit = value limit = property(fget=_GetLimit, fset=_SetLimit) def _GetOrder(self): """Getter to be used for public order property on query info.""" return self._order def _SetOrderAttrs(self): """Helper method to set _order_attrs using the value of _order. If _order is not set, simply returns, else splits _order by commas and then looks up each value (or its negation) in the _properties of the entity on the query info object. We look up directly in _properties rather than using the attribute names on the object since only NDB property names will be used for field names. Raises: AttributeError: if one of the attributes in the order is not a property on the entity. """ if self._order is None: return unclean_attr_names = self._order.strip().split(',') result = [] for attr_name in unclean_attr_names: ascending = True if attr_name.startswith('-'): ascending = False attr_name = attr_name[1:] attr = self._entity._properties.get(attr_name) if attr is None: raise AttributeError('Order attribute %s not defined.' % (attr_name,)) if ascending: result.append(+attr) else: result.append(-attr) self._order_attrs = tuple(result) def _SetOrder(self, value): """Setter to be used for public order property on query info. Sets the value of _order and attempts to set _order_attrs as well by valling _SetOrderAttrs, which uses the value of _order. If the passed in value is None, but the query is not final and the order has not already been set, the method will return without any errors or data changed. Args: value: A potential value for an order. Raises: AttributeError: if query on the object is already final. AttributeError: if the order has already been set. TypeError: if the order to be set is not a string. """ if self._query_final is not None: raise AttributeError('Can\'t set order. Query info is final.') if self._order is not None: raise AttributeError('Order can\'t be set twice.') if value is None: return elif not isinstance(value, basestring): raise TypeError('Order must be a string.') self._order = value self._SetOrderAttrs() order = property(fget=_GetOrder, fset=_SetOrder) class EndpointsMetaModel(ndb.MetaModel): """Metaclass for EndpointsModel. This exists to create new instances of the mutable class attributes for subclasses and to verify ProtoRPC specific properties. 
""" def __init__(cls, name, bases, classdict): """Verifies additional ProtoRPC properties on an NDB model.""" super(EndpointsMetaModel, cls).__init__(name, bases, classdict) cls._alias_properties = {} cls._proto_models = {} cls._proto_collections = {} cls._property_to_proto = ndb_utils.NDB_PROPERTY_TO_PROTO.copy() cls._FixUpAliasProperties() cls._VerifyMessageFieldsSchema(classdict) cls._VerifyProtoMapping(classdict) def _FixUpAliasProperties(cls): """Updates the alias properties map and verifies each alias property. Raises: AttributeError: if an alias property is defined beginning with an underscore. AttributeError: if an alias property is defined that conflicts with an NDB property. """ for attr_name in dir(cls): prop = getattr(cls, attr_name, None) if isinstance(prop, EndpointsAliasProperty): if attr_name.startswith('_'): raise AttributeError('EndpointsAliasProperty %s cannot begin with an ' 'underscore character.' % (attr_name,)) if attr_name in cls._properties: raise AttributeError(PROPERTY_COLLISION_TEMPLATE % (attr_name,)) prop._FixUp(attr_name) cls._alias_properties[prop._name] = prop def _VerifyMessageFieldsSchema(cls, classdict): """Verifies that the preset message fields correspond to actual properties. If no message fields schema was set on the class, sets the schema using the default fields determing by the NDB properties and alias properties defined. In either case, converts the passed in fields to an instance of MessageFieldsSchema and sets that as the value of _message_fields_schema on the class. Args: classdict: A dictionary of new attributes defined on the class (not on any subclass). Raises: TypeError: if a message fields schema was set on the class that is not a list, tuple, dictionary, or MessageFieldsSchema instance. """ message_fields_schema = classdict.get('_message_fields_schema') if message_fields_schema is None: message_fields_schema = cls._DefaultFields() elif not isinstance(message_fields_schema, (list, tuple, dict, MessageFieldsSchema)): raise TypeError(BAD_FIELDS_SCHEMA_TEMPLATE % (cls.__name__, message_fields_schema.__class__.__name__)) else: for attr in message_fields_schema: _VerifyProperty(cls, attr) cls._message_fields_schema = MessageFieldsSchema(message_fields_schema, name=cls.__name__) def _VerifyProtoMapping(cls, classdict): """Verifies that each property on the class has an associated proto mapping. First checks if there is a _custom_property_to_proto dictionary present and then overrides the class to proto mapping found in _property_to_proto. Then, for each property (NDB or alias), tries to add a mapping first by checking for a message field attribute, and then by trying to infer based on property subclass. Args: classdict: A dictionary of new attributes defined on the class (not on any subclass). Raises: TypeError: if a key from _custom_property_to_proto is not a valid NBD property. (We don't allow EndpointsAliasProperty here because it is not meant to be subclassed and defines a message_field). TypeError: if after checking _custom_property_to_proto, message_field and inference from a superclass, no appropriate mapping is found in _property_to_proto. """ custom_property_to_proto = classdict.get('_custom_property_to_proto') if isinstance(custom_property_to_proto, dict): for key, value in custom_property_to_proto.iteritems(): if not utils.IsSubclass(key, ndb.Property): raise TypeError('Invalid property class: %s.' 
% (key,)) cls._property_to_proto[key] = value for prop in cls._EndpointsPropertyItervalues(): property_class = prop.__class__ cls._TryAddMessageField(property_class) cls._TryInferSuperclass(property_class) if property_class not in cls._property_to_proto: raise TypeError('No converter present for property %s' % (property_class.__name__,)) # TODO(dhermes): Consider renaming this optional property attr from # "message_field" to something more generic. It can either be # a field or it can be a method with the signature # (property instance, integer index) def _TryAddMessageField(cls, property_class): """Tries to add a proto mapping for a property class using a message field. If the property class is already in the proto mapping, does nothing. Args: property_class: The class of a property from a model. """ if property_class in cls._property_to_proto: return message_field = getattr(property_class, 'message_field', None) if message_field is not None: cls._property_to_proto[property_class] = message_field def _TryInferSuperclass(cls, property_class): """Tries to add a proto mapping for a property class by using a base class. If the property class is already in the proto mapping, does nothing. Descends up the class hierarchy until an ancestor class has more than one base class or until ndb.Property is reached. If any class up the hierarchy is already in the proto mapping, the method/field for the superclass is also set for the propert class in question. Args: property_class: The class of a property from a model. """ if (property_class in cls._property_to_proto or utils.IsSubclass(property_class, EndpointsAliasProperty)): return bases = property_class.__bases__ while len(bases) == 1 and bases[0] != ndb.Property: base = bases[0] if base in cls._property_to_proto: cls._property_to_proto[property_class] = cls._property_to_proto[base] return else: bases = base.__bases__ class EndpointsModel(ndb.Model): """Subclass of NDB model that enables translation to ProtoRPC message classes. Also uses a subclass of ndb.MetaModel as the metaclass, to allow for custom behavior (particularly property verification) on class creation. Two types of properties are allowed, the standard NDB property, which ends up in a _properties dictionary and {EndpointsAliasProperty}s, which end up in an _alias_properties dictionary. They can be accessed simultaneously through _GetEndpointsProperty. As with NDB, you cannot use the same property object to describe multiple properties -- you must create separate property objects for each property. In addition to _alias_properties, there are several other class variables that can be used to augment the default NDB model behavior: _property_to_proto: This is a mapping from properties to ProtoRPC message fields or methods which can take a property and an index and convert them to a message field. It starts out as a copy of the global NDB_PROPERTY_TO_PROTO from ndb_utils and can be augmented by your class and/or property definitions _custom_property_to_proto: if set as a dictionary, allows default mappings from NDB properties to ProtoRPC fields in _property_to_proto to be overridden. The metaclass ensures each property (alias properties included) can be converted to a ProtoRPC message field before the class can be created. Due to this, a ProtoRPC message class can be created using any subset of the model properties in any order, or a collection containing multiple messages of the same class. 
Once created, these ProtoRPC message classes are cached in the class variables _proto_models and _proto_collections. Endpoints models also have two class methods which can be used as decorators for Cloud Endpoints API methods: method and query_method. These methods use the endpoints.api decorator but tailor the behavior to the specific model class. Where a method decorated with the endpoints.api expects a ProtoRPC message class for the response and request type, a method decorated with the "method" decorator provided by a model class expects an instance of that class both as input and output. In order to deserialize the ProtoRPC input to an entity and serialize the entity returned by the decorated method back to ProtoRPC, request and response fields can be specified which the Endpoints model class can use to create (and cache) corresponding ProtoRPC message classes. Similarly, a method decorated with the query_method decorator expects a query for the EndpointsModel subclass both as input and output. Instead of specifying request/response fields for entities, a query and collection fields list can be used. When no fields are provided, the default fields from the class are used. This can be overridden by setting the class variable _message_fields_schema to a dictionary, list, tuple or MessageFieldsSchema of your choice. If none is provided, the default will include all NDB properties and all Endpoints Alias properties. """ __metaclass__ = EndpointsMetaModel _custom_property_to_proto = None _message_fields_schema = None # A new instance of each of these will be created by the metaclass # every time a subclass is declared _alias_properties = None _proto_models = None _proto_collections = None _property_to_proto = None def __init__(self, *args, **kwargs): """Initializes NDB model and adds a query info object. Attributes: _endpoints_query_info: An _EndpointsQueryInfo instance, directly tied to the current instance that can be used to form queries using properties provided by the instance and can be augmented by alias properties to allow custom queries. """ super(EndpointsModel, self).__init__(*args, **kwargs) self._endpoints_query_info = _EndpointsQueryInfo(self) self._from_datastore = False @property def from_datastore(self): """Property accessor that represents if the entity is from the datastore.""" return self._from_datastore @classmethod def _DefaultFields(cls): """The default fields for the class. Uses all NDB properties and alias properties which are different from the alias properties defined on the parent class EndpointsModel. """ fields = cls._properties.keys() # Only include Alias properties not defined on the base class for prop_name, prop in cls._alias_properties.iteritems(): base_alias_props = getattr(BASE_MODEL_CLASS, '_alias_properties', {}) base_prop = base_alias_props.get(prop_name) if base_prop != prop: fields.append(prop_name) return fields def _CopyFromEntity(self, entity): """Copies properties from another entity to the current one. Only sets properties on the current entity that are not already set. Args: entity: A model instance to be copied from. Raises: TypeError: if the entity passed in is not the exact same type as the current entity. """ if entity.__class__ != self.__class__: raise TypeError('Can only copy from entities of the exact type %s. ' 'Received an instance of %s.' 
% (self.__class__.__name__, entity.__class__.__name__)) for prop in entity._EndpointsPropertyItervalues(): # The name of the attr on the model/object, may differ # from the name of the property attr_name = prop._code_name value = getattr(entity, attr_name) if value is not None: # Only overwrite null values current_value = getattr(self, attr_name) if current_value is None: setattr(self, attr_name, value) def UpdateFromKey(self, key): """Attempts to get current entity for key and update the unset properties. Only does anything if there is a corresponding entity in the datastore. Calls _CopyFromEntity to merge the current entity with the one that was retrieved. If one was retrieved, sets _from_datastore to True to signal that an entity was retrieved. Args: key: An NDB key used to retrieve an entity. """ self._key = key entity = self._key.get() if entity is not None: self._CopyFromEntity(entity) self._from_datastore = True def IdSet(self, value): """Setter to be used for default id EndpointsAliasProperty. Sets the key on the current entity using the value passed in as the ID. Using this key, attempts to retrieve the entity from the datastore and update the unset properties of the current entity with those from the retrieved entity. Args: value: An integer ID value for a simple key. Raises: TypeError: if the value to be set is not an integer. (Though if outside of a given range, the get call will also throw an exception.) """ if not isinstance(value, (int, long)): raise TypeError('ID must be an integer.') self.UpdateFromKey(ndb.Key(self.__class__, value)) @EndpointsAliasProperty(setter=IdSet, property_type=messages.IntegerField) def id(self): """Getter to be used for default id EndpointsAliasProperty. Specifies that the ProtoRPC property_type is IntegerField, though simple string IDs or more complex IDs that use ancestors could also be used. Returns: The integer ID of the entity key, if the key is not null and the integer ID is not null, else returns None. """ if self._key is not None: return self._key.integer_id() def EntityKeySet(self, value): """Setter to be used for default entityKey EndpointsAliasProperty. Sets the key on the current entity using the urlsafe entity key string. Using the key set on the entity, attempts to retrieve the entity from the datastore and update the unset properties of the current entity with those from the retrieved entity. Args: value: String; A urlsafe entity key for an object. Raises: TypeError: if the value to be set is not a string. (Though if the string is not valid base64 or not properly encoded, the key creation will also throw an exception.) """ if not isinstance(value, basestring): raise TypeError('entityKey must be a string.') self.UpdateFromKey(ndb.Key(urlsafe=value)) @EndpointsAliasProperty(setter=EntityKeySet) def entityKey(self): """Getter to be used for default entityKey EndpointsAliasProperty. Uses the default ProtoRPC property_type StringField. Returns: The urlsafe string produced by the entity key, if the key is not null, else returns None. """ if self._key is not None: return self._key.urlsafe() def LimitSet(self, value): """Setter to be used for default limit EndpointsAliasProperty. Simply sets the limit on the entity's query info object, and the query info object handles validation. Args: value: The limit value to be set. """ self._endpoints_query_info.limit = value @EndpointsAliasProperty(setter=LimitSet, property_type=messages.IntegerField) def limit(self): """Getter to be used for default limit EndpointsAliasProperty. 
Uses the ProtoRPC property_type IntegerField since a limit. Returns: The integer (or null) limit from the query info on the entity. """ return self._endpoints_query_info.limit def OrderSet(self, value): """Setter to be used for default order EndpointsAliasProperty. Simply sets the order on the entity's query info object, and the query info object handles validation. Args: value: The order value to be set. """ self._endpoints_query_info.order = value @EndpointsAliasProperty(setter=OrderSet) def order(self): """Getter to be used for default order EndpointsAliasProperty. Uses the default ProtoRPC property_type StringField. Returns: The string (or null) order from the query info on the entity. """ return self._endpoints_query_info.order def PageTokenSet(self, value): """Setter to be used for default pageToken EndpointsAliasProperty. Tries to use Cursor.from_websafe_string to convert the value to a cursor and then sets the cursor on the entity's query info object, and the query info object handles validation. Args: value: The websafe string version of a cursor. """ cursor = datastore_query.Cursor.from_websafe_string(value) self._endpoints_query_info.cursor = cursor @EndpointsAliasProperty(setter=PageTokenSet) def pageToken(self): """Getter to be used for default pageToken EndpointsAliasProperty. Uses the default ProtoRPC property_type StringField. Returns: The websafe string from the cursor on the entity's query info object, or None if the cursor is null. """ cursor = self._endpoints_query_info.cursor if cursor is not None: return cursor.to_websafe_string() @classmethod def _GetEndpointsProperty(cls, attr_name): """Return a property if set on a model class. Attempts to retrieve both the NDB and alias version of the property, makes sure at most one is not null and then returns that one. Args: attr_name: String; the name of the property. Returns: The property set at the attribute name. Raises: AttributeError: if the property is both an NDB and alias property. """ property_value = cls._properties.get(attr_name) alias_value = cls._alias_properties.get(attr_name) if property_value is not None and alias_value is not None: raise AttributeError(PROPERTY_COLLISION_TEMPLATE % (attr_name,)) return property_value or alias_value @classmethod def _EndpointsPropertyItervalues(cls): """Iterator containing both NDB and alias property instances for class.""" property_values = cls._properties.itervalues() alias_values = cls._alias_properties.itervalues() return itertools.chain(property_values, alias_values) @classmethod def ProtoModel(cls, fields=None, allow_message_fields=True): """Creates a ProtoRPC message class using a subset of the class properties. Creates a MessageFieldsSchema from the passed in fields (may cause exception if not valid). If this MessageFieldsSchema is already in the cache of models, returns the cached value. If not, verifies that each property is valid (may cause exception) and then uses the proto mapping to create the corresponding ProtoRPC field. Using the created fields and the name from the MessageFieldsSchema, creates a new ProtoRPC message class by calling the type() constructor. Before returning it, it caches the newly created ProtoRPC message class. Args: fields: Optional fields, defaults to None. If None, the default from the class is used. If specified, will be converted to a MessageFieldsSchema object (and verified as such). allow_message_fields: An optional boolean; defaults to True. If True, does nothing. 
If False, stops ProtoRPC message classes that have one or more ProtoRPC {MessageField}s from being created. Returns: The cached or created ProtoRPC message class specified by the fields. Raises: AttributeError: if a verified property has no proto mapping registered. This is a serious error and should not occur due to what happens in the metaclass. TypeError: if a value from the proto mapping is not a ProtoRPC field or a callable method (which takes a property and an index). TypeError: if a proto mapping results in a ProtoRPC MessageField while message fields are explicitly disallowed by having allow_message_fields set to False. """ if fields is None: fields = cls._message_fields_schema # If fields is None, either the module user manaully removed the default # value or some bug has occurred in the library message_fields_schema = MessageFieldsSchema(fields, basename=cls.__name__ + 'Proto') if message_fields_schema in cls._proto_models: cached_model = cls._proto_models[message_fields_schema] if not allow_message_fields: for field in cached_model.all_fields(): if isinstance(field, messages.MessageField): error_msg = NO_MSG_FIELD_TEMPLATE % (field.__class__.__name__,) raise TypeError(error_msg) return cached_model message_fields = {} for index, name in enumerate(message_fields_schema): field_index = index + 1 prop = _VerifyProperty(cls, name) to_proto = cls._property_to_proto.get(prop.__class__) if to_proto is None: raise AttributeError('%s does not have a proto mapping for %s.' % (cls.__name__, prop.__class__.__name__)) if utils.IsSimpleField(to_proto): proto_attr = ndb_utils.MessageFromSimpleField(to_proto, prop, field_index) elif callable(to_proto): proto_attr = to_proto(prop, field_index) else: raise TypeError('Proto mapping for %s was invalid. Received %s, which ' 'was neither a ProtoRPC field, nor a callable object.' % (name, to_proto)) if not allow_message_fields: if isinstance(proto_attr, messages.MessageField): error_msg = NO_MSG_FIELD_TEMPLATE % (proto_attr.__class__.__name__,) raise TypeError(error_msg) message_fields[name] = proto_attr # TODO(dhermes): This behavior should be regulated more directly. # This is to make sure the schema name in the discovery # document is message_fields_schema.name rather than # EndpointsProtoDatastoreNdbModel{message_fields_schema.name} message_fields['__module__'] = '' message_class = type(message_fields_schema.name, (messages.Message,), message_fields) cls._proto_models[message_fields_schema] = message_class return message_class @classmethod def ProtoCollection(cls, collection_fields=None): """Creates a ProtoRPC message class using a subset of the class properties. In contrast to ProtoModel, this creates a collection with only two fields: items and nextPageToken. The field nextPageToken is used for paging through result sets, while the field items is a repeated ProtoRPC MessageField used to hold the query results. The fields passed in are used to specify the ProtoRPC message class set on the MessageField. As with ProtoModel, creates a MessageFieldsSchema from the passed in fields, checks if this MessageFieldsSchema is already in the cache of collections, and returns the cached value if it exists. If not, will call ProtoModel with the collection_fields passed in to set the ProtoRPC message class on the items MessageField. Before returning it, it caches the newly created ProtoRPC message class in a cache of collections. Args: collection_fields: Optional fields, defaults to None. If None, the default from the class is used. 
If specified, will be converted to a MessageFieldsSchema object (and verified as such). Returns: The cached or created ProtoRPC (collection) message class specified by the fields. """ if collection_fields is None: collection_fields = cls._message_fields_schema message_fields_schema = MessageFieldsSchema(collection_fields, basename=cls.__name__ + 'Proto') if message_fields_schema in cls._proto_collections: return cls._proto_collections[message_fields_schema] proto_model = cls.ProtoModel(fields=message_fields_schema) message_fields = { 'items': messages.MessageField(proto_model, 1, repeated=True), 'nextPageToken': messages.StringField(2), # TODO(dhermes): This behavior should be regulated more directly. # This is to make sure the schema name in the discovery # document is message_fields_schema.collection_name '__module__': '', } collection_class = type(message_fields_schema.collection_name, (messages.Message,), message_fields) cls._proto_collections[message_fields_schema] = collection_class return collection_class def ToMessage(self, fields=None): """Converts an entity to an ProtoRPC message. Uses the fields list passed in to create a ProtoRPC message class and then converts the relevant fields from the entity using ToValue. Args: fields: Optional fields, defaults to None. Passed to ProtoModel to create a ProtoRPC message class for the message. Returns: The ProtoRPC message created using the values from the entity and the fields provided for the message class. Raises: TypeError: if a repeated field has a value which is not a tuple or list. """ proto_model = self.ProtoModel(fields=fields) proto_args = {} for field in proto_model.all_fields(): name = field.name value_property = _VerifyProperty(self.__class__, name) # Since we are using getattr rather than checking self._values, this will # also work for properties which have a default set value = getattr(self, name) if value is None: continue if getattr(proto_model, name).repeated: if not isinstance(value, (list, tuple)): error_msg = ('Property %s is a repeated field and its value should ' 'be a list or tuple. Received: %s' % (name, value)) raise TypeError(error_msg) to_add = [ToValue(value_property, element) for element in value] else: to_add = ToValue(value_property, value) proto_args[name] = to_add return proto_model(**proto_args) @classmethod def FromMessage(cls, message): """Converts a ProtoRPC message to an entity of the model class. Makes sure the message being converted is an instance of a ProtoRPC message class we have already encountered and then converts the relevant field values to the entity values using FromValue. When collecting the values from the message for conversion to an entity, NDB and alias properties are treated differently. The NDB properties can just be passed in to the class constructor as kwargs, but the alias properties must be set after the fact, and may even throw exceptions if the message has fields corresponding to alias properties which don't define a setter. Args: message: A ProtoRPC message. Returns: The entity of the current class that was created using the message field values. Raises: TypeError: if a message class is encountered that has not been stored in the _proto_models cache on the class. This is a precaution against unkown ProtoRPC message classes. TypeError: if a repeated field has a value which is not a tuple or list. 
""" message_class = message.__class__ if message_class not in cls._proto_models.values(): error_msg = ('The message is an instance of %s, which is a class this ' 'EndpointsModel does not know how to process.' % (message_class.__name__)) raise TypeError(error_msg) entity_kwargs = {} alias_args = [] for field in sorted(message_class.all_fields(), key=lambda field: field.number): name = field.name value = getattr(message, name, None) if value is None: continue value_property = _VerifyProperty(cls, name) if field.repeated: if not isinstance(value, (list, tuple)): error_msg = ('Repeated attribute should be a list or tuple. ' 'Received a %s.' % (value.__class__.__name__,)) raise TypeError(error_msg) to_add = [FromValue(value_property, element) for element in value] else: to_add = FromValue(value_property, value) if isinstance(value_property, EndpointsAliasProperty): alias_args.append((name, to_add)) else: entity_kwargs[name] = to_add # Will not throw exception if a required property is not included. This # sort of exception if only thrown when attempting to put the entity. entity = cls(**entity_kwargs) # Set alias properties, will fail on an alias property if that # property was not defined with a setter for name, value in alias_args: setattr(entity, name, value) return entity @classmethod def ToMessageCollection(cls, items, collection_fields=None, next_cursor=None): """Converts a list of entities and cursor to ProtoRPC (collection) message. Uses the fields list to create a ProtoRPC (collection) message class and then converts each item into a ProtoRPC message to be set as a list of items. If the cursor is not null, we convert it to a websafe string and set the nextPageToken field on the result message. Args: items: A list of entities of this model. collection_fields: Optional fields, defaults to None. Passed to ProtoCollection to create a ProtoRPC message class for for the collection of messages. next_cursor: An optional query cursor, defaults to None. Returns: The ProtoRPC message created using the entities and cursor provided, making sure that the entity message class matches collection_fields. """ proto_model = cls.ProtoCollection(collection_fields=collection_fields) items_as_message = [item.ToMessage(fields=collection_fields) for item in items] result = proto_model(items=items_as_message) if next_cursor is not None: result.nextPageToken = next_cursor.to_websafe_string() return result @classmethod @utils.positional(1) def method(cls, request_fields=None, response_fields=None, user_required=False, **kwargs): """Creates an API method decorator using provided metadata. Augments the endpoints.method decorator-producing function by allowing API methods to receive and return a class instance rather than having to worry with ProtoRPC messages (and message class definition). By specifying a list of ProtoRPC fields rather than defining the class, response and request classes can be defined on the fly. If there is any collision between request/response field lists and potential custom request/response message definitions that can be passed to the endpoints.method decorator, this call will fail. All other arguments will be passed directly to the endpoints.method decorator-producing function. If request/response field lists are used to define custom classes, the newly defined classes will also be passed to endpoints.method as the keyword arguments request_message/response_message. 
If a custom request message class is passed in, the resulting decorator will not attempt to convert the ProtoRPC message it receives into an EndpointsModel entity before passing it to the decorated method. Similarly, if a custom response message class is passed in, no attempt will be made to convert the object (returned by the decorated method) in the opposite direction. NOTE: Using utils.positional(1), we ensure the class instance will be the only positional argument hence won't have leaking/collision between the endpoints.method decorator function that we mean to pass metadata to. Args: request_fields: An (optional) list, tuple, dictionary or MessageFieldsSchema that defines a field ordering in a ProtoRPC message class. Defaults to None. response_fields: An (optional) list, tuple, dictionary or MessageFieldsSchema that defines a field ordering in a ProtoRPC message class. Defaults to None. user_required: Boolean; indicates whether or not a user is required on any incoming request. Returns: A decorator that takes the metadata passed in and augments an API method. Raises: TypeError: if there is a collision (either request or response) of field list and custom message definition. """ request_message = kwargs.get(REQUEST_MESSAGE) if request_fields is not None and request_message is not None: raise TypeError('Received both a request message class and a field list ' 'for creating a request message class.') if request_message is None: kwargs[REQUEST_MESSAGE] = cls.ProtoModel(fields=request_fields) response_message = kwargs.get(RESPONSE_MESSAGE) if response_fields is not None and response_message is not None: raise TypeError('Received both a response message class and a field list ' 'for creating a response message class.') if response_message is None: kwargs[RESPONSE_MESSAGE] = cls.ProtoModel(fields=response_fields) apiserving_method_decorator = endpoints.method(**kwargs) def RequestToEntityDecorator(api_method): """A decorator that uses the metadata passed to the enclosing method. Args: api_method: A method to be decorated. Expected signature is two positional arguments, an instance object of an API service and a variable containing a deserialized API request object, most likely as a ProtoRPC message or as an instance of the current EndpointsModel class. Returns: A decorated method that uses the metadata of the enclosing method to verify the service instance, convert the arguments to ones that can be consumed by the decorated method and serialize the method output back to a ProtoRPC message. """ @functools.wraps(api_method) def EntityToRequestMethod(service_instance, request): """Stub method to be decorated. After creation, will be passed to the standard endpoints.method decorator to preserve the necessary method attributes needed for endpoints API methods. Args: service_instance: A ProtoRPC remove service instance. request: A ProtoRPC message. Returns: A ProtoRPC message, potentially serialized after being returned from a method which returns a class instance. Raises: endpoints.UnauthorizedException: if the user required boolean from the metadata is True and if there is no current endpoints user. 
""" if user_required and endpoints.get_current_user() is None: raise endpoints.UnauthorizedException('Invalid token.') if request_message is None: # If we are using a fields list, we can convert the message to an # instance of the current class request = cls.FromMessage(request) # If developers are using request_fields to create a request message # class for them, their method should expect to receive an instance of # the current EndpointsModel class, and if it fails for some reason # their API users will receive a 503 from an uncaught exception. response = api_method(service_instance, request) if response_message is None: # If developers using a custom request message class with # response_fields to create a response message class for them, it is # up to them to return an instance of the current EndpointsModel # class. If not, their API users will receive a 503 from an uncaught # exception. response = response.ToMessage(fields=response_fields) return response return apiserving_method_decorator(EntityToRequestMethod) return RequestToEntityDecorator @classmethod @utils.positional(1) def query_method(cls, query_fields=(), collection_fields=None, limit_default=QUERY_LIMIT_DEFAULT, limit_max=QUERY_LIMIT_MAX, user_required=False, use_projection=False, **kwargs): """Creates an API query method decorator using provided metadata. This will produce a decorator which is solely intended to decorate functions which receive queries and expect them to be decorated. Augments the endpoints.method decorator-producing function by allowing API methods to receive and return a query object. Query data will be stored in an entity using the same (de)serialization methods used by the classmethod "method". Once there, the query info object on the entity will allow conversion into a query and the decorator will execute this query. Rather than request/response fields (as in "method"), we require that callers specify query fields -- which will produce the entity before it is converted to a query -- and collection fields -- which will be passed to ProtoCollection to create a container class for items returned by the query. In contrast to "method", no custom request/response message classes can be passed in, the queries and collection responses can only be specified by the query/collection fields. THIS IS SUBJECT TO CHANGE. All other arguments will be passed directly to the endpoints.method decorator-producing function. The custom classes defined by the query/collection fields will also be passed to endpoints.method as the keyword arguments request_message/response_message. Custom {EndpointsAliasProperty}s have been defined that allow for customizing queries: limit: allows a limit to be passed in and augment the query info on the deserialized entity. order: allows an order to be passed in and augment the query info on the deserialized entity. pageToken: allows a websafe string value to be converted to a cursor and set on the query info of the deserialized entity. NOTE: Using utils.positional(1), we ensure the class instance will be the only positional argument hence won't have leaking/collision between the endpoints.method decorator function that we mean to pass metadata to. Args: query_fields: An (optional) list, tuple, dictionary or MessageFieldsSchema that define a field ordering in a ProtoRPC message class. Defaults to an empty tuple, which results in a simple datastore query of the kind. 
collection_fields: An (optional) list, tuple, dictionary or MessageFieldsSchema that define a field ordering in a ProtoRPC message class. Defaults to None. limit_default: An (optional) default value for the amount of items to fetch in a query. Defaults to the global QUERY_LIMIT_DEFAULT. limit_max: An (optional) max value for the amount of items to fetch in a query. Defaults to the global QUERY_LIMIT_MAX. user_required: Boolean; indicates whether or not a user is required on any incoming request. Defaults to False. use_projection: Boolean; indicates whether or the query should retrieve entire entities or just a projection using the collection fields. Defaults to False. If used, all properties in a projection must be indexed, so this should be used with care. However, when used correctly, this will speed up queries, reduce payload size and even reduce cost at times. Returns: A decorator that takes the metadata passed in and augments an API query method. The decorator will perform the fetching, the decorated method simply need return the augmented query object. Raises: TypeError: if there is a custom request or response message class was passed in. TypeError: if a http_method other than 'GET' is passed in. """ if REQUEST_MESSAGE in kwargs: raise TypeError('Received a request message class on a method intended ' 'for queries. This is explicitly not allowed. Only ' 'query_fields can be specified.') kwargs[REQUEST_MESSAGE] = cls.ProtoModel(fields=query_fields, allow_message_fields=False) if RESPONSE_MESSAGE in kwargs: raise TypeError('Received a response message class on a method intended ' 'for queries. This is explicitly not allowed. Only ' 'collection_fields can be specified.') kwargs[RESPONSE_MESSAGE] = cls.ProtoCollection( collection_fields=collection_fields) # Only allow GET for queries if HTTP_METHOD in kwargs: if kwargs[HTTP_METHOD] != QUERY_HTTP_METHOD: raise TypeError('Query requests must use the HTTP GET methods. ' 'Received %s.' % (kwargs[HTTP_METHOD],)) kwargs[HTTP_METHOD] = QUERY_HTTP_METHOD apiserving_method_decorator = endpoints.method(**kwargs) def RequestToQueryDecorator(api_method): """A decorator that uses the metadata passed to the enclosing method. Args: api_method: A method to be decorated. Expected signature is two positional arguments, an instance object of an API service and a variable containing a deserialized API request object, required here to be an NDB query object with kind set to the current EndpointsModel class. Returns: A decorated method that uses the metadata of the enclosing method to verify the service instance, convert the arguments to ones that can be consumed by the decorated method and serialize the method output back to a ProtoRPC (collection) message. """ @functools.wraps(api_method) def QueryFromRequestMethod(service_instance, request): """Stub method to be decorated. After creation, will be passed to the standard endpoints.method decorator to preserve the necessary method attributes needed for endpoints API methods. Args: service_instance: A ProtoRPC remove service instance. request: A ProtoRPC message. Returns: A ProtoRPC (collection) message, serialized after being returned from an NDB query and containing the cursor if there are more results and a cursor was returned. Raises: endpoints.UnauthorizedException: if the user required boolean from the metadata is True and if there is no current endpoints user. endpoints.ForbiddenException: if the limit passed in through the request exceeds the maximum allowed. 
""" if user_required and endpoints.get_current_user() is None: raise endpoints.UnauthorizedException('Invalid token.') request_entity = cls.FromMessage(request) query_info = request_entity._endpoints_query_info query_info.SetQuery() # Allow the caller to update the query query = api_method(service_instance, query_info.query) # Use limit on query info or default if none was set request_limit = query_info.limit or limit_default if request_limit > limit_max: raise endpoints.ForbiddenException( QUERY_MAX_EXCEEDED_TEMPLATE % (request_limit, limit_max)) query_options = {'start_cursor': query_info.cursor} if use_projection: projection = [value for value in collection_fields if value in cls._properties] query_options['projection'] = projection items, next_cursor, more_results = query.fetch_page( request_limit, **query_options) # Don't pass a cursor if there are no more results if not more_results: next_cursor = None return cls.ToMessageCollection(items, collection_fields=collection_fields, next_cursor=next_cursor) return apiserving_method_decorator(QueryFromRequestMethod) return RequestToQueryDecorator # Update base class global so EndpointsMetaModel can check subclasses against it BASE_MODEL_CLASS = EndpointsModel
mit
-4,827,608,363,860,962,000
38.694848
80
0.684998
false
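The decorators documented in the record above are easiest to follow next to a call site, which the record does not include. A minimal usage sketch, assuming the endpoints-proto-datastore package layout (`endpoints_proto_datastore.ndb.EndpointsModel`) and the Cloud Endpoints Python framework, might look like the following; the `Task` model, the `TaskApi` service, and the path/name strings are illustrative, not taken from the source.

# Hypothetical usage sketch for the method/query_method decorators documented above.
# The model, service, and API identifiers are illustrative.
import endpoints
from google.appengine.ext import ndb
from protorpc import remote

from endpoints_proto_datastore.ndb import EndpointsModel


class Task(EndpointsModel):
    # Plain NDB properties; ProtoRPC message fields are generated on demand.
    title = ndb.StringProperty()
    done = ndb.BooleanProperty(default=False)


@endpoints.api(name='tasks', version='v1')
class TaskApi(remote.Service):

    @Task.method(path='task', http_method='POST', name='task.insert')
    def insert_task(self, task):
        # The decorator has already deserialized the request into a Task entity.
        task.put()
        return task

    @Task.query_method(query_fields=('limit', 'order', 'pageToken'),
                       path='tasks', name='task.list')
    def list_tasks(self, query):
        # The decorator executes the query and serializes the resulting collection.
        return query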
georgiascott/sosleep
sosleep/wsgi.py
1
2223
""" WSGI config for sosleep project. This module contains the WSGI application used by Django's development server and any production WSGI deployments. It should expose a module-level variable named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover this application via the ``WSGI_APPLICATION`` setting. Usually you will have the standard Django WSGI application here, but it also might make sense to replace the whole Django WSGI application with a custom one that later delegates to the Django one. For example, you could introduce WSGI middleware here, or combine a Django application with an application of another framework. """ import os #import sys #import site #import subprocess PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__) + "../../") # Add the virtualenv packages to the site directory. This uses the technique # described at http://code.google.com/p/modwsgi/wiki/VirtualEnvironments # Remember original sys.path. #prev_sys_path = list(sys.path) # Get the path to the env's site-packages directory #site_packages = subprocess.check_output([ # os.path.join(PROJECT_ROOT, '.virtualenv/bin/python'), # '-c', # 'from distutils.sysconfig import get_python_lib;' # 'print get_python_lib(),' #]).strip() # Add the virtualenv site-packages to the site packages #site.addsitedir(site_packages) # Reorder sys.path so the new directories are at the front. #new_sys_path = [] #for item in list(sys.path): # if item not in prev_sys_path: # new_sys_path.append(item) # sys.path.remove(item) #sys.path[:0] = new_sys_path # Add the app code to the path #sys.path.append(PROJECT_ROOT) os.environ['CELERY_LOADER'] = 'django' os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sosleep.settings") # This application object is used by any WSGI server configured to use this # file. This includes Django's development server, if the WSGI_APPLICATION # setting points here. from django.core.wsgi import get_wsgi_application application = get_wsgi_application() # Apply WSGI middleware here. # from helloworld.wsgi import HelloWorldApplication # application = HelloWorldApplication(application)
bsd-3-clause
5,322,176,296,629,473,000
34.285714
79
0.730544
false
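The closing comment in wsgi.py above points at wrapping the Django application in WSGI middleware without showing one. A small sketch of such a wrapper follows; `HealthCheckMiddleware` and the `/healthz` path are hypothetical and not part of the sosleep project.

# Hypothetical WSGI middleware wrapper; the class name and path are illustrative.
class HealthCheckMiddleware(object):
    def __init__(self, application):
        self.application = application

    def __call__(self, environ, start_response):
        # Answer a health-check path before the request reaches Django.
        if environ.get('PATH_INFO') == '/healthz':
            start_response('200 OK', [('Content-Type', 'text/plain')])
            return [b'ok']
        return self.application(environ, start_response)

# application = HealthCheckMiddleware(application)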
snawara/TensorKart
record.py
1
5968
#!/usr/bin/env python import numpy as np import os import shutil import wx import matplotlib matplotlib.use('WXAgg') from datetime import datetime from matplotlib.figure import Figure from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigCanvas from utils import take_screenshot, XboxController SAMPLE_RATE = 200 class MainWindow(wx.Frame): """ Main frame of the application """ title = 'Data Acquisition' def __init__(self): wx.Frame.__init__(self, None, title=self.title, size=(660,330)) # Init controller self.controller = XboxController() # Create GUI self.create_main_panel() # Timer self.timer = wx.Timer(self) self.Bind(wx.EVT_TIMER, self.on_timer, self.timer) self.rate = SAMPLE_RATE self.timer.Start(self.rate) self.recording = False self.t = 0 def create_main_panel(self): # Panels self.img_panel = wx.Panel(self) self.joy_panel = wx.Panel(self) self.record_panel = wx.Panel(self) # Images img = wx.Image(320,240) self.image_widget = wx.StaticBitmap(self.img_panel, wx.ID_ANY, wx.Bitmap(img)) # Joystick self.init_plot() self.PlotCanvas = FigCanvas(self.joy_panel, wx.ID_ANY, self.fig) # Recording self.txt_outputDir = wx.TextCtrl(self.record_panel, wx.ID_ANY, pos=(5,0), size=(320,30)) uid = datetime.now().strftime('%Y-%m-%d_%H:%M:%S') self.txt_outputDir.ChangeValue("samples/train/" + uid) self.btn_record = wx.Button(self.record_panel, wx.ID_ANY, label="Record", pos=(335,0), size=(100,30)) self.Bind(wx.EVT_BUTTON, self.on_btn_record, self.btn_record) self.Bind(wx.EVT_UPDATE_UI, self.on_update_btn_record, self.btn_record) # sizers sizer = wx.BoxSizer(wx.HORIZONTAL) sizer.Add(self.img_panel, 0, wx.ALL, 5) sizer.Add(self.joy_panel, 0, wx.ALL, 5) mainSizer_v = wx.BoxSizer(wx.VERTICAL) mainSizer_v.Add(sizer, 0 , wx.ALL, 5) mainSizer_v.Add(self.record_panel, 0 , wx.ALL, 5) # finalize layout self.SetAutoLayout(True) self.SetSizer(mainSizer_v) self.Layout() def init_plot(self): self.plotMem = 50 # how much data to keep on the plot self.plotData = [[0] * (5)] * self.plotMem # mem storage for plot self.fig = Figure((4,3)) self.axes = self.fig.add_subplot(111) def on_timer(self, event): self.poll() # stop drawing if recording to avoid slow downs if self.recording == False: self.draw() def poll(self): self.bmp = take_screenshot() self.controller_data = self.controller.read() self.update_plot() if self.recording == True: self.save_data() def update_plot(self): self.plotData.append(self.controller_data) # adds to the end of the list self.plotData.pop(0) # remove the first item in the list, ie the oldest def save_data(self): image_file = self.outputDir+'/'+'img_'+str(self.t)+'.png' self.bmp.SaveFile(image_file, wx.BITMAP_TYPE_PNG) # make / open outfile outfile = open(self.outputDir+'/'+'data.csv', 'a') # write line outfile.write( image_file + ',' + ','.join(map(str, self.controller_data)) + '\n' ) outfile.close() self.t += 1 def draw(self): # Image img = self.bmp.ConvertToImage() img = img.Rescale(320,240) self.image_widget.SetBitmap( img.ConvertToBitmap() ) # Joystick x = np.asarray(self.plotData) self.axes.plot(range(0,self.plotMem), x[:,0], 'r') self.axes.hold(True) self.axes.plot(range(0,self.plotMem), x[:,1], 'b') self.axes.plot(range(0,self.plotMem), x[:,2], 'g') self.axes.plot(range(0,self.plotMem), x[:,3], 'k') self.axes.plot(range(0,self.plotMem), x[:,4], 'y') self.axes.hold(False) self.PlotCanvas.draw() def on_update_btn_record(self, event): label = "Stop" if self.recording else "Record" self.btn_record.SetLabel(label) def on_btn_record(self, event): # pause timer self.timer.Stop() # 
switch state self.recording = not self.recording if self.recording: self.start_recording() # un pause timer self.timer.Start(self.rate) def start_recording(self): # check that a dir has been specified if self.txt_outputDir.IsEmpty(): msg = wx.MessageDialog(self, 'Specify the Output Directory', 'Error', wx.OK | wx.ICON_ERROR) msg.ShowModal() == wx.ID_YES msg.Destroy() self.recording = False else: # a directory was specified self.outputDir = self.txt_outputDir.GetValue() self.t = 0 # check if path exists - ie may be saving over data if os.path.exists(self.outputDir): msg = wx.MessageDialog(self, 'Output Directory Exists - Overwrite Data?', 'Yes or No', wx.YES_NO | wx.ICON_QUESTION) result = msg.ShowModal() == wx.ID_YES msg.Destroy() # overwrite the data if result == True: # delete the dir shutil.rmtree(self.outputDir) # re-make dir os.mkdir(self.outputDir) # do not overwrite the data else: # result == False self.recording = False self.txt_outputDir.SetFocus() # no directory so make one else: os.mkdir(self.outputDir) def on_exit(self, event): self.Destroy() if __name__ == '__main__': app = wx.App() app.frame = MainWindow() app.frame.Show() app.MainLoop()
mit
7,111,182,449,239,946,000
27.555024
132
0.572386
false
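record.py above appends one row per sample to data.csv: the screenshot path followed by the five controller axis values. A sketch of loading those samples back for training, assuming that on-disk layout and that Pillow is available for image decoding, could look like this (the sample directory name is illustrative):

# Sketch of reading the samples written by save_data() in record.py above.
# Assumes each CSV row is "<image path>,<axis0>,...,<axis4>"; Pillow is assumed available.
import csv

import numpy as np
from PIL import Image


def load_samples(csv_path):
    images, labels = [], []
    with open(csv_path) as f:
        for row in csv.reader(f):
            images.append(np.asarray(Image.open(row[0])))
            labels.append(np.asarray(row[1:], dtype=np.float32))
    return np.stack(images), np.stack(labels)

# X, y = load_samples('samples/train/2024-01-01_12:00:00/data.csv')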
zwChan/VATEC
~/eb-flask/features.py
1
14055
__author__ = 'Jason' import os, re, csv, ast, csv import math import jinja2 jinja_environment = jinja2.Environment(autoescape=True, loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates'))) import flask from utils import encode def _show_frequent_features(mysql,g_sty): topk = flask.request.form['topk'] disease = flask.request.form['disease'] # width_option = flask.request.form.get('width_option',False) # fixed_width = flask.request.form.get('fixed_width',None) condition_option = flask.request.form.get('conditions',None) # plot_type = flask.request.form.get('plot_type',None) start_month_option = flask.request.form.get('start_month',None) start_year_option = flask.request.form.get('start_year',None) start_month_before_option = flask.request.form.get('start_month_before',None) start_year_before_option = flask.request.form.get('start_year_before',None) minimum_age_option = flask.request.form.get('minimum_age',None) maximum_age_option = flask.request.form.get('maximum_age',None) gender = flask.request.form.get('gender',None) # aggregate_analysis_type = flask.request.form.get('aggregate_analysis_type',None) phase_option = flask.request.form.getlist('phase', None) status_option = flask.request.form.getlist('status', None) study_type_option = flask.request.form.getlist('study_types', None) intervention_type_option = flask.request.form.getlist('intervention_types', None) agency_type_option = flask.request.form.getlist('agency_types', None) intervention_model_option = flask.request.form.getlist('intervention_model', None) allocation_option = flask.request.form.getlist('allocation', None) time_perspective_option = flask.request.form.getlist('time_perspective', None) pattern_option = flask.request.form.getlist('pattern_option', None) conditions = condition_option # the list of trial IDs if the condition is not indexed in COMPACT trial_ids_with_conditions = [] db = mysql.connect() cur = db.cursor() filter_description = "" # a description of the clinical trails filter that is shown on the web page. 
nest_status = " (V.nested = 'None' or V.nested = 'nesting') " sql_var = [disease] disease_query = "V.task = %s " # check phase option and generate part of the query phase_query = "" if (len(phase_option) == 0): phase_query += " ( 1=1 )" else: filter_description += "phase=" phase_query += " (" for phase in phase_option: sql_var += ["%%%s%%" % (phase)] phase_query += " T.phase LIKE %s" + " OR" filter_description += phase + ";" # this is not meaningful, just to terminate the part of the query phase_query = phase_query.rstrip("OR") + ")" filter_description += "\n" # check status option and generate part of the query status_query = "" if (len(status_option) == 0): status_query += " (1=1)" else: filter_description += "status=" status_query += " (" for status in status_option: filter_description += status + ";" if (status == "Open Studies"): status_query += " T.overall_status = 'Recruiting' OR T.overall_status = 'Not yet recruiting' OR" elif (status == "Closed Studies"): status_query += " T.overall_status = 'Active, not recruiting' OR T.overall_status = 'Active, not recruiting' OR T.overall_status = 'Completed' OR T.overall_status = 'Withdrawn' OR T.overall_status = 'Suspended' OR T.overall_status = 'Terminated' OR T.overall_status = 'Enrolling by invitation' OR" else: sql_var += [status] status_query += " T.overall_status = %s" + " OR" # this is not meaningful, just to terminate the part of the query status_query = status_query.rstrip("OR") + ")" filter_description += "\n" # check study type option and generate part of the query study_type_query = "" if (len(study_type_option) == 0): study_type_query = " (T.study_type LIKE '%%')" else: filter_description += "study_type=" study_type_query += " (" for study_type in study_type_option: sql_var += [study_type] study_type_query += " T.study_type = %s" + " OR" filter_description += study_type + ";" study_type_query = study_type_query.rstrip("OR") + ")" filter_description += "\n" # check intervention type option and generate part of the query intervention_type_query = "" if (len(intervention_type_option) == 0): intervention_type_query = " (T.intervention_type LIKE '%%')" else: filter_description += "intervention_type=" intervention_type_query += " (" for intervention_type in intervention_type_option: sql_var += [intervention_type] intervention_type_query += " T.intervention_type = %s" + " OR" filter_description+= intervention_type + ";" intervention_type_query = intervention_type_query.rstrip("OR") + ")" filter_description+= "\n" # check agency type option and generate part of the query agency_type_query = "" if (len(agency_type_option) == 0): agency_type_query = " (T.agency_type LIKE '%%')" else: filter_description += "agency_type=" agency_type_query += " (" for agency_type in agency_type_option: sql_var += ["%%%s%%" % (agency_type)] agency_type_query += " T.agency_type LIKE %s" + " OR" filter_description += agency_type+"; " agency_type_query = agency_type_query.rstrip("OR") + ")" filter_description += "\n" # check agency type option and generate part of the query if (gender == '' or gender == 'all'): gender_query = " (T.gender LIKE '%%')" else: sql_var += [gender] gender_query = " (T.gender = %s" + ")" filter_description += "gender=%s\n" % (gender) # check start_year_option start_month_option and generate start_date_query if (start_year_option == '' or start_month_option == '' or start_month_option == 'N/A'): start_date_query = " (T.start_date LIKE '%%')" else: start_date = str(start_month_option) + " " + str(start_year_option) sql_var += [start_date] 
start_date_query = " (STR_TO_DATE(T.start_date,'%%M %%Y') >= STR_TO_DATE(%s ,'%%M %%Y'))" filter_description += "start_date_from=%s %s\n" % (start_month_option, start_year_option) if (start_year_before_option != '' or start_month_before_option != '' or start_month_before_option == 'N/A'): start_date_before = str(start_month_before_option) + " " + str(start_year_before_option) sql_var += [start_date_before] start_date_query += " and (STR_TO_DATE(T.start_date,'%%M %%Y') <= STR_TO_DATE(%s, '%%M %%Y'))" filter_description += "start_date_end=%s %s\n" % (start_month_before_option, start_year_before_option) # check minimum_age, maximum_age and generate age_query minimum_age = -1 maximum_age = 200 age_query = "" if (minimum_age_option != ''): try: minimum_age = float(minimum_age_option) except TypeError: pass if (maximum_age_option != ''): try: maximum_age = float(maximum_age_option) except TypeError: pass if len(minimum_age_option) > 0 or len(maximum_age_option)>0: filter_description += "Age= [%s, %s]" % (minimum_age_option, maximum_age_option) age_query = " (T.minimum_age_in_year >= %s and T.maximum_age_in_year <= %s)" sql_var += [str(minimum_age), str(maximum_age)] else: age_query = ("1=1") # check intervention model option and generate part of the query intervention_model_query = "" if (len(intervention_model_option) == 0): intervention_model_query = " (T.intervention_model LIKE '%%')" else: filter_description += "intervention_model=" intervention_model_query += " (" for intervention_model in intervention_model_option: sql_var += ["%%%s%%" %(intervention_model)] intervention_model_query += " T.intervention_model LIKE %s" + " OR" filter_description += intervention_model + "; " intervention_model_query = intervention_model_query.rstrip("OR")+")" filter_description += "\n" # check allocation option and generate part of the query allocation_query = "" if (len(allocation_option) == 0): allocation_query = " (T.allocation LIKE '%%')" else: filter_description += "allocation=" allocation_query += " (" for allocation in allocation_option: sql_var += ["%%%s%%" %(allocation)] allocation_query += " T.allocation LIKE %s" + " OR" filter_description += allocation + "; " allocation_query = allocation_query.rstrip("OR") + ")" filter_description += "\n" # check time perspective option and generate part of the query time_perspective_query = "" if (len(time_perspective_option) == 0): #time_perspective_query = " (T.time_perspective LIKE '%%')" time_perspective_query = " (1=1)" else: filter_description += "time_perspective=" time_perspective_query += " (" for time_perspective in time_perspective_option: sql_var += ["%%%s%%" %(time_perspective)] time_perspective_query += " T.time_perspective LIKE %s" + " OR" filter_description += time_perspective + ";" time_perspective_query = time_perspective_query.rstrip("OR") + ")" filter_description += "\n" # the filter of pattern pattern_query = "" if (len(pattern_option) == 0): pattern_query = " (1=1)" else: filter_description += "pattern=" pattern_query += " (" for pt in pattern_option: sql_var += [pt] pattern_query += " V.pattern = %s" + " OR" filter_description += pt + ";" pattern_query = pattern_query.rstrip("OR") + ")" # just add a false expression filter_description += "\n" value_range_error = False # get the total enrollment of trials contaning the variable in the certain range enrollment_value = 0 # start the aggregate analysis # get the total number of trials meeting the requirements trials_meeting_requirement = () #sql = "SELECT distinct V.TID, V.month FROM cancer_cui 
V, meta T where %s" %(curr_condition) + " and T.tid = V.tid and "+ phase_query + " and "+ status_query + " and "+ study_type_query + " and "+ intervention_type_query + " and "+ agency_type_query + " and "+ gender_query + " and "+ start_date_query + " and "+ age_query + " and "+ intervention_model_query + " and "+ allocation_query + " and "+ time_perspective_query + " and "+ disease_query filter_builder = " T.tid = V.tid and " + disease_query + " and " + phase_query + " and "+ status_query + " and "+ study_type_query + " and "+ intervention_type_query + " and "+ \ agency_type_query + " and "+ gender_query + " and "+ start_date_query + " and "+ age_query + " and "+ intervention_model_query + " and "+ \ allocation_query + " and "+ time_perspective_query + " and " + pattern_query + \ " and "+ nest_status # # sql = "SELECT V.month, count(*), count(distinct V.tid) FROM cancer_cui V, meta T where %s" % (filter_builder) # print sql num_trials_with_disease_sql = "select count(V.cui), count(distinct V.tid) from cancer_cui V, meta T where %s " %(filter_builder) cur.execute(num_trials_with_disease_sql, sql_var) num_trials_with_disease = '' num_cui_with_disease = '' for row in cur.fetchall(): num_cui_with_disease = row[0] num_trials_with_disease = row[1] # fetch data for the "frequency table" if topk == 'all': topk = '100000000000' # big enough number frequent_numeric_feature_sql = "select V.cui,V.sty,V.cui_str,count(*) as freq from cancer_cui V, meta T where %s group by V.cui,V.sty order by freq desc" % (filter_builder) + " limit %s " print (frequent_numeric_feature_sql, sql_var + [topk]) cur.execute(frequent_numeric_feature_sql, sql_var + [int(topk)]) distribution_numeric_features = [] for row in cur.fetchall(): ###cui,sty,cui_str,freq distribution_numeric_features.append((row[2],row[1], row[0],row[3], g_sty[row[1]][3], g_sty[row[1]][1])) # fetch data for the "drop list menu", only the cui with duration duration_feature_sql = "select V.cui,V.sty,V.cui_str,count(*) as freq from cancer_cui V, meta T where (monthstart >= 0 or monthend >= 0) and %s group by V.cui,V.sty order by freq desc " % (filter_builder) + " limit %s " cur.execute(duration_feature_sql, sql_var + [int(topk)]) duration_features = [] for row in cur.fetchall(): ###cui,sty,cui_str,freq duration_features.append((row[2],row[1], row[0],row[3])) filter_dict = attr_str2dict(filter_description) sql_var_str = encode("\n".join(sql_var)) filter_builder = encode(filter_builder) if db.open: db.close() return flask.render_template('features.html', **locals()) ### convert a string in format "Name1=value1\nName2=value2 ..." to a dict data structure, for further use in html page. def attr_str2dict(attrStr=""): output = {} for pair in attrStr.split("\n"): tokens = pair.split("=") if len(tokens)>=2: output[str(tokens[0]).strip()] = str(tokens[1]).strip() return output
apache-2.0
4,179,736,863,392,049,700
45.329966
449
0.594379
false
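The attr_str2dict helper at the end of features.py above turns a "Name=value" description string back into a dict for use in the template. A short illustrative call (the values are made up) shows the expected shape of input and output:

# Illustrative call of attr_str2dict from features.py above; the values are made up.
filter_description = "phase=Phase 1;\nstatus=Recruiting;\ngender=female"
print(attr_str2dict(filter_description))
# -> {'phase': 'Phase 1;', 'status': 'Recruiting;', 'gender': 'female'}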
cmars/hockeypuck
contrib/pyinfra/lxd.py
1
1080
# A LXD inventory that idempotently provisions LXD containers. You could
# probably do something similar with cloud APIs if so inclined.
import json
import os
from subprocess import check_output, check_call, CalledProcessError

containers=['hkp1', 'hkp2']
addrs=[]

def ensure_container(name):
    try:
        check_output(['lxc', 'info', name])
    except CalledProcessError:
        lp_user = check_output(['bzr', 'lp-login']).decode().strip()
        check_call(['lxc', 'launch', 'ubuntu:bionic', name])
        check_call(['lxc', 'exec', name, '--', 'bash', '-c', 'while [ ! -f /var/lib/cloud/instance/boot-finished ]; do sleep 1; done'])
        check_call(['lxc', 'exec', name, '--', 'bash', '-c', 'sudo su - ubuntu -c "ssh-import-id {}"'.format(lp_user)])
    addrs.append(check_output(['lxc', 'exec', name, '--', 'bash', '-c', "ip addr show eth0 | awk '/inet / {print $2}' | sed 's_/.*__'"]).decode().strip())

for name in containers:
    ensure_container(name)

lxd_servers = [(addr, {'ssh_user': 'ubuntu', 'peers': [p for p in addrs if p != addr]}) for addr in addrs]
agpl-3.0
5,309,524,329,987,346,000
44
154
0.621296
false
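lxd.py above is a pyinfra inventory, so the lxd_servers group and its per-host data (ssh_user, peers) are consumed by pointing pyinfra at the inventory together with an operations file. The sketch below assumes a recent pyinfra (v2-style) operations API; deploy.py and the echo command are hypothetical and not taken from the hockeypuck repository.

# Hypothetical deploy.py, run e.g. as: pyinfra contrib/pyinfra/lxd.py deploy.py
# The peers list attached to each host in the inventory is exposed as host data.
from pyinfra import host
from pyinfra.operations import server

server.shell(
    name="Record this host's peers (illustrative only)",
    commands=['echo "peers: {0}" > /tmp/peers.txt'.format(' '.join(host.data.peers))],
)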
scikit-hep/uproot
uproot3/rootio.py
1
78562
#!/usr/bin/env python # BSD 3-Clause License; see https://github.com/scikit-hep/uproot3/blob/master/LICENSE from __future__ import absolute_import import keyword import numbers import os import re import struct import sys try: from urlparse import urlparse except ImportError: from urllib.parse import urlparse import numpy import uproot3.const import uproot3.source.compressed from uproot3.source.memmap import MemmapSource from uproot3.source.xrootd import XRootDSource from uproot3.source.http import HTTPSource from uproot3.source.cursor import Cursor from uproot3._util import _tobytes import uproot_methods.classes ################################################################ high-level interface def open(path, localsource=MemmapSource.defaults, xrootdsource=XRootDSource.defaults, httpsource=HTTPSource.defaults, **options): if isinstance(path, getattr(os, "PathLike", ())): path = os.fspath(path) elif hasattr(path, "__fspath__"): path = path.__fspath__() elif path.__class__.__module__ == "pathlib": import pathlib if isinstance(path, pathlib.Path): path = str(path) parsed = urlparse(path) if _bytesid(parsed.scheme) == b"file" or len(parsed.scheme) == 0 or (os.name == "nt" and open._windows_absolute.match(path) is not None): if not (os.name == "nt" and open._windows_absolute.match(path) is not None): path = parsed.netloc + parsed.path if isinstance(localsource, dict): kwargs = dict(MemmapSource.defaults) kwargs.update(localsource) for n in kwargs: if n in options: kwargs[n] = options.pop(n) openfcn = lambda path: MemmapSource(path, **kwargs) else: openfcn = localsource return ROOTDirectory.read(openfcn(path), **options) elif _bytesid(parsed.scheme) == b"root": return xrootd(path, xrootdsource=xrootdsource, **options) elif _bytesid(parsed.scheme) == b"http" or _bytesid(parsed.scheme) == b"https": return http(path, httpsource=httpsource, **options) else: raise ValueError("URI scheme not recognized: {0}".format(path)) open._windows_absolute = re.compile(r"^[A-Za-z]:\\") def xrootd(path, xrootdsource=XRootDSource.defaults, **options): if isinstance(xrootdsource, dict): kwargs = dict(XRootDSource.defaults) kwargs.update(xrootdsource) for n in kwargs: if n in options: kwargs[n] = options.pop(n) openfcn = lambda path: XRootDSource(path, **kwargs) else: openfcn = xrootdsource return ROOTDirectory.read(openfcn(path), **options) def http(path, httpsource=HTTPSource.defaults, **options): if isinstance(httpsource, dict): kwargs = dict(HTTPSource.defaults) kwargs.update(httpsource) for n in kwargs: if n in options: kwargs[n] = options.pop(n) openfcn = lambda path: HTTPSource(path, **kwargs) else: openfcn = httpsource return ROOTDirectory.read(openfcn(path), **options) def nofilter(x): return True ################################################################ ROOTDirectory class ROOTDirectory(object): # makes __doc__ attribute mutable before Python 3.3 __metaclass__ = type.__new__(type, "type", (type,), {}) _classname = b"TDirectory" classname = "TDirectory" class _FileContext(object): def __init__(self, sourcepath, streamerinfos, streamerinfosmap, classes, compression, tfile): self.sourcepath, self.streamerinfos, self.streamerinfosmap, self.classes, self.compression, self.tfile = sourcepath, streamerinfos, streamerinfosmap, classes, compression, tfile self.uuid = tfile["_fUUID"] def copy(self): out = ROOTDirectory._FileContext.__new__(ROOTDirectory._FileContext) out.__dict__.update(self.__dict__) return out @staticmethod def read(source, *args, **options): if len(args) == 0: try: read_streamers = 
options.pop("read_streamers", True) if len(options) > 0: raise TypeError("unrecognized options: {0}".format(", ".join(options))) # See https://root.cern/doc/master/classTFile.html cursor = Cursor(0) magic, fVersion = cursor.fields(source, ROOTDirectory._format1) if magic != b"root": raise ValueError("not a ROOT file (starts with {0} instead of 'root')\n in file: {1}".format(repr(magic), source.path)) if fVersion < 1000000: fBEGIN, fEND, fSeekFree, fNbytesFree, nfree, fNbytesName, fUnits, fCompress, fSeekInfo, fNbytesInfo, fUUID = cursor.fields(source, ROOTDirectory._format2_small) else: fBEGIN, fEND, fSeekFree, fNbytesFree, nfree, fNbytesName, fUnits, fCompress, fSeekInfo, fNbytesInfo, fUUID = cursor.fields(source, ROOTDirectory._format2_big) tfile = {"_fVersion": fVersion, "_fBEGIN": fBEGIN, "_fEND": fEND, "_fSeekFree": fSeekFree, "_fNbytesFree": fNbytesFree, "nfree": nfree, "_fNbytesName": fNbytesName, "_fUnits": fUnits, "_fCompress": fCompress, "_fSeekInfo": fSeekInfo, "_fNbytesInfo": fNbytesInfo, "_fUUID": fUUID} # classes requried to read streamers (bootstrap) streamerclasses = {"TStreamerInfo": TStreamerInfo, "TStreamerElement": TStreamerElement, "TStreamerBase": TStreamerBase, "TStreamerBasicType": TStreamerBasicType, "TStreamerBasicPointer": TStreamerBasicPointer, "TStreamerLoop": TStreamerLoop, "TStreamerObject": TStreamerObject, "TStreamerObjectPointer": TStreamerObjectPointer, "TStreamerObjectAny": TStreamerObjectAny, "TStreamerObjectAnyPointer": TStreamerObjectAnyPointer, "TStreamerString": TStreamerString, "TStreamerSTL": TStreamerSTL, "TStreamerSTLstring": TStreamerSTLstring, "TStreamerArtificial": TStreamerArtificial, "TList": TList, "TObjArray": TObjArray, "TObjString": TObjString} if read_streamers and fSeekInfo != 0: streamercontext = ROOTDirectory._FileContext(source.path, None, None, streamerclasses, uproot3.source.compressed.Compression(fCompress), tfile) streamerkey = TKey.read(source, Cursor(fSeekInfo), streamercontext, None) streamerinfos, streamerinfosmap, streamerrules = _readstreamers(streamerkey._source, streamerkey._cursor, streamercontext, None) else: streamerinfos, streamerinfosmap, streamerrules = [], {}, [] classes = dict(globals()) classes.update(builtin_classes) classes = _defineclasses(streamerinfos, classes) context = ROOTDirectory._FileContext(source.path, streamerinfos, streamerinfosmap, classes, uproot3.source.compressed.Compression(fCompress), tfile) context.source = source keycursor = Cursor(fBEGIN) mykey = TKey.read(source, keycursor, context, None) return ROOTDirectory.read(source, Cursor(fBEGIN + fNbytesName), context, mykey) except Exception: source.dismiss() raise else: try: if len(options) > 0: raise TypeError("unrecognized options: {0}".format(", ".join(options))) cursor, context, mykey = args # See https://root.cern/doc/master/classTDirectoryFile.html. 
fVersion, fDatimeC, fDatimeM, fNbytesKeys, fNbytesName = cursor.fields(source, ROOTDirectory._format3) if fVersion <= 1000: fSeekDir, fSeekParent, fSeekKeys = cursor.fields(source, ROOTDirectory._format4_small) else: fSeekDir, fSeekParent, fSeekKeys = cursor.fields(source, ROOTDirectory._format4_big) if fSeekKeys == 0: out = ROOTDirectory(b"(empty)", context, []) else: subcursor = Cursor(fSeekKeys) headerkey = TKey.read(source, subcursor, context, None) nkeys = subcursor.field(source, ROOTDirectory._format5) keys = [TKey.read(source, subcursor, context, None) for i in range(nkeys)] out = ROOTDirectory(mykey._fName, context, keys) out._fVersion, out._fDatimeC, out._fDatimeM, out._fNbytesKeys, out._fNbytesName, out._fSeekDir, out._fSeekParent, out._fSeekKeys = fVersion, fDatimeC, fDatimeM, fNbytesKeys, fNbytesName, fSeekDir, fSeekParent, fSeekKeys out.source = source return out finally: source.dismiss() _format1 = struct.Struct(">4si") _format2_small = struct.Struct(">iiiiiiBiii18s") _format2_big = struct.Struct(">iqqiiiBiqi18s") _format3 = struct.Struct(">hIIii") _format4_small = struct.Struct(">iii") _format4_big = struct.Struct(">qqq") _format5 = struct.Struct(">i") def __init__(self, name, context, keys): self.name, self._context, self._keys = name, context, keys @property def compression(self): return self._context.compression def __repr__(self): return "<ROOTDirectory {0} at 0x{1:012x}>".format(repr(self.name), id(self)) def __getitem__(self, name): return self.get(name) def __len__(self): return len(self._keys) def __iter__(self): return self.iterkeys() @staticmethod def _withoutcycle(key): return "{0}".format(key._fName.decode("ascii")).encode("ascii") @staticmethod def _withcycle(key): return "{0};{1}".format(key._fName.decode("ascii"), key._fCycle).encode("ascii") def showstreamers(self, filtername=nofilter, stream=sys.stdout): if stream is None: return "\n".join(x.show(stream=stream) for x in self._context.streamerinfos if filtername(x._fName)) else: for x in self._context.streamerinfos: if filtername(x._fName): x.show(stream=stream) def iterkeys(self, recursive=False, filtername=nofilter, filterclass=nofilter): for key in self._keys: cls = _classof(self._context, key._fClassName) if filtername(key._fName) and filterclass(cls): yield self._withcycle(key) if recursive and (key._fClassName == b"TDirectory" or key._fClassName == b"TDirectoryFile"): for name in key.get().iterkeys(recursive, filtername, filterclass): yield "{0}/{1}".format(self._withoutcycle(key).decode("ascii"), name.decode("ascii")).encode("ascii") def itervalues(self, recursive=False, filtername=nofilter, filterclass=nofilter): for key in self._keys: cls = _classof(self._context, key._fClassName) if filtername(key._fName) and filterclass(cls): yield key.get() if recursive and (key._fClassName == b"TDirectory" or key._fClassName == b"TDirectoryFile"): for value in key.get().itervalues(recursive, filtername, filterclass): yield value def iteritems(self, recursive=False, filtername=nofilter, filterclass=nofilter): for key in self._keys: cls = _classof(self._context, key._fClassName) if filtername(key._fName) and filterclass(cls): yield self._withcycle(key), key.get() if recursive and (key._fClassName == b"TDirectory" or key._fClassName == b"TDirectoryFile"): for name, value in key.get().iteritems(recursive, filtername, filterclass): yield "{0}/{1}".format(self._withoutcycle(key).decode("ascii"), name.decode("ascii")).encode("ascii"), value def iterclasses(self, recursive=False, filtername=nofilter, 
filterclass=nofilter): for key in self._keys: cls = _classof(self._context, key._fClassName) if filtername(key._fName) and filterclass(cls): yield self._withcycle(key), cls if recursive and (key._fClassName == b"TDirectory" or key._fClassName == b"TDirectoryFile"): for name, classname in key.get().iterclasses(recursive, filtername, filterclass): yield "{0}/{1}".format(self._withoutcycle(key).decode("ascii"), name.decode("ascii")).encode("ascii"), classname def iterclassnames(self, recursive=False, filtername=nofilter, filterclass=nofilter): for key in self._keys: cls = _classof(self._context, key._fClassName) if filtername(key._fName) and filterclass(cls): yield self._withcycle(key), key._fClassName.decode('ascii') if recursive and (key._fClassName == b"TDirectory" or key._fClassName == b"TDirectoryFile"): for name, classname in key.get().iterclassnames(recursive, filtername, filterclass): yield "{0}/{1}".format(self._withoutcycle(key).decode("ascii"), name.decode("ascii")).encode("ascii"), classname def keys(self, recursive=False, filtername=nofilter, filterclass=nofilter): return list(self.iterkeys(recursive=recursive, filtername=filtername, filterclass=filterclass)) def _ipython_key_completions_(self): "Support for completion of keys in an IPython kernel" return [item.decode("ascii") for item in self.iterkeys()] def values(self, recursive=False, filtername=nofilter, filterclass=nofilter): return list(self.itervalues(recursive=recursive, filtername=filtername, filterclass=filterclass)) def items(self, recursive=False, filtername=nofilter, filterclass=nofilter): return list(self.iteritems(recursive=recursive, filtername=filtername, filterclass=filterclass)) def classes(self, recursive=False, filtername=nofilter, filterclass=nofilter): return list(self.iterclasses(recursive=recursive, filtername=filtername, filterclass=filterclass)) def classnames(self, recursive=False, filtername=nofilter, filterclass=nofilter): return list(self.iterclassnames(recursive=recursive, filtername=filtername, filterclass=filterclass)) def allkeys(self, filtername=nofilter, filterclass=nofilter): return self.keys(recursive=True, filtername=filtername, filterclass=filterclass) def allvalues(self, filtername=nofilter, filterclass=nofilter): return self.values(recursive=True, filtername=filtername, filterclass=filterclass) def allitems(self, filtername=nofilter, filterclass=nofilter): return self.items(recursive=True, filtername=filtername, filterclass=filterclass) def allclasses(self, filtername=nofilter, filterclass=nofilter): return self.classes(recursive=True, filtername=filtername, filterclass=filterclass) def allclassnames(self, filtername=nofilter, filterclass=nofilter): return self.classnames(recursive=True, filtername=filtername, filterclass=filterclass) def get(self, name, cycle=None): name = _bytesid(name) if b"/" in name: out = self for n in name.split(b"/"): try: out = out.get(name=n, cycle=cycle) except TypeError: # Probably unexpected `cycle` keyword # Try Tree `get` options # Set recursive to `False`, because we are looking for an explicit (sub)branch try: out = out.get(name=n, recursive=False) except TypeError: # Probably unexpected `recursive` keyword # Try without options out = out.get(name=n) return out else: if cycle is None and b";" in name: at = name.rindex(b";") name, cycle = name[:at], name[at + 1:] cycle = int(cycle) last = None for key in self._keys: if key._fName == name: if cycle == key._fCycle: return key.get() elif cycle is None and last is None: last = key elif cycle is None and 
last._fCycle < key._fCycle: last = key if last is not None: return last.get() elif cycle is None: raise _KeyError("not found: {0}\n in file: {1}".format(repr(name), self._context.sourcepath)) else: raise _KeyError("not found: {0} with cycle {1}\n in file: {2}".format(repr(name), cycle, self._context.sourcepath)) def close(self): self._context.source.close() def __contains__(self, name): try: self.get(name) except KeyError: return False else: return True def __enter__(self, *args, **kwds): return self def __exit__(self, *args, **kwds): self.close() class _KeyError(KeyError): def __str__(self): return self.args[0] _KeyError.__name__ = "KeyError" _KeyError.__module__ = "builtins" if sys.version_info[0] > 2 else None ################################################################ helper functions for common tasks def _memsize(data): if isinstance(data, str): m = re.match(r"^\s*([+-]?(\d+(\.\d*)?|\.\d+)(e[+-]?\d+)?)\s*([kmgtpezy]?b)\s*$", data, re.I) if m is not None: target, unit = float(m.group(1)), m.group(5).upper() if unit == "KB": target *= 1024 elif unit == "MB": target *= 1024**2 elif unit == "GB": target *= 1024**3 elif unit == "TB": target *= 1024**4 elif unit == "PB": target *= 1024**5 elif unit == "EB": target *= 1024**6 elif unit == "ZB": target *= 1024**7 elif unit == "YB": target *= 1024**8 return target return None def _bytesid(x): if sys.version_info[0] > 2: if isinstance(x, str): return x.encode("ascii", "backslashreplace") else: return x else: if isinstance(x, unicode): return x.encode("ascii", "backslashreplace") else: return x def _startcheck(source, cursor): start = cursor.index cnt, vers = cursor.fields(source, _startcheck._format_cntvers) if numpy.int64(cnt) & uproot3.const.kByteCountMask: cnt = int(numpy.int64(cnt) & ~uproot3.const.kByteCountMask) return start, cnt + 4, vers else: cursor.index = start vers, = cursor.fields(source, _startcheck._format_cntvers2) return start, None, vers _startcheck._format_cntvers = struct.Struct(">IH") _startcheck._format_cntvers2 = struct.Struct(">H") def _endcheck(start, cursor, cnt): if cnt is not None: observed = cursor.index - start if observed != cnt: raise ValueError("object has {0} bytes; expected {1}".format(observed, cnt)) def _skiptobj(source, cursor): version = cursor.field(source, _skiptobj._format1) if numpy.int64(version) & uproot3.const.kByteCountVMask: cursor.skip(4) fUniqueID, fBits = cursor.fields(source, _skiptobj._format2) fBits = numpy.uint32(fBits) | uproot3.const.kIsOnHeap if fBits & uproot3.const.kIsReferenced: cursor.skip(2) _skiptobj._format1 = struct.Struct(">h") _skiptobj._format2 = struct.Struct(">II") def _nametitle(source, cursor): start, cnt, vers = _startcheck(source, cursor) _skiptobj(source, cursor) name = cursor.string(source) title = cursor.string(source) _endcheck(start, cursor, cnt) return name, title def _mapstrstr(source, cursor): cursor.skip(12) size = cursor.field(source, _mapstrstr._int32) cursor.skip(6) keys = [cursor.string(source) for i in range(size)] cursor.skip(6) values = [cursor.string(source) for i in range(size)] return dict(zip(keys, values)) _mapstrstr._int32 = struct.Struct('>I') def _readobjany(source, cursor, context, parent, asclass=None): # TBufferFile::ReadObjectAny() # https://github.com/root-project/root/blob/c4aa801d24d0b1eeb6c1623fd18160ef2397ee54/io/io/src/TBufferFile.cxx#L2684 # https://github.com/root-project/root/blob/c4aa801d24d0b1eeb6c1623fd18160ef2397ee54/io/io/src/TBufferFile.cxx#L2404 beg = cursor.index - cursor.origin bcnt = cursor.field(source, 
struct.Struct(">I")) if numpy.int64(bcnt) & uproot3.const.kByteCountMask == 0 or numpy.int64(bcnt) == uproot3.const.kNewClassTag: vers = 0 start = 0 tag = bcnt bcnt = 0 else: vers = 1 start = cursor.index - cursor.origin tag = cursor.field(source, struct.Struct(">I")) if numpy.int64(tag) & uproot3.const.kClassMask == 0: # reference object if tag == 0: return None # return null elif tag == 1: return parent elif tag not in cursor.refs: # jump past this object cursor.index = cursor.origin + beg + bcnt + 4 return None # return null else: return cursor.refs[tag] # return object elif tag == uproot3.const.kNewClassTag: # new class and object cname = _safename(cursor.cstring(source)) fct = context.classes.get(cname, Undefined) if vers > 0: cursor.refs[start + uproot3.const.kMapOffset] = fct else: cursor.refs[len(cursor.refs) + 1] = fct if asclass is None: obj = fct.read(source, cursor, context, parent) # new object if isinstance(obj, Undefined): obj._classname = cname else: obj = asclass.read(source, cursor, context, parent) # placeholder new object if vers > 0: cursor.refs[beg + uproot3.const.kMapOffset] = obj else: cursor.refs[len(cursor.refs) + 1] = obj return obj # return object else: # reference class, new object ref = int(numpy.int64(tag) & ~uproot3.const.kClassMask) if asclass is None: if ref not in cursor.refs: raise IOError("invalid class-tag reference\nin file: {0}".format(context.sourcepath)) fct = cursor.refs[ref] # reference class if fct not in context.classes.values(): raise IOError("invalid class-tag reference (not a recognized class: {0})\nin file: {1}".format(fct, context.sourcepath)) obj = fct.read(source, cursor, context, parent) # new object else: obj = asclass.read(source, cursor, context, parent) # placeholder new object if vers > 0: cursor.refs[beg + uproot3.const.kMapOffset] = obj else: cursor.refs[len(cursor.refs) + 1] = obj return obj # return object def _classof(context, classname): if classname == b"TDirectory" or classname == b"TDirectoryFile": cls = ROOTDirectory else: cls = context.classes.get(_safename(classname), None) if cls is None: cls = ROOTObject.__metaclass__("Undefined_" + str(_safename(classname)), (Undefined,), {"_classname": classname}) return cls def _readstreamers(source, cursor, context, parent): tlist = TList.read(source, cursor, context, parent) streamerinfos = [] streamerrules = [] for obj in tlist: if isinstance(obj, TStreamerInfo): dependencies = set() for element in obj._fElements: if isinstance(element, TStreamerBase): dependencies.add(element._fName) # if isinstance(element, (TStreamerObject, TStreamerObjectAny, TStreamerString)) or (isinstance(element, TStreamerObjectPointer) and element._fType == uproot3.const.kObjectp): # dependencies.add(element._fTypeName.rstrip(b"*")) streamerinfos.append((obj, dependencies)) elif isinstance(obj, TList) and all(isinstance(x, TObjString) for x in obj): streamerrules.append(obj) else: raise ValueError("expected TStreamerInfo or TList of TObjString in streamer info array\n in file: {0}".format(context.sourcepath)) # https://stackoverflow.com/a/11564769/1623645 def topological_sort(items): provided = set([x.encode("ascii") for x in builtin_classes]) while len(items) > 0: remaining_items = [] emitted = False for item, dependencies in items: if dependencies.issubset(provided): yield item provided.add(item._fName) emitted = True else: remaining_items.append((item, dependencies)) if not emitted: for pair in items: if pair in remaining_items: remaining_items.remove(pair) # raise ValueError("cannot sort 
TStreamerInfos into dependency order:\n\n{0}".format("\n".join("{0:20s} requires {1}".format(item._fName.decode("ascii"), " ".join(x.decode("ascii") for x in dependencies)) for item, dependencies in items))) items = remaining_items streamerinfos = list(topological_sort(streamerinfos)) streamerinfosmap = dict((x._fName, x) for x in streamerinfos) for streamerinfo in streamerinfos: streamerinfo.members = {} for element in streamerinfo._fElements: if isinstance(element, TStreamerBase): if element._fName in streamerinfosmap: streamerinfo.members.update(getattr(streamerinfosmap[element._fName], "members", {})) else: streamerinfo.members[element._fName] = element return streamerinfos, streamerinfosmap, streamerrules def _ftype2dtype(fType): if fType == uproot3.const.kBool: return "numpy.dtype(numpy.bool_)" elif fType == uproot3.const.kChar: return "numpy.dtype('i1')" elif fType in (uproot3.const.kUChar, uproot3.const.kCharStar): return "numpy.dtype('u1')" elif fType == uproot3.const.kShort: return "numpy.dtype('>i2')" elif fType == uproot3.const.kUShort: return "numpy.dtype('>u2')" elif fType == uproot3.const.kInt: return "numpy.dtype('>i4')" elif fType in (uproot3.const.kBits, uproot3.const.kUInt, uproot3.const.kCounter): return "numpy.dtype('>u4')" elif fType == uproot3.const.kLong: return "numpy.dtype(numpy.long).newbyteorder('>')" elif fType == uproot3.const.kULong: return "numpy.dtype('>u' + repr(numpy.dtype(numpy.long).itemsize))" elif fType == uproot3.const.kLong64: return "numpy.dtype('>i8')" elif fType == uproot3.const.kULong64: return "numpy.dtype('>u8')" elif fType in (uproot3.const.kFloat, uproot3.const.kFloat16): return "numpy.dtype('>f4')" elif fType in (uproot3.const.kDouble, uproot3.const.kDouble32): return "numpy.dtype('>f8')" else: return "None" def _longsize(issigned): if os.name == "nt": if sys.version_info[0] <= 2: return "q" if issigned else "Q" else: return "i" if issigned else "I" # wrong: gave up in PR #493 else: return "q" if issigned else "Q" def _ftype2struct(fType): if fType == uproot3.const.kBool: return "?" 
elif fType == uproot3.const.kChar: return "b" elif fType in (uproot3.const.kUChar, uproot3.const.kCharStar): return "B" elif fType == uproot3.const.kShort: return "h" elif fType == uproot3.const.kUShort: return "H" elif fType == uproot3.const.kInt: return "i" elif fType in (uproot3.const.kBits, uproot3.const.kUInt, uproot3.const.kCounter): return "I" elif fType == uproot3.const.kLong: return _longsize(True) elif fType == uproot3.const.kULong: return _longsize(False) elif fType == uproot3.const.kLong64: return "q" elif fType == uproot3.const.kULong64: return "Q" elif fType in (uproot3.const.kFloat, uproot3.const.kFloat16): return "f" elif fType in (uproot3.const.kDouble, uproot3.const.kDouble32): return "d" else: raise NotImplementedError(fType) def _safename(name): out = _safename._pattern.sub(lambda bad: "_" + "".join("{0:02x}".format(ord(x)) for x in bad.group(0)) + "_", name.decode("ascii")) if keyword.iskeyword(out): out = out + "__" return out _safename._pattern = re.compile("[^a-zA-Z0-9]+") def _raise_notimplemented(streamertype, streamerdict, source, cursor): raise NotImplementedError("\n\nUnimplemented streamer type: {0}\n\nmembers: {1}\n\nfile contents:\n\n{2}".format(streamertype, streamerdict, cursor.hexdump(source))) def _resolveversion(cls, self, classversion): if classversion not in cls._versions: raise ValueError("attempting to read {0} object with version {1}, but there is no streamer in this ROOT file with that class name and version (versions available: {2})".format(cls.__name__, classversion, list(cls._versions.keys()))) self.__class__ = cls._versions[classversion] def _defineclasses(streamerinfos, classes): skip = dict(builtin_skip) for streamerinfo in streamerinfos: pyclassname = _safename(streamerinfo._fName) if isinstance(streamerinfo, TStreamerInfo) and pyclassname not in builtin_classes and (pyclassname not in classes or hasattr(classes[pyclassname], "_versions")): hasreadobjany = False code = [" @classmethod", " def _readinto(cls, self, source, cursor, context, parent, asclass=None):", " start, cnt, classversion = _startcheck(source, cursor)", " startendcheck = True", " if cls._classversion != classversion:", " cursor.index = start", " if classversion in cls._versions:", " return cls._versions[classversion]._readinto(self, source, cursor, context, parent)", " elif cnt is None:", " startendcheck = False", " else:", " return Undefined.read(source, cursor, context, parent, cls.__name__)"] fields = [] recarray = [] bases = [] formats = {} dtypes = {} basicnames = [] basicletters = "" for elementi, element in enumerate(streamerinfo._fElements): if isinstance(element, TStreamerArtificial): code.append(" _raise_notimplemented({0}, {1}, source, cursor)".format(repr(element.__class__.__name__), repr(repr(element.__dict__)))) recarray.append("raise ValueError('not a recarray')") elif isinstance(element, TStreamerBase): code.append(" {0}._readinto(self, source, cursor, context, parent)".format(_safename(element._fName))) bases.append(_safename(element._fName)) elif isinstance(element, TStreamerBasicPointer): assert uproot3.const.kOffsetP < element._fType < uproot3.const.kOffsetP + 20 fType = element._fType - uproot3.const.kOffsetP dtypename = "_dtype{0}".format(len(dtypes) + 1) dtypes[dtypename] = _ftype2dtype(fType) code.append(" fBasketSeek_dtype = cls.{0}".format(dtypename)) if streamerinfo._fName == b"TBranch" and element._fName == b"fBasketSeek": code.append(" if getattr(context, \"speedbump\", True):") code.append(" if cursor.bytes(source, 1)[0] == 2:") code.append(" 
fBasketSeek_dtype = numpy.dtype('>i8')") else: code.append(" if getattr(context, \"speedbump\", True):") code.append(" cursor.skip(1)") code.append(" self._{0} = cursor.array(source, self._{1}, fBasketSeek_dtype)".format(_safename(element._fName), _safename(element._fCountName))) fields.append(_safename(element._fName)) recarray.append("raise ValueError('not a recarray')") elif isinstance(element, TStreamerBasicType): if element._fArrayLength == 0: basicnames.append("self._{0}".format(_safename(element._fName))) fields.append(_safename(element._fName)) fielddtype = _ftype2dtype(element._fType) if fielddtype == "None": recarray.append("raise ValueError('not a recarray')") else: recarray.append("out.append(({0}, {1}))".format(repr(str(element._fName.decode("ascii"))), fielddtype)) basicletters += _ftype2struct(element._fType) if elementi + 1 == len(streamerinfo._fElements) or not isinstance(streamerinfo._fElements[elementi + 1], TStreamerBasicType) or streamerinfo._fElements[elementi + 1]._fArrayLength != 0: formatnum = len(formats) + 1 formats["_format{0}".format(formatnum)] = "struct.Struct('>{0}')".format(basicletters) if len(basicnames) == 1: code.append(" {0} = cursor.field(source, cls._format{1})".format(basicnames[0], formatnum)) else: code.append(" {0} = cursor.fields(source, cls._format{1})".format(", ".join(basicnames), formatnum)) basicnames = [] basicletters = "" else: dtypename = "_dtype{0}".format(len(dtypes) + 1) fielddtype = dtypes[dtypename] = _ftype2dtype(element._fType) code.append(" self._{0} = cursor.array(source, {1}, cls.{2})".format(_safename(element._fName), element._fArrayLength, dtypename)) fields.append(_safename(element._fName)) if fielddtype == "None": recarray.append("raise ValueError('not a recarray')") else: recarray.append("out.append(({0}, {1}, {2}))".format(repr(str(element._fName.decode("ascii"))), fielddtype, element._fArrayLength)) elif isinstance(element, TStreamerLoop): code.extend([" cursor.skip(6)", " for index in range(self._{0}):".format(_safename(element._fCountName)), " self._{0} = {1}.read(source, cursor, context, self)".format(_safename(element._fName), _safename(element._fTypeName.rstrip(b"*")))]) elif isinstance(element, (TStreamerObjectAnyPointer, TStreamerObjectPointer)): if element._fType == uproot3.const.kObjectp or element._fType == uproot3.const.kAnyp: if pyclassname in skip and _safename(element._fName) in skip[pyclassname]: code.append(" Undefined.read(source, cursor, context, self)") else: code.append(" self._{0} = {1}.read(source, cursor, context, self)".format(_safename(element._fName), _safename(element._fTypeName.rstrip(b"*")))) fields.append(_safename(element._fName)) recarray.append("out.extend({0}._recarray())".format(_safename(element._fName))) elif element._fType == uproot3.const.kObjectP or element._fType == uproot3.const.kAnyP: if pyclassname in skip and _safename(element._fName) in skip[pyclassname]: code.append(" _readobjany(source, cursor, context, parent, asclass=Undefined)") hasreadobjany = True else: code.append(" self._{0} = _readobjany(source, cursor, context, parent)".format(_safename(element._fName))) hasreadobjany = True fields.append(_safename(element._fName)) recarray.append("raise ValueError('not a recarray')") else: code.append(" _raise_notimplemented({0}, {1}, source, cursor)".format(repr(element.__class__.__name__), repr(repr(element.__dict__)))) recarray.append("raise ValueError('not a recarray')") elif isinstance(element, TStreamerSTL): if element._fSTLtype == uproot3.const.kSTLstring or 
element._fTypeName == b"string": code.append(" cursor.skip(6)") code.append(" self._{0} = cursor.string(source)".format(_safename(element._fName))) fields.append(_safename(element._fName)) elif (element._fSTLtype == uproot3.const.kSTLvector and element._fCtype == uproot3.const.kBool) or element._fTypeName == b"vector<bool>" or element._fTypeName == b"vector<Bool_t>": code.append(" cursor.skip(6)") code.append(" self._{0} = cursor.array(source, cursor.field(source, self._int32), '?')".format(_safename(element._fName))) fields.append(_safename(element._fName)) elif (element._fSTLtype == uproot3.const.kSTLvector and element._fCtype == uproot3.const.kChar) or element._fTypeName == b"vector<char>" or element._fTypeName == b"vector<Char_t>": code.append(" cursor.skip(6)") code.append(" self._{0} = cursor.array(source, cursor.field(source, self._int32), 'i1')".format(_safename(element._fName))) fields.append(_safename(element._fName)) elif (element._fSTLtype == uproot3.const.kSTLvector and element._fCtype == uproot3.const.kUChar) or element._fTypeName == b"vector<unsigned char>" or element._fTypeName == b"vector<UChar_t>" or element._fTypeName == b"vector<Byte_t>": code.append(" cursor.skip(6)") code.append(" self._{0} = cursor.array(source, cursor.field(source, self._int32), 'u1')".format(_safename(element._fName))) fields.append(_safename(element._fName)) elif (element._fSTLtype == uproot3.const.kSTLvector and element._fCtype == uproot3.const.kShort) or element._fTypeName == b"vector<short>" or element._fTypeName == b"vector<Short_t>": code.append(" cursor.skip(6)") code.append(" self._{0} = cursor.array(source, cursor.field(source, self._int32), '>i2')".format(_safename(element._fName))) fields.append(_safename(element._fName)) elif (element._fSTLtype == uproot3.const.kSTLvector and element._fCtype == uproot3.const.kUShort) or element._fTypeName == b"vector<unsigned short>" or element._fTypeName == b"vector<UShort_t>": code.append(" cursor.skip(6)") code.append(" self._{0} = cursor.array(source, cursor.field(source, self._int32), '>u2')".format(_safename(element._fName))) fields.append(_safename(element._fName)) elif (element._fSTLtype == uproot3.const.kSTLvector and element._fCtype == uproot3.const.kInt) or element._fTypeName == b"vector<int>" or element._fTypeName == b"vector<Int_t>": code.append(" cursor.skip(6)") code.append(" self._{0} = cursor.array(source, cursor.field(source, self._int32), '>i4')".format(_safename(element._fName))) fields.append(_safename(element._fName)) elif (element._fSTLtype == uproot3.const.kSTLvector and element._fCtype == uproot3.const.kUInt) or element._fTypeName == b"vector<unsigned int>" or element._fTypeName == b"vector<UInt_t>": code.append(" cursor.skip(6)") code.append(" self._{0} = cursor.array(source, cursor.field(source, self._int32), '>u4')".format(_safename(element._fName))) fields.append(_safename(element._fName)) elif (element._fSTLtype == uproot3.const.kSTLvector and element._fCtype == uproot3.const.kLong) or element._fTypeName == b"vector<long>" or element._fTypeName == b"vector<Long_t>": code.append(" cursor.skip(6)") code.append(" self._{0} = cursor.array(source, cursor.field(source, self._int32), '>i8')".format(_safename(element._fName))) fields.append(_safename(element._fName)) elif (element._fSTLtype == uproot3.const.kSTLvector and element._fCtype == uproot3.const.kULong) or element._fTypeName == b"vector<unsigned long>" or element._fTypeName == b"vector<ULong64_t>": code.append(" cursor.skip(6)") code.append(" self._{0} = 
cursor.array(source, cursor.field(source, self._int32), '>u8')".format(_safename(element._fName))) fields.append(_safename(element._fName)) elif (element._fSTLtype == uproot3.const.kSTLvector and element._fCtype == uproot3.const.kFloat) or element._fTypeName == b"vector<float>" or element._fTypeName == b"vector<Float_t>": code.append(" cursor.skip(6)") code.append(" self._{0} = cursor.array(source, cursor.field(source, self._int32), '>f4')".format(_safename(element._fName))) fields.append(_safename(element._fName)) elif (element._fSTLtype == uproot3.const.kSTLvector and element._fCtype == uproot3.const.kDouble) or element._fTypeName == b"vector<double>" or element._fTypeName == b"vector<Double_t>": code.append(" cursor.skip(6)") code.append(" self._{0} = cursor.array(source, cursor.field(source, self._int32), '>f8')".format(_safename(element._fName))) fields.append(_safename(element._fName)) elif element._fTypeName == b"vector<string>": code.append(" cursor.skip(6)") code.append(" self._{0} = uproot3.interp.objects.STLVector(uproot3.interp.objects.STLString()).read(source, cursor, context, self)".format(_safename(element._fName))) elif element._fTypeName == b"map<string,string>": code.append(" self._{0} = _mapstrstr(source, cursor)".format(_safename(element._fName))) else: code.append(" _raise_notimplemented({0}, {1}, source, cursor)".format(repr(element.__class__.__name__), repr(repr(element.__dict__)))) recarray.append("raise ValueError('not a recarray')") elif isinstance(element, TStreamerSTLstring): code.append(" _raise_notimplemented({0}, {1}, source, cursor)".format(repr(element.__class__.__name__), repr(repr(element.__dict__)))) recarray.append("raise ValueError('not a recarray')") elif isinstance(element, (TStreamerObject, TStreamerObjectAny, TStreamerString)): if pyclassname in skip and _safename(element._fName) in skip[pyclassname]: code.append(" self._{0} = Undefined.read(source, cursor, context, self)".format(_safename(element._fName))) else: code.append(" self._{0} = {1}.read(source, cursor, context, self)".format(_safename(element._fName), _safename(element._fTypeName))) fields.append(_safename(element._fName)) recarray.append("out.extend({0}._recarray())".format(_safename(element._fTypeName))) else: raise AssertionError(element) code.extend([" if startendcheck:", " if self.__class__.__name__ == cls.__name__:", " self.__class__ = cls._versions[classversion]", " try:", " _endcheck(start, cursor, cnt)", " except ValueError:", " cursor.index = start", " return Undefined.read(source, cursor, context, parent, cls.__name__)", " return self"]) for n, v in sorted(formats.items()): code.append(" {0} = {1}".format(n, v)) for n, v in sorted(dtypes.items()): code.append(" {0} = {1}".format(n, v)) code.append(" _int32 = struct.Struct('>I')") code.insert(0, " _hasreadobjany = {0}".format(hasreadobjany)) code.insert(0, " _classversion = {0}".format(streamerinfo._fClassVersion)) code.insert(0, " _versions = versions") code.insert(0, " classname = {0}".format(repr(streamerinfo._fName.decode("ascii")))) if sys.version_info[0] > 2: code.insert(0, " _classname = {0}".format(repr(streamerinfo._fName))) else: code.insert(0, " _classname = b{0}".format(repr(streamerinfo._fName))) code.insert(0, " _fields = [{0}]".format(", ".join(repr(str(x)) for x in fields))) code.insert(0, " @classmethod\n def _recarray(cls):\n out = []\n out.append((' cnt', 'u4'))\n out.append((' vers', 'u2'))\n for base in cls._bases:\n out.extend(base._recarray())\n {0}\n return out".format("\n ".join(recarray))) 
code.insert(0, " _bases = [{0}]".format(", ".join(bases))) code.insert(0, " _methods = {0}".format("uproot_methods.classes.{0}.Methods".format(pyclassname) if uproot_methods.classes.hasmethods(pyclassname) else "None")) if len(bases) == 0: bases.append("ROOTStreamedObject") if pyclassname == "TTree": bases.insert(0, "uproot3.tree.TTreeMethods") if pyclassname == "TBranch": bases.insert(0, "uproot3.tree.TBranchMethods") if uproot_methods.classes.hasmethods(pyclassname): bases.insert(0, "uproot_methods.classes.{0}.Methods".format(pyclassname)) code.insert(0, "class {0}({1}):".format(pyclassname, ", ".join(bases))) if pyclassname in classes: versions = classes[pyclassname]._versions else: versions = {} classes["versions"] = versions pyclass = _makeclass(streamerinfo._fName, id(streamerinfo), "\n".join(code), classes) streamerinfo.pyclass = pyclass versions[pyclass._classversion] = pyclass return classes def _makeclass(classname, id, codestr, classes): exec(compile(codestr, "<generated from TStreamerInfo {0} at 0x{1:012x}>".format(repr(classname), id), "exec"), classes) out = classes[_safename(classname)] out._pycode = codestr return out ################################################################ built-in ROOT objects for bootstrapping up to streamed classes class ROOTObject(object): # makes __doc__ attribute mutable before Python 3.3 __metaclass__ = type.__new__(type, "type", (type,), {}) _copycontext = False @property def _classname(self): return self.__class__.__name__ @classmethod def read(cls, source, cursor, context, parent): if cls._copycontext: context = context.copy() out = cls.__new__(cls) out = cls._readinto(out, source, cursor, context, parent) out._postprocess(source, cursor, context, parent) return out @classmethod def _readinto(cls, self, source, cursor, context, parent): raise NotImplementedError def _postprocess(self, source, cursor, context, parent): pass def __repr__(self): if hasattr(self, "_fName"): return "<{0} {1} at 0x{2:012x}>".format(self.__class__.__name__, repr(self._fName), id(self)) else: return "<{0} at 0x{1:012x}>".format(self.__class__.__name__, id(self)) class TKey(ROOTObject): _classname = b"TKey" classname = "TKey" @classmethod def _readinto(cls, self, source, cursor, context, parent): start = cursor.index self._fNbytes, self._fVersion, self._fObjlen, self._fDatime, self._fKeylen, self._fCycle, self._fSeekKey, self._fSeekPdir = cursor.fields(source, self._format_small) if self._fVersion > 1000: cursor.index = start self._fNbytes, self._fVersion, self._fObjlen, self._fDatime, self._fKeylen, self._fCycle, self._fSeekKey, self._fSeekPdir = cursor.fields(source, self._format_big) self._fClassName = cursor.string(source) self._fName = cursor.string(source) self._fTitle = cursor.string(source) # if source.size() is not None: # if source.size() - self._fSeekKey < self._fNbytes: # raise ValueError("TKey declares that object {0} has {1} bytes but only {2} remain in the file (after the key)".format(repr(self._fName), self._fNbytes, source.size() - self._fSeekKey)) # object size != compressed size means it's compressed if self._fObjlen != self._fNbytes - self._fKeylen: self._source = uproot3.source.compressed.CompressedSource(context.compression, source, Cursor(self._fSeekKey + self._fKeylen), self._fNbytes - self._fKeylen, self._fObjlen) self._cursor = Cursor(0, origin=-self._fKeylen) # otherwise, it's uncompressed else: self._source = source self._cursor = Cursor(self._fSeekKey + self._fKeylen, origin=self._fSeekKey) self._context = context return self 
_format_small = struct.Struct(">ihiIhhii") _format_big = struct.Struct(">ihiIhhqq") def get(self, dismiss=True): """Extract the object this key points to. Objects are not read or decompressed until this function is explicitly called. """ try: return _classof(self._context, self._fClassName).read(self._source, self._cursor.copied(), self._context, self) finally: if dismiss: self._source.dismiss() def _canonicaltype(name): for pattern, replacement in _canonicaltype.patterns: name = pattern.sub(replacement, name) return name _canonicaltype.patterns = [ (re.compile(br"\bChar_t\b"), b"char"), # Signed Character 1 byte (char) (re.compile(br"\bUChar_t\b"), b"unsigned char"), # Unsigned Character 1 byte (unsigned char) (re.compile(br"\bShort_t\b"), b"short"), # Signed Short integer 2 bytes (short) (re.compile(br"\bUShort_t\b"), b"unsigned short"), # Unsigned Short integer 2 bytes (unsigned short) (re.compile(br"\bInt_t\b"), b"int"), # Signed integer 4 bytes (int) (re.compile(br"\bUInt_t\b"), b"unsigned int"), # Unsigned integer 4 bytes (unsigned int) (re.compile(br"\bSeek_t\b"), b"int"), # File pointer (int) (re.compile(br"\bLong_t\b"), b"long"), # Signed long integer 4 bytes (long) (re.compile(br"\bULong_t\b"), b"unsigned long"), # Unsigned long integer 4 bytes (unsigned long) (re.compile(br"\bFloat_t\b"), b"float"), # Float 4 bytes (float) (re.compile(br"\bFloat16_t\b"), b"float"), # Float 4 bytes written with a truncated mantissa (re.compile(br"\bDouble_t\b"), b"double"), # Double 8 bytes (re.compile(br"\bDouble32_t\b"), b"double"), # Double 8 bytes in memory, written as a 4 bytes float (re.compile(br"\bLongDouble_t\b"), b"long double"), # Long Double (re.compile(br"\bText_t\b"), b"char"), # General string (char) (re.compile(br"\bBool_t\b"), b"bool"), # Boolean (0=false, 1=true) (bool) (re.compile(br"\bByte_t\b"), b"unsigned char"), # Byte (8 bits) (unsigned char) (re.compile(br"\bVersion_t\b"), b"short"), # Class version identifier (short) (re.compile(br"\bOption_t\b"), b"const char"), # Option string (const char) (re.compile(br"\bSsiz_t\b"), b"int"), # String size (int) (re.compile(br"\bReal_t\b"), b"float"), # TVector and TMatrix element type (float) (re.compile(br"\bLong64_t\b"), b"long long"), # Portable signed long integer 8 bytes (re.compile(br"\bULong64_t\b"), b"unsigned long long"), # Portable unsigned long integer 8 bytes (re.compile(br"\bAxis_t\b"), b"double"), # Axis values type (double) (re.compile(br"\bStat_t\b"), b"double"), # Statistics type (double) (re.compile(br"\bFont_t\b"), b"short"), # Font number (short) (re.compile(br"\bStyle_t\b"), b"short"), # Style number (short) (re.compile(br"\bMarker_t\b"), b"short"), # Marker number (short) (re.compile(br"\bWidth_t\b"), b"short"), # Line width (short) (re.compile(br"\bColor_t\b"), b"short"), # Color number (short) (re.compile(br"\bSCoord_t\b"), b"short"), # Screen coordinates (short) (re.compile(br"\bCoord_t\b"), b"double"), # Pad world coordinates (double) (re.compile(br"\bAngle_t\b"), b"float"), # Graphics angle (float) (re.compile(br"\bSize_t\b"), b"float"), # Attribute size (float) ] class TStreamerInfo(ROOTObject): _classname = b"TStreamerInfo" classname = "TStreamerInfo" @classmethod def _readinto(cls, self, source, cursor, context, parent): start, cnt, self._classversion = _startcheck(source, cursor) self._fName = _canonicaltype(_nametitle(source, cursor)[0]) self._fCheckSum, self._fClassVersion = cursor.fields(source, TStreamerInfo._format) self._fElements = _readobjany(source, cursor, context, parent) assert 
isinstance(self._fElements, list) _endcheck(start, cursor, cnt) return self _format = struct.Struct(">Ii") def show(self, stream=sys.stdout): out = "StreamerInfo for class: {0}, version={1}, checksum=0x{2:08x}\n{3}{4}".format(self._fName.decode("ascii"), self._fClassVersion, self._fCheckSum, "\n".join(" " + x.show(stream=None) for x in self._fElements), "\n" if len(self._fElements) > 0 else "") if stream is None: return out else: stream.write(out) stream.write("\n") class TStreamerElement(ROOTObject): _classname = b"TStreamerElement" classname = "TStreamerElement" @classmethod def _readinto(cls, self, source, cursor, context, parent): start, cnt, self._classversion = _startcheck(source, cursor) self._fOffset = 0 # https://github.com/root-project/root/blob/master/core/meta/src/TStreamerElement.cxx#L505 self._fName, self._fTitle = _nametitle(source, cursor) self._fType, self._fSize, self._fArrayLength, self._fArrayDim = cursor.fields(source, TStreamerElement._format1) if self._classversion == 1: n = cursor.field(source, TStreamerElement._format2) self._fMaxIndex = cursor.array(source, n, ">i4") else: self._fMaxIndex = cursor.array(source, 5, ">i4") self._fTypeName = _canonicaltype(cursor.string(source)) if self._fType == 11 and (self._fTypeName == "Bool_t" or self._fTypeName == "bool"): self._fType = 18 if self._classversion <= 2: # FIXME # self._fSize = self._fArrayLength * gROOT->GetType(GetTypeName())->Size() pass self._fXmin, self._fXmax, self._fFactor = 0.0, 0.0, 0.0 if self._classversion == 3: self._fXmin, self._fXmax, self._fFactor = cursor.fields(source, TStreamerElement._format3) if self._classversion > 3: # FIXME # if (TestBit(kHasRange)) GetRange(GetTitle(),fXmin,fXmax,fFactor) pass _endcheck(start, cursor, cnt) return self _format1 = struct.Struct(">iiii") _format2 = struct.Struct(">i") _format3 = struct.Struct(">ddd") def show(self, stream=sys.stdout): out = "{0:15s} {1:15s} offset={2:3d} type={3:2d} {4}".format(self._fName.decode("ascii"), self._fTypeName.decode("ascii"), self._fOffset, self._fType, self._fTitle.decode("ascii")) if stream is None: return out else: stream.write(out) stream.write("\n") class TStreamerArtificial(TStreamerElement): _classname = b"TStreamerArtificial" classname = "TStreamerArtificial" @classmethod def _readinto(cls, self, source, cursor, context, parent): start, cnt, self._classversion = _startcheck(source, cursor) super(TStreamerArtificial, self)._readinto(self, source, cursor, context, parent) _endcheck(start, cursor, cnt) return self class TStreamerBase(TStreamerElement): _classname = b"TStreamerBase" classname = "TStreamerBase" @classmethod def _readinto(cls, self, source, cursor, context, parent): start, cnt, self._classversion = _startcheck(source, cursor) super(TStreamerBase, self)._readinto(self, source, cursor, context, parent) if self._classversion >= 2: self._fBaseVersion = cursor.field(source, TStreamerBase._format) _endcheck(start, cursor, cnt) return self _format = struct.Struct(">i") class TStreamerBasicPointer(TStreamerElement): _classname = b"TStreamerBasicPointer" classname = "TStreamerBasicPointer" @classmethod def _readinto(cls, self, source, cursor, context, parent): start, cnt, self._classversion = _startcheck(source, cursor) super(TStreamerBasicPointer, self)._readinto(self, source, cursor, context, parent) self._fCountVersion = cursor.field(source, TStreamerBasicPointer._format) self._fCountName = cursor.string(source) self._fCountClass = cursor.string(source) _endcheck(start, cursor, cnt) return self _format = 
struct.Struct(">i") class TStreamerBasicType(TStreamerElement): _classname = b"TStreamerBasicType" classname = "TStreamerBasicType" @classmethod def _readinto(cls, self, source, cursor, context, parent): start, cnt, self._classversion = _startcheck(source, cursor) super(TStreamerBasicType, self)._readinto(self, source, cursor, context, parent) if uproot3.const.kOffsetL < self._fType < uproot3.const.kOffsetP: self._fType -= uproot3.const.kOffsetL basic = True if self._fType in (uproot3.const.kBool, uproot3.const.kUChar, uproot3.const.kChar): self._fSize = 1 elif self._fType in (uproot3.const.kUShort, uproot3.const.kShort): self._fSize = 2 elif self._fType in (uproot3.const.kBits, uproot3.const.kUInt, uproot3.const.kInt, uproot3.const.kCounter): self._fSize = 4 elif self._fType in (uproot3.const.kULong, uproot3.const.kULong64, uproot3.const.kLong, uproot3.const.kLong64): self._fSize = 8 elif self._fType in (uproot3.const.kFloat, uproot3.const.kFloat16): self._fSize = 4 elif self._fType in (uproot3.const.kDouble, uproot3.const.kDouble32): self._fSize = 8 elif self._fType == uproot3.const.kCharStar: self._fSize = numpy.dtype(numpy.intp).itemsize else: basic = False if basic and self._fArrayLength > 0: self._fSize *= self._fArrayLength _endcheck(start, cursor, cnt) return self class TStreamerLoop(TStreamerElement): _classname = b"TStreamerLoop" classname = "TStreamerLoop" @classmethod def _readinto(cls, self, source, cursor, context, parent): start, cnt, self._classversion = _startcheck(source, cursor) super(TStreamerLoop, self)._readinto(self, source, cursor, context, parent) self._fCountVersion = cursor.field(source, TStreamerLoop._format) self._fCountName = cursor.string(source) self._fCountClass = cursor.string(source) _endcheck(start, cursor, cnt) return self _format = struct.Struct(">i") class TStreamerObject(TStreamerElement): _classname = b"TStreamerObject" classname = "TStreamerObject" @classmethod def _readinto(cls, self, source, cursor, context, parent): start, cnt, self._classversion = _startcheck(source, cursor) super(TStreamerObject, self)._readinto(self, source, cursor, context, parent) _endcheck(start, cursor, cnt) return self class TStreamerObjectAny(TStreamerElement): _classname = b"TStreamerObjectAny" classname = "TStreamerObjectAny" @classmethod def _readinto(cls, self, source, cursor, context, parent): start, cnt, self._classversion = _startcheck(source, cursor) super(TStreamerObjectAny, self)._readinto(self, source, cursor, context, parent) _endcheck(start, cursor, cnt) return self class TStreamerObjectAnyPointer(TStreamerElement): _classname = b"TStreamerObjectAnyPointer" classname = "TStreamerObjectAnyPointer" @classmethod def _readinto(cls, self, source, cursor, context, parent): start, cnt, self._classversion = _startcheck(source, cursor) super(TStreamerObjectAnyPointer, self)._readinto(self, source, cursor, context, parent) _endcheck(start, cursor, cnt) return self class TStreamerObjectPointer(TStreamerElement): _classname = b"TStreamerObjectPointer" classname = "TStreamerObjectPointer" @classmethod def _readinto(cls, self, source, cursor, context, parent): start, cnt, self._classversion = _startcheck(source, cursor) super(TStreamerObjectPointer, self)._readinto(self, source, cursor, context, parent) _endcheck(start, cursor, cnt) return self class TStreamerSTL(TStreamerElement): _classname = b"TStreamerSTL" classname = "TStreamerSTL" @classmethod def _readinto(cls, self, source, cursor, context, parent): start, cnt, self._classversion = _startcheck(source, cursor) 
super(TStreamerSTL, self)._readinto(self, source, cursor, context, parent) self._fSTLtype, self._fCtype = cursor.fields(source, TStreamerSTL._format) if self._fSTLtype == uproot3.const.kSTLmultimap or self._fSTLtype == uproot3.const.kSTLset: if self._fTypeName.startswith(b"std::set") or self._fTypeName.startswith(b"set"): self._fSTLtype = uproot3.const.kSTLset elif self._fTypeName.startswith(b"std::multimap") or self._fTypeName.startswith(b"multimap"): self._fSTLtype = uproot3.const.kSTLmultimap _endcheck(start, cursor, cnt) return self @classmethod def vector(cls, fType, fTypeName): self = cls.__new__(cls) self._fSTLtype = uproot3.const.kSTLvector self._fCtype = fType self._fTypeName = b"vector<" + fTypeName + b">" return self _format = struct.Struct(">ii") class TStreamerSTLstring(TStreamerSTL): _classname = b"TStreamerSTLstring" classname = "TStreamerSTLstring" @classmethod def _readinto(cls, self, source, cursor, context, parent): start, cnt, self._classversion = _startcheck(source, cursor) super(TStreamerSTLstring, self)._readinto(self, source, cursor, context, parent) _endcheck(start, cursor, cnt) return self class TStreamerString(TStreamerElement): _classname = b"TStreamerString" classname = "TStreamerString" @classmethod def _readinto(cls, self, source, cursor, context, parent): start, cnt, self._classversion = _startcheck(source, cursor) super(TStreamerString, self)._readinto(self, source, cursor, context, parent) _endcheck(start, cursor, cnt) return self ################################################################ streamed classes (with some overrides) class ROOTStreamedObject(ROOTObject): _fields = [] @classmethod def _members(cls): out = [] for t in cls.__bases__: if issubclass(t, ROOTStreamedObject): out.extend(t._members()) out.extend(cls._fields) return out @classmethod def _recarray(cls): raise ValueError("not a recarray") @classmethod def _recarray_dtype(cls, cntvers=False, tobject=True): dtypesin = cls._recarray() dtypesout = [] used = set() allhidden = True for name, dtype in dtypesin: if name in used: i = 2 trial = name + str(i) while trial in used: i += 1 trial = name + str(i) name = trial if (cntvers or not (name == " cnt" or name == " vers")) and (tobject or not (name == " fUniqueID" or name == " fBits")): dtypesout.append((name, dtype)) used.add(name) if not name.startswith(" "): allhidden = False if allhidden: raise ValueError("not a recarray") return numpy.dtype(dtypesout) class TObject(ROOTStreamedObject): _classname = b"TObject" classname = "TObject" @classmethod def _recarray(cls): return [(" fBits", numpy.dtype(">u8")), (" fUniqueID", numpy.dtype(">u8"))] @classmethod def _readinto(cls, self, source, cursor, context, parent): _skiptobj(source, cursor) return self class TString(bytes, ROOTStreamedObject): _classname = b"TString" classname = "TString" @classmethod def _readinto(cls, self, source, cursor, context, parent): return TString(cursor.string(source)) def __str__(self): return self.decode("utf-8", "replace") class TNamed(TObject): _classname = b"TNamed" classname = "TNamed" _fields = ["fName", "fTitle"] @classmethod def _recarray(cls): raise ValueError("not a recarray") @classmethod def _readinto(cls, self, source, cursor, context, parent): start, cnt, self._classversion = _startcheck(source, cursor) TObject._readinto(self, source, cursor, context, parent) self._fName = cursor.string(source) self._fTitle = cursor.string(source) _endcheck(start, cursor, cnt) return self class TObjArray(list, ROOTStreamedObject): _classname = b"TObjArray" classname = 
"TObjArray" @classmethod def read(cls, source, cursor, context, parent, asclass=None): if cls._copycontext: context = context.copy() out = cls.__new__(cls) out = cls._readinto(out, source, cursor, context, parent, asclass=asclass) out._postprocess(source, cursor, context, parent) return out @classmethod def _readinto(cls, self, source, cursor, context, parent, asclass=None): start, cnt, self._classversion = _startcheck(source, cursor) _skiptobj(source, cursor) name = cursor.string(source) size, low = cursor.fields(source, struct.Struct(">ii")) self.extend([_readobjany(source, cursor, context, parent, asclass=asclass) for i in range(size)]) _endcheck(start, cursor, cnt) return self class TObjString(bytes, ROOTStreamedObject): _classname = b"TObjString" classname = "TObjString" @classmethod def _readinto(cls, self, source, cursor, context, parent): start, cnt, self._classversion = _startcheck(source, cursor) _skiptobj(source, cursor) string = cursor.string(source) _endcheck(start, cursor, cnt) return TObjString(string) def __str__(self): return self.decode("utf-8", "replace") class TList(list, ROOTStreamedObject): _classname = b"TList" classname = "TList" @classmethod def _readinto(cls, self, source, cursor, context, parent): start, cnt, self._classversion = _startcheck(source, cursor) _skiptobj(source, cursor) name = cursor.string(source) size = cursor.field(source, struct.Struct(">i")) for i in range(size): self.append(_readobjany(source, cursor, context, parent)) n = cursor.field(source, TList._format_n) # ignore option cursor.bytes(source, n) _endcheck(start, cursor, cnt) return self _format_n = struct.Struct(">B") class THashList(TList): _classname = b"THashList" classname = "THashList" @classmethod def _readinto(cls, self, source, cursor, context, parent): TList._readinto(self, source, cursor, context, parent) return self class TRef(ROOTStreamedObject): _classname = b"TRef" classname = "TRef" _format1 = struct.Struct(">xxIxxxxxx") def __init__(self, id): self.id = id @classmethod def _readinto(cls, self, source, cursor, context, parent): self.id = cursor.field(source, self._format1) return self def __repr__(self): return "<TRef {0}>".format(self.id) @classmethod def _recarray(cls): out = [] out.append(("pidf", ">u2")) out.append(("id", ">u4")) out.append((" other", "S6")) return out TRef._methods = TRef TRef._arraymethods = None TRef._fromrow = lambda row: TRef(row["id"]) class TRefArray(list, ROOTStreamedObject): _classname = b"TRefArray" classname = "TRefArray" _format1 = struct.Struct(">i") _dtype = numpy.dtype(">i4") @classmethod def _readinto(cls, self, source, cursor, context, parent): start, cnt, self._classversion = _startcheck(source, cursor) cursor.skip(10) self.name = cursor.string(source) self.length = cursor.field(source, self._format1) cursor.skip(6) self.extend(cursor.array(source, self.length, self._dtype)) _endcheck(start, cursor, cnt) return self @property def nbytes(self): return len(self) * self._dtype.itemsize def tobytes(self): return _tobytes(numpy.asarray(self, dtype=self._dtype)) def tostring(self): return self.tobytes() class TArray(list, ROOTStreamedObject): _classname = b"TArray" classname = "TArray" @classmethod def _readinto(cls, self, source, cursor, context, parent): length = cursor.field(source, TArray._format) self.extend(cursor.array(source, length, self._dtype)) return self _format = struct.Struct(">i") @property def nbytes(self): return len(self) * self._dtype.itemsize def tobytes(self): return _tobytes(numpy.asarray(self, dtype=self._dtype)) def 
tostring(self): return self.tobytes() class TArrayC(TArray): _classname = b"TArrayC" classname = "TArrayC" _dtype = numpy.dtype(">i1") class TArrayS(TArray): _classname = b"TArrayS" classname = "TArrayS" _dtype = numpy.dtype(">i2") class TArrayI(TArray): _classname = b"TArrayI" classname = "TArrayI" _dtype = numpy.dtype(">i4") class TArrayL(TArray): _classname = b"TArrayL" classname = "TArrayL" _dtype = numpy.dtype(numpy.int_).newbyteorder(">") class TArrayL64(TArray): _classname = b"TArrayL64" classname = "TArrayL64" _dtype = numpy.dtype(">i8") class TArrayF(TArray): _classname = b"TArrayF" classname = "TArrayF" _dtype = numpy.dtype(">f4") class TArrayD(TArray): _classname = b"TArrayD" classname = "TArrayD" _dtype = numpy.dtype(">f8") # FIXME: I want to generalize this. It's the first example of a class that doesn't # follow the usual pattern. The full 11 bytes are # # "40 00 00 07 00 00 1a a1 2f 10 00" # # I'm reasonably certain the first "40 00 00 07" is count with a kByteCountMask. # The next "00 00" probably isn't the version, since the streamer said it's version 1. # I'm also reasonably certain that the last byte is the fIOBits data. # That leaves 4 bytes unaccounted for. class ROOT_3a3a_TIOFeatures(ROOTStreamedObject): _classname = b"ROOT::TIOFeatures" classname = "ROOT::TIOFeatures" _fields = ["fIOBits"] @classmethod def _readinto(cls, self, source, cursor, context, parent): start, cnt, self._classversion = _startcheck(source, cursor) cursor.skip(4) self._fIOBits = cursor.field(source, ROOT_3a3a_TIOFeatures._format1) _endcheck(start, cursor, cnt) return self _format1 = struct.Struct(">B") class ROOT_3a3a_Experimental_3a3a_RNTuple(ROOTStreamedObject): _classname = b"ROOT::Experimental::RNTuple" classname = "ROOT::Experimental::RNTuple" _fields = ["fVersion", "fSize", "fSeekHeader", "fNBytesHeader", "fLenHeader", "fSeekFooter", "fNBytesFooter", "fLenFooter", "fReserved"] @classmethod def _readinto(cls, self, source, cursor, context, parent): start, cnt, self._classversion = _startcheck(source, cursor) cursor.skip(4) self._fVersion, self._fSize, self._fSeekHeader, self._fNBytesHeader, self._fLenHeader, self._fSeekFooter, self._fNBytesFooter, self._fLenFooter, self._fReserved = cursor.fields(source, ROOT_3a3a_Experimental_3a3a_RNTuple._format1) return self _format1 = struct.Struct(">IIQIIQIIQ") class Undefined(ROOTStreamedObject): _classname = None classname = None @classmethod def read(cls, source, cursor, context, parent, classname=None): if cls._copycontext: context = context.copy() out = cls.__new__(cls) out = cls._readinto(out, source, cursor, context, parent) out._postprocess(source, cursor, context, parent) out._classname = classname return out @classmethod def _readinto(cls, self, source, cursor, context, parent): self._cursor = cursor.copied() start, cnt, self._classversion = _startcheck(source, cursor) if cnt is None: raise TypeError("cannot read objects of type {0} and cannot even skip over this one (returning Undefined) because its size is not known\n in file: {1}".format("???" 
if self._classname is None else self._classname.decode("ascii"), context.sourcepath)) cursor.skip(cnt - 6) _endcheck(start, cursor, cnt) return self def __repr__(self): if self._classname is not None: return "<{0} (failed to read {1} version {2}) at 0x{3:012x}>".format(self.__class__.__name__, repr(self._classname), self._classversion, id(self)) else: return "<{0} at 0x{1:012x}>".format(self.__class__.__name__, id(self)) builtin_classes = {"uproot_methods": uproot_methods, "TObject": TObject, "TString": TString, "TNamed": TNamed, "TObjArray": TObjArray, "TObjString": TObjString, "TList": TList, "THashList": THashList, "TRef": TRef, "TArray": TArray, "TArrayC": TArrayC, "TArrayS": TArrayS, "TArrayI": TArrayI, "TArrayL": TArrayL, "TArrayL64": TArrayL64, "TArrayF": TArrayF, "TArrayD": TArrayD, "TRefArray": TRefArray, "ROOT_3a3a_TIOFeatures": ROOT_3a3a_TIOFeatures, "ROOT_3a3a_Experimental_3a3a_RNTuple": ROOT_3a3a_Experimental_3a3a_RNTuple} builtin_skip = {"TBranch": ["fBaskets"], "TTree": ["fUserInfo", "fBranchRef"]}
bsd-3-clause
2,335,658,211,488,084,500
44.516802
306
0.580968
false
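A minimal standalone sketch of the typedef-normalization idea used by _canonicaltype in the record above: each ROOT typedef such as Int_t or Double32_t is rewritten to its underlying C type name with a word-boundary regex substitution. This is not part of uproot3 itself; the pattern list is an illustrative subset and the function name is only a stand-in.

import re

# illustrative subset of ROOT typedef -> C type rewrites (bytes in, bytes out)
_patterns = [
    (re.compile(br"\bInt_t\b"), b"int"),
    (re.compile(br"\bFloat_t\b"), b"float"),
    (re.compile(br"\bDouble32_t\b"), b"double"),
]

def canonicaltype(name):
    # apply every rewrite in order; \b keeps e.g. UInt_t from matching Int_t
    for pattern, replacement in _patterns:
        name = pattern.sub(replacement, name)
    return name

assert canonicaltype(b"vector<Double32_t>") == b"vector<double>"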
mesosphere/dispatch
dispatch/job.py
1
1034
from __future__ import absolute_import, print_function

import json
import socket
import urlparse
import uuid

from . import state


class Job(object):

    resource = {
        "cpus": 0.01,
        "mem": 10
    }

    def __init__(self, data):
        self.data = data
        self.id = str(uuid.uuid4())
        self.port = None
        self.location = None
        self.running = False

        self.script, self.public_key = json.loads(self.data)

    def uris(self):
        # XXX - wrapper location shouldn't live here.
        base = "http://{0}:{1}".format(
            socket.gethostbyname(socket.gethostname()),
            state.ARGS.port)
        return [
            urlparse.urljoin(base, "/static/wrapper.bash"),
            urlparse.urljoin(base, "/static/sshd_config"),
            urlparse.urljoin(base, "/job/{0}/script".format(self.id)),
            urlparse.urljoin(base, "/job/{0}/public_key".format(self.id)),
        ]

    def connection(self):
        return "{0}:{1}".format(self.location, self.port)
apache-2.0
6,089,966,219,099,346,000
24.85
74
0.5706
false
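A hypothetical usage sketch for the Job class above. The constructor expects a JSON-encoded [script, public_key] pair (that shape comes from the constructor itself); the script text, key string, and printed fields are made up for illustration, and the class is assumed to be importable from its package.

import json

# the constructor expects a JSON-encoded [script, public_key] pair
data = json.dumps(["#!/bin/bash\necho hello", "ssh-rsa AAAA... user@host"])

job = Job(data)
print(job.id)        # random UUID assigned at construction
print(job.resource)  # {"cpus": 0.01, "mem": 10}
# job.uris() additionally requires state.ARGS.port to be configured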
drak0viens/ccsocket
nfq_handler.py
1
9623
#!/usr/bin/python # -*- coding: UTF-8 -*- ## This file is part of ccsocket ## Copyright (C) Tomas Dragoun <[email protected]> ## This program is published under a GPLv3 license ######################################################## import nfqueue import sys import signal from multiprocessing import Process, Pipe, Lock from socket import AF_INET6 from scapy.all import * from scapy.layers.inet6 import ICMPv6Unknown from headers import IPv6ExtHdrAH from constants import Constants ############################ ## ## ## NFQHandler ## ## ## ############################ class NFQHandler(Process): #---------------------------------------------------------------------------------- ''' This class handles netfilter queue. Is connected with a parent process via pipe. Messages are decoded and removed from incoming packets, data are send to pipe. In passive mode intercept queue both incoming outgo- ing traffic. Inherits multiprocessing.Process ''' #---------------------------------------------------------------------------------- def __init__(self, encoder, pipe, sendevt, stopevt, proto, active, address): ''' Call parent's constructor at first ''' Process.__init__(self) # init parent (multiprocessing.Process) self.name = 'NFQHandler-port ' + str(address[1]) self.daemon = True # set process daemonic ''' Initialize class attributes ''' self._const = Constants() self._encoder = encoder # encodes message in packet self._pipe = pipe # exchange data with parent process via pipe self._can_send = sendevt # event shared with parent process self._stop_send = stopevt # event shared with parent process self._proto = proto # upper-layer protocol self._active = active # mode self._host = address[0] self._port = address[1] ''' Folowing steps prepare netfilter queue with _port as queue number. There is always only one active queue associated with given number. ''' self._queue = nfqueue.queue() # create queue self._queue.open() # open queue try: self._queue.bind(AF_INET6) # set family type AF_INET6 except: # fails when any other queue already runs pass self._queue.set_callback(self.handlepacket) # set queue callback ''' Final step raises RuntimeError in case there is some other queue with the same number active, queue wasn't closed properly or user's priviledges are insufficient. ''' try: self._queue.create_queue(self._port) except Exception, e: raise e #---------------------------------------------------------------------------------- def __del__(self): if self._pipe: # close connection with parent process self._pipe.close() #---------------------------------------------------------------------------------- def destroyqueue(self): ''' Attempts to close queue ''' if self._queue: #print 'stopping queue ' + str(self._port) self._queue.close() # close queue self._queue = None #---------------------------------------------------------------------------------- def _clear(self): ''' Removes all data to send from pipe and sets state to idle ''' while self._pipe.poll(): # clear pipe self._pipe.recv() self._can_send.set() self._stop_send.clear() #---------------------------------------------------------------------------------- def run(self): ''' Runs endless loop. Every time a packet is occurs in queue _handlepacket method is called. 
''' #print 'starting queue ' + str(self._port) self._queue.try_run() #---------------------------------------------------------------------------------- def handlepacket(self, number, payload): ''' Queue callback function ''' packet = IPv6(payload.get_data()) # decode packet from queue as IPv6 ''' Check if packet belongs to this queue - upperlayer ID field must match in active mode. ''' modify, reroute = self._checkport(packet) if not modify: ''' Reroute packet to correct queue. Verdict NF_QUEUE is 32-bit number. Lower 16 bits code this verdict and upper 16 bits are used to identify target queue. ''' if reroute != -1: error = payload.set_verdict(nfqueue.NF_QUEUE | (reroute << 16)) if not error: return ''' Packet doesn't have icmp echo layer or target port isn't active, accept packet ''' payload.set_verdict(nfqueue.NF_ACCEPT) return ''' Port is ok, we need to check if address matches. Ip6tables rules filter addresses, but packet might have been rerouted from other queue. ''' if len(self._host): # check source/destination address if packet.src != self._host and packet.dst != self._host: payload.set_verdict(nfqueue.NF_ACCEPT) return ''' Nfqueue mark is used to distinguish between incoming and outgoing packets. Each packet is marked. ''' mark = payload.get_nfmark() # get mark of this packet if mark == 1: # incoming packet self._incoming(packet, payload) elif mark == 2: # outgoing packet self._outgoing(packet, payload) #---------------------------------------------------------------------------------- def _incoming(self, packet, payload): message = self._encoder.getmessage(packet) # decode message if message is None: # no message ''' Accept packet ''' payload.set_verdict(nfqueue.NF_ACCEPT) else: ''' Remove message and pass modified packet to queue ''' modified_packet = self._encoder.removemessage(packet) payload.set_verdict_modified(nfqueue.NF_ACCEPT, str(modified_packet), len(modified_packet)) try: if not len(message): return except: pass self._pipe.send((message, (packet.src, self._port, 0, 0))) #---------------------------------------------------------------------------------- def _outgoing(self, packet, payload): if self._stop_send.is_set(): self._clear() if self._pipe.poll(): # any data to send? message = self._pipe.recv() # get message ''' Encode message and return modified packet to queue ''' modified_packet = self._encoder.addmessage(message, (packet, None)) payload.set_verdict_modified(nfqueue.NF_ACCEPT, str(modified_packet), len(modified_packet)) if not self._pipe.poll(): # sending finished self._can_send.set() else: # nothing to send, return packet to queue payload.set_verdict(nfqueue.NF_ACCEPT) #---------------------------------------------------------------------------------- def _checkport(self, packet): ''' Returns tuple (bool, value). True, if packet belongs to this queue. In pa- ssive mode always returns True. In active mode upperlayer id field must ma- tch current _port number. Value is number of queue where will be packet re- routed. 
''' ''' Passive mode - override icmp id check ''' if not self._active: return (True, 0) ''' Active mode - check icmp (or fragment) id field (~ represents port) ''' if packet.haslayer(ICMPv6EchoRequest): # upperlayer ICMPv6EchoRequest id = packet[ICMPv6EchoRequest].id elif packet.haslayer(ICMPv6EchoReply): # upperlayer ICMPv6EchoReply id = packet[ICMPv6EchoReply].id elif packet.haslayer(IPv6ExtHdrFragment): # fragmented packet id = packet[IPv6ExtHdrFragment].id elif packet.haslayer(ICMPv6Unknown) and packet.haslayer(IPv6ExtHdrAH): type = packet[ICMPv6Unknown].type # ICMPv6 packet with AH if type != 128 and type != 129: return (False, -1) # accept packet packet[IPv6ExtHdrAH].decode_payload_as(ICMPv6EchoRequest) id = packet[ICMPv6EchoRequest].id elif self._proto == self._const.PROTO_ALL: # any protocol return (True, 0) # id matches port number else: return (False, -1) # accept packet if id == self._port: return (True, 0) # id matches port number else: return (False, id) # reroute to correct queue #----------------------------------------------------------------------------------
gpl-3.0
-3,009,712,564,005,565,400
45.713592
87
0.488309
false
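The rerouting branch in NFQHandler.handlepacket above packs the target queue number into the upper 16 bits of the NF_QUEUE verdict. A tiny sketch of just that bit layout, with the numeric NF_QUEUE value assumed to be 3 as in netfilter; the helper name is only a stand-in.

NF_QUEUE = 3  # assumption: numeric value of the NF_QUEUE verdict in netfilter

def reroute_verdict(target_queue):
    # upper 16 bits select the destination queue, lower 16 bits carry the verdict
    return NF_QUEUE | (target_queue << 16)

v = reroute_verdict(42)
assert v & 0xffff == NF_QUEUE       # verdict part
assert (v >> 16) & 0xffff == 42     # queue part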
davidsonpaul/python-bucket
import.py
1
6337
#!/usr/local/bin/python # # Simple Python (2.7.2) program that fetches bookmarks from your delicious account # and imports it into your pinboard account. # # Created this (my first python program, so it probably sucks, but it works!) after # getting repeated 'read failed' errors when trying to import my delicious export html # file into pinboard. # # Whilst writing and testing this, noticed that the delicious api was returning empty # description (title) values which are required by pinboard, which may have been the # original culprit. This program attempts to get the title from the original page # resource itself. If that fails, the title is set to 'default' as a last resort. # # Program parameters: # 1. delicious username # 2. delicous password # 3. pinboard username # 4. pinboard password # 5. verbose (Y/N) # # e.g. ./import.py dusr dpwd pusr ppwd Y # # NOTE: # 1. If title cannot be scraped from online resource, it's set to 'default'. # 2. If title contains non-ascii unicode characters these are not translated but ignored. # 3. Added a 9 second delay between pinboard bookmark posts to prevent throttling. This # is 3 times longer than what the pinboard API docs suggest, to be on the safe side. # # # The MIT License (MIT) # # Copyright (c) 2014 Paul Davidson # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. 
# import base64 import httplib import lxml.html import sys import time import urllib import urllib2 import xml.etree.ElementTree as ET user_agent = 'Bookmarks-Import-App' pinboard_rate_limit = 9 delicious_username = sys.argv[1] delicious_password = sys.argv[2] pinboard_username = sys.argv[3] pinboard_password = sys.argv[4] verbose = True if sys.argv[5] == 'Y' else False class PinboardBookmark: def __init__(self, url, description, extended, tags, dt, private): self.url = urllib.quote(url) self.description = description # as of this writing delicious API is not including titles in responses for some reason # so we have to try and get these ourselves by scraping it directly from the resource if len(self.description) == 0: try: self.description = lxml.html.parse(url).find('.//title').text except: if verbose: print 'Error getting title for', url, '(so using default)', sys.exc_info()[0] self.description = 'default' # encoding title to ascii for inclusion as URL param # and ignoring translation of any non-ascii characters self.description = self.description.encode('ascii', 'ignore') self.description = urllib.quote(self.description) self.extended = extended if len(self.extended) > 0: self.extended = self.extended.encode('ascii', 'ignore') self.extended = urllib.quote(self.extended) self.tags = tags if len(self.tags) > 0: self.tags = self.tags.encode('ascii', 'ignore') self.tags = urllib.quote(tags) self.dt = dt if len(self.dt) > 0: self.dt = self.dt.encode('ascii', 'ignore') self.dt = urllib.quote(dt) self.shared = 'no' if private == 'yes' else 'yes' def print_bookmark(self): print 'Pinboard Bookmark: url: %s; title: %s; description: %s; tags: %s; dt: %s; shared: %s' % (self.url, self.description, self.extended, self.tags, self.dt, self.shared) print def add(self): uri = 'https://api.pinboard.in/v1/posts/add?url=%s&description=%s&extended=%s&tags=%s&dt=%s&shared=%s' % (self.url, self.description, self.extended, self.tags, self.dt, self.shared) request = urllib2.Request(uri) request.add_header('User-Agent', user_agent) request.add_header('Authorization', 'Basic ' + base64.b64encode('%s:%s' % (pinboard_username, pinboard_password))) opener = urllib2.build_opener() try: response = opener.open(request).read() except urllib2.HTTPError as e: print e.code print e.read() return if verbose: print response # Returns URI for *all* your bookmarks from delicious! def build_delicious_request(user, pwd): uri = 'https://api.delicious.com/v1/posts/all' request = urllib2.Request(uri) request.add_header('User-Agent', user_agent) request.add_header('Authorization', 'Basic ' + base64.b64encode('%s:%s' % (user, pwd))) return request def do_pinboard_import(): delicious_request = build_delicious_request(delicious_username, delicious_password) opener = urllib2.build_opener() try: bookmarksXML = opener.open(delicious_request).read() except urllib2.HTTPError as e: print e.code print e.read() return root = ET.fromstring(bookmarksXML) for child in root: pbookmark = PinboardBookmark(child.get('href'), child.get('description'), child.get('extended'), child.get('tag'), child.get('time'), child.get('private')) if verbose: pbookmark.print_bookmark() pbookmark.add() # add delay between requests for pinboard's benefit time.sleep(pinboard_rate_limit) def main(): print 'Starting import...' do_pinboard_import() print 'Import complete.' return if __name__ == '__main__': main()
mit
-4,337,314,912,735,955,000
35.843023
121
0.67745
false
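Both the Delicious fetch and the Pinboard posts in the importer above authenticate with an HTTP Basic header built by hand. A minimal Python 2 sketch of that pattern; the credentials and URL are placeholders, not real endpoints.

import base64
import urllib2

user, pwd = "someuser", "somepassword"                     # placeholders
request = urllib2.Request("https://example.invalid/api")   # placeholder URL
request.add_header("User-Agent", "Bookmarks-Import-App")
request.add_header("Authorization",
                   "Basic " + base64.b64encode("%s:%s" % (user, pwd)))
# urllib2.build_opener().open(request) would then perform the authenticated call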
jdgranberry/Word-Pair-Colocation-Frequency
word_freq_reducer.py
1
1341
#!/usr/bin/python
'''
This reduction script is derived almost entirely from the reducer.py script
found here:
http://www.michael-noll.com/tutorials/writing-an-hadoop-mapreduce-program-in-python/

Thanks to Michael Noll for posting his Hadoop MapReduce with Python.
I make no claims to have created this code.
'''

from operator import itemgetter
import sys

current_word = None
current_count = 0
word = None

# input comes from STDIN
for line in sys.stdin:
    # remove leading and trailing whitespace
    line = line.strip()

    # parse the input we got from mapper.py
    word, count = line.split('\t', 1)

    # convert count (currently a string) to int
    try:
        count = int(count)
    except ValueError:
        # count was not a number, so silently ignore/discard line
        continue

    # this IF-switch only works because Hadoop sorts map output
    # by key (here: word) before it is passed to the reducer
    if current_word == word:
        current_count += count
    else:
        if current_word:
            # write result to STDOUT
            print '%s\t%s' % (current_word, current_count)
        current_count = count
        current_word = word

# do not forget to output the last word if needed!
if current_word == word:
    print '%s\t%s' % (current_word, current_count)
mit
361,622,477,208,370,940
29.477273
88
0.653244
false
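The reducer above only works because Hadoop feeds it map output sorted by key, so equal keys arrive adjacently. A tiny self-contained demo of the same grouping logic on a pre-sorted list of tab-separated pairs, independent of Hadoop streaming:

lines = ["dog\t1", "dog\t1", "fish\t2", "fish\t1"]  # already sorted by key

current_word, current_count = None, 0
for line in lines:
    word, count = line.split("\t", 1)
    count = int(count)
    if current_word == word:
        current_count += count
    else:
        if current_word is not None:
            print("%s\t%s" % (current_word, current_count))
        current_word, current_count = word, count
if current_word is not None:
    print("%s\t%s" % (current_word, current_count))
# prints "dog 2" then "fish 3" -- grouping only works because equal keys are adjacent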
easyNav/easyNav-pi
StartUpPk.py
1
4223
import subprocess import time import speaker import fileinput import os class StartUp(object): def __init__(self): self.speakery=speaker.newSpeaker() self.server="" self.dispatcher = "" self.nav = "" self.voice = "" self.serial = "" self.alert = "" self.cruncher = "" self.ctr = 0 def monitor(self): while(1): if(self.dispatcher.poll() != None): #process died self.speakery.say("Dispatcher Daemon Died!") self.speakery.say("restarting Dispatcher") self.dispatcher = self.startDispatcher() if(self.nav.poll() != None): #process died self.speakery.say("Navigation Daemon Died!") self.speakery.say("restarting Navigation") self.nav = self.startNav() if(self.voice.poll() != None): #process died self.speakery.say("Voice Daemon Died!") self.speakery.say("restarting Voice") self.voice = self.startVoice() if(self.serial.poll() != None): #process died self.speakery.say("Serial Daemon Died!") self.speakery.say("restarting Serial") self.serial = self.startSerial() if(self.alert.poll() != None): #process died self.speakery.say("Alert Daemon Died!") self.speakery.say("restarting Alert") self.alert = self.startAlert() if(self.cruncher.poll() != None): #process died self.speakery.say("Cruncher Daemon Died!") self.speakery.say("restarting Cruncher") self.cruncher = self.startCruncher() #check if there is a recv error in serial with open("serial.txt") as openfile2: for line in openfile2: if "error" in line: self.ctr+=1 self.speakery.say("Serial Daemon has an recieve error, restarting Serial Daemon") if(self.ctr == 1): self.ctr=0 os.system("sudo pkill -SIGTERM -f \"serialmod\"") break time.sleep(3) def startServer(self): serverStarted = False server = subprocess.Popen('sh start_node.sh > server.txt 2>&1', shell=True) self.speakery.say("Starting server, please wait") while(not serverStarted): with open("server.txt") as openfile: for line in openfile: for part in line.split(): #print part if "1337" in part: print part serverStarted = True openfile.close() time.sleep(0.1) self.speakery.say("Server is Up") return server def startDispatcher(self): dispatcher = subprocess.Popen('easyNav-pi-dispatcher > dispatcher.txt 2>&1', shell=True) self.speakery.say("Started dispatcher") return dispatcher def startNav(self): #startup Nav nav = subprocess.Popen('easyNav-pi-nav > navigation.txt', shell=True) self.speakery.say("Started Nav") return nav def startVoice(self): voice = subprocess.Popen('sudo python /home/pi/repos/easyNav-IO/voice.py', shell=True) self.speakery.say("Started Voice") return voice def startSerial(self): serial = subprocess.Popen('sudo python /home/pi/repos/easyNav-serial/sprotpy/serialmod.py' , shell=True) self.speakery.say("Started Serial") return serial def startAlert(self): alert = subprocess.Popen('sudo python /home/pi/repos/easyNav-serial/sprotpy/alert.py', shell=True) self.speakery.say("Started alert") return alert def startCruncher(self): cruncher = subprocess.Popen('sudo python /home/pi/repos/easyNav-gears2/Cruncher/cruncher.py pi', shell=True) self.speakery.say("Started cruncher") return cruncher def updateMap(self): subprocess.Popen("python /home/pi/repos/easyNav-pi-scripts/src/loadMaps.py > updateMap.txt 2>&1" , shell=True); self.speakery.say("Maps updated") def runMain(): startUp = StartUp() startUp.server = startUp.startServer() #recent inclusion, update map after server kicks in # startUp.updateMap(); # time.sleep(15) startUp.dispatcher = startUp.startDispatcher() time.sleep(3) startUp.nav = startUp.startNav() time.sleep(3) startUp.voice = startUp.startVoice() time.sleep(3) startUp.serial = 
startUp.startSerial() time.sleep(3) startUp.alert = startUp.startAlert() time.sleep(3) startUp.cruncher = startUp.startCruncher() time.sleep(3) #startUp.monitor() if __name__ == '__main__': runMain() #monitor forever #cruncher = subprocess.Popen('sudo python /home/pi/repos/easyNav-gears2/Cruncher/cruncher.py pi > cruncher.txt 2>&1', shell=True)
mit
2,544,761,257,595,453,400
27.153333
129
0.694293
false
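StartUp.monitor above decides that a daemon died when Popen.poll() stops returning None. A stripped-down sketch of that supervision pattern with a placeholder child command (Unix 'sleep'):

import subprocess
import time

proc = subprocess.Popen("sleep 2", shell=True)  # placeholder child process
while proc.poll() is None:                      # None means the child is still running
    time.sleep(0.5)
print("child exited with code %d" % proc.returncode)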
vanam/clustering
clustering_system/clustering/igmm/DdCrpClustering.py
1
10933
import math import random from typing import List, Tuple, Callable import numpy as np import scipy.misc from clustering_system.clustering.ClusteringABC import CovarianceType from clustering_system.clustering.GibbsClusteringABC import GibbsClusteringABC from clustering_system.clustering.mixture.GaussianMixtureABC import NormalInverseWishartPrior from clustering_system.utils import draw_indexed from clustering_system.visualization.LikelihoodVisualizer import LikelihoodVisualizer def exponential_decay(d: float, a: float = 1): """ Decays the probability of linking to an earlier customer exponentially with the distance to the current customer. f(d) = exp(-d / a) / a :param d: distance (non-negative finite value) :param a: decay constant :return: decay """ return math.exp(-d / a) def window_decay(d: float, a: float): """ Only considers customers that are at most distance 'a' from the current customer. f(d) = 1/[d < a] :param d: distance (non-negative finite value) :param a: maximum distance :return: decay """ return 1 if d < a else 0 def logistic_decay(d: float, a: float): """ Logistic decay is a smooth version of the window decay. f(d) = 1 - 1 / (1 + exp(-d + a)) = exp(-d + a) / (1 + exp(-d + a)) :param d: distance (non-negative finite value) :param a: the x-value of the sigmoid's midpoint :return: decay """ return math.exp(-d + a) / (1 + math.exp(-d + a)) class DdCrpClustering(GibbsClusteringABC): """Clustering based on the Distance Dependent Chinese Restaurant Process""" def __init__(self, K: int, D: int, alpha: float, prior: NormalInverseWishartPrior, n_iterations: int, decay_function: Callable[[float], float], visualizer: LikelihoodVisualizer = None, covariance_type: CovarianceType = CovarianceType.full): """ :param K: Init number of clusters :param D: The length of a feature vector :param alpha: Hyperparameter of self assignment :param prior: Prior :param n_iterations: The number of iterations to perform each update :param decay_function: Decay function :param visualizer: Likelihood visualizer :param covariance_type: Covariance type """ super().__init__(D, alpha, prior, n_iterations, K_max=K, visualizer=visualizer, covariance_type=covariance_type) self.f = decay_function self.g = [] # undirected graph self.c = [] # customer assignments self.timestamps = [] self.likelihood_cache = {} def add_documents(self, vectors: np.ndarray, metadata: np.ndarray): """ Add documents represented by a list of vectors. 
:param vectors: A list of vectors :param metadata: A list of metadata """ for md, vector in zip(metadata, vectors): doc_id, timestamp, *_ = md # Add document at the end of arrays self.ids.append(doc_id) i = self.N self.c.append(i) # Customer is assigned to self self.g.append({i}) # Customer has a link to self self.mixture.new_vector(vector, self._get_new_cluster_number()) # Customer sits to his own table self.N += 1 # Increment number of documents (customers) self.K += 1 # Increment number of tables if self.N > self.K_max: self._remove_assignment(i) self._add_assignment(i, random.randint(0, i)) # Store timestamp of document (prior information) self.timestamps.append(timestamp / (60*60*24)) # Timestamp in days def _sample_document(self, i: int): """ Sample document i :param i: document id """ # Remove customer assignment for a document i self._remove_assignment(i) # Calculate customer assignment probabilities for each document (including self) probabilities = self._get_assignment_probabilities(i) # Convert indexed log probabilities to probabilities (softmax) ids, probabilities = zip(*probabilities) probabilities = np.exp(probabilities - scipy.misc.logsumexp(probabilities)) probabilities = list(zip(ids, probabilities)) # Sample new customer assignment c = draw_indexed(probabilities) # Link document to new customer self._add_assignment(i, c) def __iter__(self): """ For each document return (doc_id, cluster_id, linked_doc_id) """ for doc_id, cluster_id, c_i in zip(self.ids, self.mixture.z, self.c): yield doc_id, cluster_id, self.ids[c_i] def _add_assignment(self, i: int, c: int): """ Add new customer assignment c_i. :param i: document index :param c: new customer assignment (document index to which document i points to) """ # If we have to join tables if self.mixture.z[i] != self.mixture.z[c]: # Move customers to a table with smaller cluster number if self.mixture.z[i] > self.mixture.z[c]: table_to_join = self.mixture.z[i] table_no = self.mixture.z[c] else: table_to_join = self.mixture.z[c] table_no = self.mixture.z[i] self.reusable_numbers.put_nowait(table_to_join) # Make cluster number available self.K -= 1 # Go through people at table with higher number and move them to the other table self.mixture.merge(table_no, table_to_join) # Set new customer assignment self.c[i] = c # Update undirected graph self.g[i].add(c) self.g[c].add(i) def _remove_assignment(self, i: int) -> None: """ Remove customer assignment c_i. :param i: document index """ is_split = self._is_table_split(i) # Update undirected graph c = self.c[i] c_c = self.c[c] if c == i: # If self assignment self.g[i].remove(i) elif c_c == i: # If trivial cycle a <--> b breaks to a <-- b pass # Graph remains the same else: self.g[i].remove(c) self.g[c].remove(i) # Remove customer assignment c_i self.c[i] = -1 if is_split: new_table_no = self._get_new_cluster_number() # Move customers to a new table self.mixture.split(self.mixture.z[i], new_table_no, self._get_people_next_to(i)) # Increment number of tables self.K += 1 def _get_assignment_probabilities(self, i: int) -> List[Tuple[int, float]]: """ Get probabilities of assignment of document i to all documents available. Probabilities lower than the threshold are not returned. Always at least one probability (self assignment) is returned. 
:param i: customer index :return: list of tuples (document index, log assignment probability) """ probabilities = [] for c in range(self.N): prob = self._assignment_probability(i, c) probabilities.append((c, prob)) return probabilities def _assignment_probability(self, i: int, c: int) -> float: """ Return log probability of an assignment of document i to document c :param i: document index :param c: document index :return: Log probability of an assignment """ # If self assignment if i == c: return math.log(self.alpha) # Time distance between documents d = abs(self.timestamps[i] - self.timestamps[c]) # If not joining two tables if self.mixture.z[i] == self.mixture.z[c]: # Return ddCRP prior return math.log(self.f(d)) else: return math.log(self.f(d)) + self._likelihood_under_z(i, c) def _is_table_split(self, i: int): """ Does removal of c_i splits one table to two? :param i: customer index :return: Return False if c_i is on a cycle, True otherwise """ c = self.c[i] # If there is a trivial cycle a <--> a if c == i: return False # If there is a trivial cycle a <--> b if i == self.c[c]: return False # Traverse directed graph in search for a cycle which contains assignment c_i visited = {i} c = self.c[i] while c not in visited: visited.add(c) c = self.c[c] # Every vertex has only one neighbour # Return true if next customer is a starting customer if c == i: return False return True def _get_people_next_to(self, c: int): """ Get indices of customers siting with customer c at the same table :param c: customer index :return: Set of people transitively sitting next to customer c """ # Traverse undirected graph from customer i visited = set() stack = [c] while len(stack) > 0: c = stack.pop() if c not in visited: visited.add(c) stack.extend(self.g[c] - visited) return visited def _likelihood_under_z(self, i: int, c: int): """ The likelihood of the observations under the partition given by z(c) :param i: customer index :param c: customer assignment index :return: The likelihood of the observations """ table_k_members = frozenset(self._get_people_next_to(i)) table_l_members = frozenset(self._get_people_next_to(c)) table_kl_members = frozenset(table_k_members.union(table_l_members)) if table_k_members in self.likelihood_cache: table_k = self.likelihood_cache[table_k_members] else: table_k = self.mixture.get_marginal_likelihood(table_k_members) self.likelihood_cache[table_k_members] = table_k if table_l_members in self.likelihood_cache: table_l = self.likelihood_cache[table_l_members] else: table_l = self.mixture.get_marginal_likelihood(table_l_members) self.likelihood_cache[table_l_members] = table_l if table_kl_members in self.likelihood_cache: table_kl = self.likelihood_cache[table_kl_members] else: table_kl = self.mixture.get_marginal_likelihood(table_kl_members) self.likelihood_cache[table_kl_members] = table_kl return table_kl - table_k - table_l
mit
3,526,969,188,711,163,400
33.488959
120
0.584286
false
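In _sample_document above, indexed log probabilities are converted to a proper distribution by subtracting their log-sum-exp before exponentiating. A small numeric sketch of that normalization, written with plain numpy instead of scipy.misc.logsumexp; the input values are arbitrary.

import numpy as np

log_p = np.array([-3.2, -1.0, -2.5])            # unnormalized log probabilities
m = log_p.max()
log_norm = m + np.log(np.exp(log_p - m).sum())  # numerically stable log-sum-exp
p = np.exp(log_p - log_norm)
assert abs(p.sum() - 1.0) < 1e-12               # now a valid probability distribution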
rafajaques/ExemplosPyGame
Caindo/caindo.py
1
2494
#coding:utf-8
import pygame, os, random
from pygame.locals import *

# Set the window position
os.environ['SDL_VIDEO_WINDOW_POS'] = "100,100"

# Initialize PyGame
pygame.init()

# 1024x768 screen
tela = pygame.display.set_mode((1024,768))
pygame.display.set_caption("Caindo!")

# Maximum FPS of the game
fps = 80

# Shorthand for the mouse module
mouse = pygame.mouse

# Shorthand for the clock
clock = pygame.time.Clock()

# Player's bar
bar = pygame.Surface((80,20))
bar = bar.convert()
bar.fill((0,0,255))

# Prepare the game background
tela_tam = {"larg": 1024, "alt": 768}
fundo = pygame.Surface((tela_tam["larg"],tela_tam["alt"]))
fundo = fundo.convert()
fundo.fill((200,200,200))

# Prepare a falling particle :)
particula = pygame.Surface((20,20))
particula = particula.convert()
particula.fill((0,0,0))

# Is the particle on screen?
part_tela = False

# Hide the mouse pointer
mouse.set_visible(False)

# Set the initial speed
veloc = 0.3

while True:
    # Check keyboard events
    for evt in pygame.event.get():
        # If we should quit (X in the corner or ESC key)
        if evt.type == QUIT or (evt.type == KEYDOWN and evt.key == K_ESCAPE):
            exit()

    tela.blit(fundo, (0,0))

    # Get the elapsed time from the clock
    tempo = clock.tick(fps)

    # Spawn a particle
    if not part_tela:
        part_pos = {"x": random.randint(1,tela_tam["larg"]-20), "y": 20}
        part_tela = True
    else:
        part_pos["y"] += tempo * veloc

    tela.blit(particula, (part_pos["x"], part_pos["y"]))

    # Get the mouse position
    mouse_pos = mouse.get_pos()

    # Draw the bar using only the mouse X position, since Y is fixed
    # and, depending on the screen size, Y also changes.
    if mouse_pos[0] < 0:
        mouse_x = 0
    # Width - 80 (which is the width of the bar)
    elif mouse_pos[0] > tela_tam["larg"] - 80:
        mouse_x = tela_tam["larg"] - 80
    else:
        mouse_x = mouse_pos[0]

    # Leave 50 pixels between the bottom of the screen and the bar
    mouse_y = tela_tam["alt"] - 50

    # Draw the bar
    tela.blit(bar, (mouse_x, mouse_y))

    # Prepare the Rects for the bar and the particle
    bar_rect = pygame.Rect((mouse_x,mouse_y), (80,20))
    part_rect = pygame.Rect((part_pos["x"], part_pos["y"]), (20,20))

    # Detect collision
    if bar_rect.colliderect(part_rect):
        part_tela = False
        veloc += 0.1

    # Check whether the particle left the screen
    if part_pos['y']>tela_tam['alt']:
        part_tela=False
        # later something more could be implemented here, such as reducing the speed, etc...

    pygame.display.flip()
mit
-550,836,296,841,306,700
23.245098
100
0.675293
false
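The colliderect call in the game loop above is an axis-aligned bounding-box test. For reference, the same check written with plain (x, y, width, height) tuples and no pygame dependency; the sample rectangles are made up.

def rects_collide(a, b):
    # a and b are (x, y, width, height) tuples
    ax, ay, aw, ah = a
    bx, by, bw, bh = b
    return ax < bx + bw and bx < ax + aw and ay < by + bh and by < ay + ah

bar = (100, 718, 80, 20)
particle = (120, 700, 20, 20)
assert rects_collide(bar, particle)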
meteorcloudy/tensorflow
tensorflow/contrib/eager/python/examples/revnet/blocks.py
1
12195
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Reversible residual network compatible with eager execution. Building blocks with manual backward gradient computation. Reference [The Reversible Residual Network: Backpropagation Without Storing Activations](https://arxiv.org/pdf/1707.04585.pdf) """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import six import tensorflow as tf from tensorflow.contrib.eager.python.examples.revnet import ops class RevBlock(tf.keras.Model): """Single reversible block containing several `_Residual` blocks. Each `_Residual` block in turn contains two _ResidualInner blocks, corresponding to the `F`/`G` functions in the paper. """ def __init__(self, n_res, filters, strides, input_shape, batch_norm_first=False, data_format="channels_first", bottleneck=False, fused=True): """Initialize RevBlock. Args: n_res: number of residual blocks filters: list/tuple of integers for output filter sizes of each residual strides: length 2 list/tuple of integers for height and width strides input_shape: length 3 list/tuple of integers batch_norm_first: whether to apply activation and batch norm before conv data_format: tensor data format, "NCHW"/"NHWC" bottleneck: use bottleneck residual if True fused: use fused batch normalization if True """ super(RevBlock, self).__init__() self.blocks = tf.contrib.checkpoint.List() for i in range(n_res): curr_batch_norm_first = batch_norm_first and i == 0 curr_strides = strides if i == 0 else (1, 1) block = _Residual( filters, curr_strides, input_shape, batch_norm_first=curr_batch_norm_first, data_format=data_format, bottleneck=bottleneck, fused=fused) self.blocks.append(block) if data_format == "channels_first": input_shape = (filters, input_shape[1] // curr_strides[0], input_shape[2] // curr_strides[1]) else: input_shape = (input_shape[0] // curr_strides[0], input_shape[1] // curr_strides[1], filters) def call(self, h, training=True): """Apply reversible block to inputs.""" for block in self.blocks: h = block(h, training=training) return h def backward_grads_and_vars(self, x, y, dy, training=True): """Apply reversible block backward to outputs.""" grads_all = [] vars_all = [] for i in reversed(range(len(self.blocks))): block = self.blocks[i] if i == 0: y_inv = x else: # Don't update running stats when reconstructing activations vars_and_vals = block.get_moving_stats() y_inv = block.backward(y, training=training) block.restore_moving_stats(vars_and_vals) # Update running stats when computing gradients during training dy, grads, vars_ = block.backward_grads_and_vars( y_inv, dy, training=training) grads_all += grads vars_all += vars_ return dy, grads_all, vars_all class _Residual(tf.keras.Model): """Single residual block contained in a _RevBlock. Each `_Residual` object has two _ResidualInner objects, corresponding to the `F` and `G` functions in the paper. 
Args: filters: output filter size strides: length 2 list/tuple of integers for height and width strides input_shape: length 3 list/tuple of integers batch_norm_first: whether to apply activation and batch norm before conv data_format: tensor data format, "NCHW"/"NHWC", bottleneck: use bottleneck residual if True fused: use fused batch normalization if True """ def __init__(self, filters, strides, input_shape, batch_norm_first=True, data_format="channels_first", bottleneck=False, fused=True): super(_Residual, self).__init__() self.filters = filters self.strides = strides self.axis = 1 if data_format == "channels_first" else 3 if data_format == "channels_first": f_input_shape = (input_shape[0] // 2,) + input_shape[1:] g_input_shape = (filters // 2, input_shape[1] // strides[0], input_shape[2] // strides[1]) else: f_input_shape = input_shape[:2] + (input_shape[2] // 2,) g_input_shape = (input_shape[0] // strides[0], input_shape[1] // strides[1], filters // 2) factory = _BottleneckResidualInner if bottleneck else _ResidualInner self.f = factory( filters=filters // 2, strides=strides, input_shape=f_input_shape, batch_norm_first=batch_norm_first, data_format=data_format, fused=fused) self.g = factory( filters=filters // 2, strides=(1, 1), input_shape=g_input_shape, batch_norm_first=batch_norm_first, data_format=data_format, fused=fused) def call(self, x, training=True, concat=True): """Apply residual block to inputs.""" x1, x2 = tf.split(x, num_or_size_splits=2, axis=self.axis) f_x2 = self.f(x2, training=training) # TODO(lxuechen): Replace with simpler downsampling x1_down = ops.downsample( x1, self.filters // 2, self.strides, axis=self.axis) x2_down = ops.downsample( x2, self.filters // 2, self.strides, axis=self.axis) y1 = f_x2 + x1_down g_y1 = self.g(y1, training=training) y2 = g_y1 + x2_down if not concat: # For correct backward grads return y1, y2 return tf.concat([y1, y2], axis=self.axis) def backward(self, y, training=True): """Reconstruct inputs from outputs; only valid when stride 1.""" assert self.strides == (1, 1) y1, y2 = tf.split(y, num_or_size_splits=2, axis=self.axis) g_y1 = self.g(y1, training=training) x2 = y2 - g_y1 f_x2 = self.f(x2, training=training) x1 = y1 - f_x2 return tf.concat([x1, x2], axis=self.axis) def backward_grads_and_vars(self, x, dy, training=True): """Manually compute backward gradients given input and output grads.""" with tf.GradientTape(persistent=True) as tape: x = tf.identity(x) # TODO(lxuechen): Remove after b/110264016 is fixed x1, x2 = tf.split(x, num_or_size_splits=2, axis=self.axis) tape.watch([x1, x2]) # Stitch back x for `call` so tape records correct grads x = tf.concat([x1, x2], axis=self.axis) dy1, dy2 = tf.split(dy, num_or_size_splits=2, axis=self.axis) y1, y2 = self.call(x, training=training, concat=False) x2_down = ops.downsample( x2, self.filters // 2, self.strides, axis=self.axis) grads_combined = tape.gradient( y2, [y1] + self.g.trainable_variables, output_gradients=[dy2]) dy2_y1, dg = grads_combined[0], grads_combined[1:] dy1_plus = dy2_y1 + dy1 grads_combined = tape.gradient( y1, [x1, x2] + self.f.trainable_variables, output_gradients=[dy1_plus]) dx1, dx2, df = grads_combined[0], grads_combined[1], grads_combined[2:] dx2 += tape.gradient(x2_down, [x2], output_gradients=[dy2])[0] del tape grads = df + dg vars_ = self.f.trainable_variables + self.g.trainable_variables return tf.concat([dx1, dx2], axis=self.axis), grads, vars_ def get_moving_stats(self): vars_and_vals = {} def _is_moving_var(v): # pylint: disable=invalid-name n = v.name 
      return n.endswith("moving_mean:0") or n.endswith("moving_variance:0")

    for v in filter(_is_moving_var, self.f.variables + self.g.variables):
      vars_and_vals[v] = v.read_value()

    return vars_and_vals

  def restore_moving_stats(self, vars_and_vals):
    for var_, val in six.iteritems(vars_and_vals):
      var_.assign(val)


def _BottleneckResidualInner(filters,
                             strides,
                             input_shape,
                             batch_norm_first=True,
                             data_format="channels_first",
                             fused=True):
  """Single bottleneck residual inner function contained in _Residual.

  Corresponds to the `F`/`G` functions in the paper.
  Suitable for training on ImageNet dataset.

  Args:
    filters: output filter size
    strides: length 2 list/tuple of integers for height and width strides
    input_shape: length 3 list/tuple of integers
    batch_norm_first: whether to apply activation and batch norm before conv
    data_format: tensor data format, "NCHW"/"NHWC"
    fused: use fused batch normalization if True

  Returns:
    A keras model
  """
  axis = 1 if data_format == "channels_first" else 3
  model = tf.keras.Sequential()
  if batch_norm_first:
    model.add(
        tf.keras.layers.BatchNormalization(
            axis=axis, input_shape=input_shape, fused=fused))
    model.add(tf.keras.layers.Activation("relu"))
  model.add(
      tf.keras.layers.Conv2D(
          filters=filters // 4,
          kernel_size=1,
          strides=strides,
          input_shape=input_shape,
          data_format=data_format,
          use_bias=False,
          padding="SAME"))

  model.add(tf.keras.layers.BatchNormalization(axis=axis, fused=fused))
  model.add(tf.keras.layers.Activation("relu"))
  model.add(
      tf.keras.layers.Conv2D(
          filters=filters // 4,
          kernel_size=3,
          strides=(1, 1),
          data_format=data_format,
          use_bias=False,
          padding="SAME"))

  model.add(tf.keras.layers.BatchNormalization(axis=axis, fused=fused))
  model.add(tf.keras.layers.Activation("relu"))
  model.add(
      tf.keras.layers.Conv2D(
          filters=filters,
          kernel_size=1,
          strides=(1, 1),
          data_format=data_format,
          use_bias=False,
          padding="SAME"))

  return model


def _ResidualInner(filters,
                   strides,
                   input_shape,
                   batch_norm_first=True,
                   data_format="channels_first",
                   fused=True):
  """Single residual inner function contained in a _Residual block.

  Corresponds to the `F`/`G` functions in the paper.

  Args:
    filters: output filter size
    strides: length 2 list/tuple of integers for height and width strides
    input_shape: length 3 list/tuple of integers
    batch_norm_first: whether to apply activation and batch norm before conv
    data_format: tensor data format, "NCHW"/"NHWC"
    fused: use fused batch normalization if True

  Returns:
    A keras model
  """
  axis = 1 if data_format == "channels_first" else 3
  model = tf.keras.Sequential()
  if batch_norm_first:
    model.add(
        tf.keras.layers.BatchNormalization(
            axis=axis, input_shape=input_shape, fused=fused))
    model.add(tf.keras.layers.Activation("relu"))
  model.add(
      tf.keras.layers.Conv2D(
          filters=filters,
          kernel_size=3,
          strides=strides,
          input_shape=input_shape,
          data_format=data_format,
          use_bias=False,
          padding="SAME"))

  model.add(tf.keras.layers.BatchNormalization(axis=axis, fused=fused))
  model.add(tf.keras.layers.Activation("relu"))
  model.add(
      tf.keras.layers.Conv2D(
          filters=filters,
          kernel_size=3,
          strides=(1, 1),
          data_format=data_format,
          use_bias=False,
          padding="SAME"))

  return model
apache-2.0
-6,062,346,454,275,266,000
32.687845
80
0.621812
false
sztomi/conan-msgpack
conanfile.py
1
1063
from conans import ConanFile, tools import os class MsgpackConan(ConanFile): name = "msgpack" version = "2.1.5" license = "Boost Software License 1.0" url = "https://github.com/sztomi/conan-msgpack" description = "The official C++ library for MessagePack" @property def extracted_dir(self): return "msgpack-{0}".format(self.version) def source(self): archive = "msgpack.tar.gz" tools.download("https://github.com/msgpack/msgpack-c/releases/download" "/cpp-{v}/msgpack-{v}.tar.gz".format(v=self.version), archive) tools.untargz(archive) def build(self): pass # silence warning def package(self): self.copy("*.h", dst="include", src=os.path.join(self.extracted_dir, "include")) self.copy("*.hpp", dst="include", src=os.path.join(self.extracted_dir, "include")) def package_info(self): self.cpp_info.includedirs.append(os.path.join(self.package_folder, "include"))
mit
-3,734,861,266,448,066,000
30.264706
86
0.600188
false
akittas/geocoder
geocoder/google_places.py
1
5188
#!/usr/bin/python # coding: utf8 from __future__ import absolute_import import time from geocoder.base import Base from geocoder.keys import google_key # todo: Paging (pagetoken) is not fully supported since we only return the first result. Need to return all results to the user so paging will make sense # todo: Add support for missing results fields html_attributions, opening_hours, photos, scope, alt_ids, types [not just the first one] # todo: Add support for nearbysearch and radarsearch variations of the Google Places API class Places(Base): """ Google Places API ==================== The Google Places API Web Service allows you to query for place information on a variety of categories, such as: establishments, prominent points of interest, geographic locations, and more. You can search for places either by proximity or a text string. A Place Search returns a list of places along with summary information about each place; additional information is available via a Place Details query. At this time, only the "Text Search" is supported by this library. "Text Search" can be used when you don't have pristine formatted addresses required by the regular Google Maps Geocoding API or when you want to do 'nearby' searches like 'restaurants near Sydney'. The Geocoding best practices reference indicates that when you have 'ambiguous queries in an automated system you would be better served using the Places API Text Search than the Maps Geocoding API https://developers.google.com/maps/documentation/geocoding/best-practices API Reference ------------- https://developers.google.com/places/web-service/intro https://developers.google.com/places/web-service/search l = geocoder.google('Elm Plaza Shopping Center, Enfield, CT 06082', method='places') l = geocoder.google('food near white house', method='places') l = geocoder.google('1st and main', method='places') Parameters ---------- :param query: Your search location or phrase you want geocoded. :param key: Your Google developers free key. :param location: (optional) lat,lng point around which results will be given preference :param radius: (optional) in meters, used with location :param language: (optional) 2-letter code of preferred language of returned address elements. :param minprice: (optional) 0 (most affordable) to 4 (most expensive) :param maxprice: (optional) 0 (most affordable) to 4 (most expensive) :param opennow: (optional) value is ignored. when present, closed places and places without opening hours will be omitted :param pagetoken: (optional) get next 20 results from previously run search. 
when set, other criteria are ignored :param type: (optional) restrict results to one type of place """ provider = 'google' method = 'places' def __init__(self, query, **kwargs): self.url = 'https://maps.googleapis.com/maps/api/place/textsearch/json' self.location = query self.params = { # required 'query': self.location, 'key': kwargs.get('key', google_key), # optional 'location': kwargs.get('location', ''), 'radius': kwargs.get('radius', ''), 'language': kwargs.get('language', ''), 'minprice': kwargs.get('minprice', ''), 'maxprice': kwargs.get('maxprice', ''), 'type': kwargs.get('type', ''), } # optional, don't send unless needed if 'opennow' in kwargs: self.params['opennow'] = '' # optional, don't send unless needed if 'pagetoken' in kwargs: self.params['pagetoken'] = kwargs['pagetoken'] self._initialize(**kwargs) def _exceptions(self): if self.parse['results']: self._build_tree(self.parse['results'][0]) @property def lat(self): return self.parse['location'].get('lat') @property def lng(self): return self.parse['location'].get('lng') @property def id(self): return self.parse.get('id') @property def reference(self): return self.parse.get('reference') @property def place_id(self): return self.parse.get('place_id') @property def type(self): type = self.parse.get('types') if type: return type[0] @property def address(self): return self.parse.get('formatted_address') @property def icon(self): return self.parse.get('icon') @property def name(self): return self.parse.get('name') @property def vicinity(self): return self.parse.get('vicinity') @property def price_level(self): return self.parse.get('price_level') @property def rating(self): return self.parse.get('rating') @property def next_page_token(self): return self.parse.get('next_page_token') @property def query(self): return self.location if __name__ == '__main__': g = Places('11 Wall Street, New York', method='places', key='<API KEY>') g.debug()
mit
7,096,630,817,115,258,000
33.818792
154
0.650925
false
rcmorehead/simplanets
simptest.py
1
1124
import matplotlib matplotlib.use('Agg') import simple_abc import simple_model #from astropy.io import ascii import numpy as np import pickle import pylab as plt import time from scipy import stats def main(): #TODO Choose new random seed after testing np.random.seed(917) steps = 5 eps = 0.25 min_part = 10 #stars = pickle.load(file('stars.pkl')) stars = pickle.load(file('stars_trimmed.pkl')) #obs = pickle.load(file('data.pkl')) model = simple_model.MyModel(stars) model.set_prior([stats.uniform(0.5, 1.0), stats.uniform(0, 1.0)]) theta = (0.513265306122, 0.1) obs = model.generate_data(theta) model.set_data(obs) n_procs = [1, 2, 3, 4, 5, 6, 7, 8] start = time.time() OT = simple_abc.pmc_abc(model, obs, epsilon_0=eps, min_particles=min_part, steps=steps, target_epsilon=eps, parallel=False, plot=True) end = time.time() print 'Serial took {}s'.format(end - start) out_pickle = file('simptest.pkl', 'w') pickle.dump(OT, out_pickle) out_pickle.close() if __name__ == '__main__': main()
mit
456,676,408,633,863,000
19.814815
91
0.622776
false
Implaier/CS50
PSET6/sentiments/helpers.py
1
3013
import os import sys import html import plotly import socket from twython import Twython from analyzer import Analyzer from twython import TwythonAuthError, TwythonError, TwythonRateLimitError def chart(positive, negative, neutral): """Return a pie chart for specified sentiments as HTML.""" # offline plot # https://plot.ly/python/pie-charts/ # https://plot.ly/python/reference/#pie figure = { "data": [ { "labels": ["positive", "negative", "neutral"], "hoverinfo": "none", "marker": { "colors": ["rgb(0,255,00)", "rgb(255,0,0)", "rgb(255,255,0)"] }, "type": "pie", "values": [positive, negative, neutral], } ], "layout": {"showlegend": True}, } return plotly.offline.plot( figure, output_type="div", show_link=False, link_text=False ) def get_user_timeline(screen_name, count=200): """Return list of most recent tweets posted by screen_name.""" # ensure count is valid if count < 1 or count > 200: raise RuntimeError("invalid count") # ensure environment variables are set if not os.environ.get("API_KEY"): raise RuntimeError("API_KEY not set") if not os.environ.get("API_SECRET"): raise RuntimeError("API_SECRET not set") # get screen_name's (or @screen_name's) most recent tweets # https://dev.twitter.com/rest/reference/get/users/lookup # https://dev.twitter.com/rest/reference/get/statuses/user_timeline # https://github.com/ryanmcgrath/twython/blob/master/twython/endpoints.py try: twitter = Twython(os.environ.get("API_KEY"), os.environ.get("API_SECRET")) user = twitter.lookup_user(screen_name=screen_name.lstrip("@")) if user[0]["protected"]: return None tweets = twitter.get_user_timeline(screen_name=screen_name, count=count) return [html.unescape(tweet["text"].replace("\n", " ")) for tweet in tweets] except TwythonAuthError: raise RuntimeError("invalid API_KEY and/or API_SECRET") from None except TwythonRateLimitError: raise RuntimeError("you've hit a rate limit") from None except TwythonError: return None def get_tweets_stats(tweets): """ Analyze tweets list for positive, negative and neutral and return overall stats :param tweets: list containing tweets :return: tuple with positive, negative and neutral score integers """ positive = negative = neutral = 0 positive_words = os.path.join(sys.path[0], "positive-words.txt") negative_words = os.path.join(sys.path[0], "negative-words.txt") analyzer = Analyzer(positive_words, negative_words) for tweet in tweets: score = analyzer.analyze(tweet) if score > 0: positive += 1 elif score < 0: negative += 1 else: neutral += 1 return positive, negative, neutral
unlicense
4,223,524,419,650,702,300
32.853933
84
0.622635
false
mlperf/training_results_v0.7
Fujitsu/benchmarks/resnet/implementations/implementation_open/mxnet/3rdparty/tvm/vta/python/vta/build_module.py
1
3626
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""VTA specific builtin for runtime."""
from __future__ import absolute_import as _abs

import tvm
from . import ir_pass
from .environment import get_env


def lift_coproc_scope(x):
    """Lift allocations to the scope beginning and hoist the coproc_scope attribute."""
    x = ir_pass.lift_alloc_to_scope_begin(x)
    x = tvm.ir_pass.LiftAttrScope(x, "coproc_scope", False)
    return x


def early_rewrite(stmt):
    """Try to do storage rewrite in early pass."""
    try:
        return tvm.ir_pass.StorageRewrite(stmt)
    except tvm.TVMError:
        return stmt


def build_config(debug_flag=0, **kwargs):
    """Build a build config for VTA.

    Parameters
    ----------
    debug_flag : int
        The debug flag to be passed.

    kwargs : dict
        Additional configurations.

    Returns
    -------
    build_config: BuildConfig
        The build config that can be used in TVM.

    Example
    --------
    .. code-block:: python

      # build a vta module.
      with vta.build_config():
          vta_module = tvm.build(s, ...)
    """
    env = get_env()

    def add_debug(stmt):
        debug = tvm.call_extern(
            "int32", "VTASetDebugMode",
            env.dev.command_handle,
            debug_flag)
        return tvm.make.stmt_seq(debug, stmt)

    pass_list = [(1, ir_pass.inject_dma_intrin),
                 (1, ir_pass.inject_skip_copy),
                 (1, ir_pass.annotate_alu_coproc_scope),
                 (1, lambda x: tvm.ir_pass.LiftAttrScope(x, "coproc_uop_scope", True)),
                 (1, lift_coproc_scope),
                 (1, ir_pass.inject_coproc_sync),
                 (1, early_rewrite)]
    if debug_flag:
        pass_list.append((1, add_debug))
    pass_list.append((2, ir_pass.inject_alu_intrin))
    pass_list.append((3, ir_pass.fold_uop_loop))
    pass_list.append((3, ir_pass.cpu_access_rewrite))
    return tvm.build_config(add_lower_pass=pass_list, **kwargs)


def lower(*args, **kwargs):
    """Thin wrapper of tvm.lower

    This wrapper automatically applies VTA's build_config
    if there is no user specified build_config in context.

    See Also
    --------
    tvm.lower : The original TVM's lower function
    """
    cfg = tvm.build_module.current_build_config()
    if not cfg.add_lower_pass:
        with build_config():
            return tvm.lower(*args, **kwargs)
    return tvm.lower(*args, **kwargs)


def build(*args, **kwargs):
    """Thin wrapper of tvm.build

    This wrapper automatically applies VTA's build_config
    if there is no user specified build_config in context.

    See Also
    --------
    tvm.build : The original TVM's build function
    """
    cfg = tvm.build_module.current_build_config()
    if not cfg.add_lower_pass:
        with build_config():
            return tvm.build(*args, **kwargs)
    return tvm.build(*args, **kwargs)
apache-2.0
7,045,586,717,280,269,000
29.728814
87
0.638169
false
tbabej/freeipa
ipaplatform/redhat/tasks.py
1
17147
# Authors: Simo Sorce <[email protected]> # Alexander Bokovoy <[email protected]> # Martin Kosek <[email protected]> # Tomas Babej <[email protected]> # # Copyright (C) 2007-2014 Red Hat # see file 'COPYING' for use and warranty information # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. ''' This module contains default Red Hat OS family-specific implementations of system tasks. ''' from __future__ import print_function import os import socket import base64 import traceback from cffi import FFI from ctypes.util import find_library from functools import total_ordering from subprocess import CalledProcessError from nss.error import NSPRError from pyasn1.error import PyAsn1Error from six.moves import urllib from ipapython.ipa_log_manager import root_logger, log_mgr from ipapython import ipautil import ipapython.errors from ipalib import x509 # FIXME: do not import from ipalib from ipaplatform.constants import constants from ipaplatform.paths import paths from ipaplatform.redhat.authconfig import RedHatAuthConfig from ipaplatform.base.tasks import BaseTaskNamespace _ffi = FFI() _ffi.cdef(""" int rpmvercmp (const char *a, const char *b); """) # use ctypes loader to get correct librpm.so library version according to # https://cffi.readthedocs.org/en/latest/overview.html#id8 _librpm = _ffi.dlopen(find_library("rpm")) log = log_mgr.get_logger(__name__) def selinux_enabled(): """ Check if SELinux is enabled. """ if os.path.exists(paths.SELINUXENABLED): try: ipautil.run([paths.SELINUXENABLED]) return True except ipautil.CalledProcessError: # selinuxenabled returns 1 if not enabled return False else: # No selinuxenabled, no SELinux return False @total_ordering class IPAVersion(object): def __init__(self, version): self.version = version def __eq__(self, other): assert isinstance(other, IPAVersion) return _librpm.rpmvercmp(self.version, other.version) == 0 def __lt__(self, other): assert isinstance(other, IPAVersion) return _librpm.rpmvercmp(self.version, other.version) < 0 class RedHatTaskNamespace(BaseTaskNamespace): def restore_context(self, filepath, restorecon=paths.SBIN_RESTORECON): """ restore security context on the file path SELinux equivalent is /path/to/restorecon <filepath> restorecon's return values are not reliable so we have to ignore them (BZ #739604). ipautil.run() will do the logging. """ if not selinux_enabled(): return if (os.path.exists(restorecon)): ipautil.run([restorecon, filepath], raiseonerr=False) def check_selinux_status(self, restorecon=paths.RESTORECON): """ We don't have a specific package requirement for policycoreutils which provides restorecon. This is because we don't require SELinux on client installs. However if SELinux is enabled then this package is required. This function returns nothing but may raise a Runtime exception if SELinux is enabled but restorecon is not available. 
""" if not selinux_enabled(): return if not os.path.exists(restorecon): raise RuntimeError('SELinux is enabled but %s does not exist.\n' 'Install the policycoreutils package and start ' 'the installation again.' % restorecon) def restore_pre_ipa_client_configuration(self, fstore, statestore, was_sssd_installed, was_sssd_configured): auth_config = RedHatAuthConfig() if statestore.has_state('authconfig'): # disable only those configurations that we enabled during install for conf in ('ldap', 'krb5', 'sssd', 'sssdauth', 'mkhomedir'): cnf = statestore.restore_state('authconfig', conf) # Do not disable sssd, as this can cause issues with its later # uses. Remove it from statestore however, so that it becomes # empty at the end of uninstall process. if cnf and conf != 'sssd': auth_config.disable(conf) else: # There was no authconfig status store # It means the code was upgraded after original install # Fall back to old logic auth_config.disable("ldap") auth_config.disable("krb5") if not(was_sssd_installed and was_sssd_configured): # Only disable sssdauth. Disabling sssd would cause issues # with its later uses. auth_config.disable("sssdauth") auth_config.disable("mkhomedir") auth_config.execute() def set_nisdomain(self, nisdomain): # Let authconfig setup the permanent configuration auth_config = RedHatAuthConfig() auth_config.add_parameter("nisdomain", nisdomain) auth_config.execute() def modify_nsswitch_pam_stack(self, sssd, mkhomedir, statestore): auth_config = RedHatAuthConfig() if sssd: statestore.backup_state('authconfig', 'sssd', True) statestore.backup_state('authconfig', 'sssdauth', True) auth_config.enable("sssd") auth_config.enable("sssdauth") else: statestore.backup_state('authconfig', 'ldap', True) auth_config.enable("ldap") auth_config.enable("forcelegacy") if mkhomedir: statestore.backup_state('authconfig', 'mkhomedir', True) auth_config.enable("mkhomedir") auth_config.execute() def modify_pam_to_use_krb5(self, statestore): auth_config = RedHatAuthConfig() statestore.backup_state('authconfig', 'krb5', True) auth_config.enable("krb5") auth_config.add_option("nostart") auth_config.execute() def backup_auth_configuration(self, path): auth_config = RedHatAuthConfig() auth_config.backup(path) def restore_auth_configuration(self, path): auth_config = RedHatAuthConfig() auth_config.restore(path) def reload_systemwide_ca_store(self): try: ipautil.run([paths.UPDATE_CA_TRUST]) except CalledProcessError as e: root_logger.error( "Could not update systemwide CA trust database: %s", e) return False else: root_logger.info("Systemwide CA database updated.") return True def insert_ca_certs_into_systemwide_ca_store(self, ca_certs): new_cacert_path = paths.SYSTEMWIDE_IPA_CA_CRT if os.path.exists(new_cacert_path): try: os.remove(new_cacert_path) except OSError as e: root_logger.error( "Could not remove %s: %s", new_cacert_path, e) return False new_cacert_path = paths.IPA_P11_KIT try: f = open(new_cacert_path, 'w') except IOError as e: root_logger.info("Failed to open %s: %s" % (new_cacert_path, e)) return False f.write("# This file was created by IPA. 
Do not edit.\n" "\n") has_eku = set() for cert, nickname, trusted, ext_key_usage in ca_certs: try: subject = x509.get_der_subject(cert, x509.DER) issuer = x509.get_der_issuer(cert, x509.DER) serial_number = x509.get_der_serial_number(cert, x509.DER) public_key_info = x509.get_der_public_key_info(cert, x509.DER) except (NSPRError, PyAsn1Error, ValueError) as e: root_logger.warning( "Failed to decode certificate \"%s\": %s", nickname, e) continue label = urllib.parse.quote(nickname) subject = urllib.parse.quote(subject) issuer = urllib.parse.quote(issuer) serial_number = urllib.parse.quote(serial_number) public_key_info = urllib.parse.quote(public_key_info) cert = base64.b64encode(cert) cert = x509.make_pem(cert) obj = ("[p11-kit-object-v1]\n" "class: certificate\n" "certificate-type: x-509\n" "certificate-category: authority\n" "label: \"%(label)s\"\n" "subject: \"%(subject)s\"\n" "issuer: \"%(issuer)s\"\n" "serial-number: \"%(serial_number)s\"\n" "x-public-key-info: \"%(public_key_info)s\"\n" % dict(label=label, subject=subject, issuer=issuer, serial_number=serial_number, public_key_info=public_key_info)) if trusted is True: obj += "trusted: true\n" elif trusted is False: obj += "x-distrusted: true\n" obj += "%s\n\n" % cert f.write(obj) if ext_key_usage is not None and public_key_info not in has_eku: if not ext_key_usage: ext_key_usage = {x509.EKU_PLACEHOLDER} try: ext_key_usage = x509.encode_ext_key_usage(ext_key_usage) except PyAsn1Error as e: root_logger.warning( "Failed to encode extended key usage for \"%s\": %s", nickname, e) continue value = urllib.parse.quote(ext_key_usage) obj = ("[p11-kit-object-v1]\n" "class: x-certificate-extension\n" "label: \"ExtendedKeyUsage for %(label)s\"\n" "x-public-key-info: \"%(public_key_info)s\"\n" "object-id: 2.5.29.37\n" "value: \"%(value)s\"\n\n" % dict(label=label, public_key_info=public_key_info, value=value)) f.write(obj) has_eku.add(public_key_info) f.close() # Add the CA to the systemwide CA trust database if not self.reload_systemwide_ca_store(): return False return True def remove_ca_certs_from_systemwide_ca_store(self): result = True update = False # Remove CA cert from systemwide store for new_cacert_path in (paths.IPA_P11_KIT, paths.SYSTEMWIDE_IPA_CA_CRT): if not os.path.exists(new_cacert_path): continue try: os.remove(new_cacert_path) except OSError as e: root_logger.error( "Could not remove %s: %s", new_cacert_path, e) result = False else: update = True if update: if not self.reload_systemwide_ca_store(): return False return result def backup_and_replace_hostname(self, fstore, statestore, hostname): old_hostname = socket.gethostname() try: self.set_hostname(hostname) except ipautil.CalledProcessError as e: root_logger.debug(traceback.format_exc()) root_logger.error( "Failed to set this machine hostname to %s (%s).", old_hostname, e ) filepath = paths.ETC_HOSTNAME if os.path.exists(filepath): fstore.backup_file(filepath) # store old hostname statestore.backup_state('network', 'hostname', old_hostname) def restore_hostname(self, fstore, statestore): old_hostname = statestore.get_state('network', 'hostname') if old_hostname is not None: try: self.set_hostname(old_hostname) except ipautil.CalledProcessError as e: root_logger.debug(traceback.format_exc()) root_logger.error( "Failed to restore this machine hostname to %s (%s).", old_hostname, e ) filepath = paths.ETC_HOSTNAME if fstore.has_file(filepath): fstore.restore_file(filepath) def set_selinux_booleans(self, required_settings, backup_func=None): def get_setsebool_args(changes): args = 
[paths.SETSEBOOL, "-P"] args.extend(["%s=%s" % update for update in changes.items()]) return args if not selinux_enabled(): return False updated_vars = {} failed_vars = {} for setting, state in required_settings.items(): if state is None: continue try: result = ipautil.run( [paths.GETSEBOOL, setting], capture_output=True ) original_state = result.output.split()[2] if backup_func is not None: backup_func(setting, original_state) if original_state != state: updated_vars[setting] = state except ipautil.CalledProcessError as e: log.error("Cannot get SELinux boolean '%s': %s", setting, e) failed_vars[setting] = state if updated_vars: args = get_setsebool_args(updated_vars) try: ipautil.run(args) except ipautil.CalledProcessError: failed_vars.update(updated_vars) if failed_vars: raise ipapython.errors.SetseboolError( failed=failed_vars, command=' '.join(get_setsebool_args(failed_vars))) return True def create_system_user(self, name, group, homedir, shell, uid=None, gid=None, comment=None, create_homedir=False): """ Create a system user with a corresponding group According to https://fedoraproject.org/wiki/Packaging:UsersAndGroups?rd=Packaging/UsersAndGroups#Soft_static_allocation some system users should have fixed UID, GID and other parameters set. This values should be constant and may be hardcoded. Add other values for other users when needed. """ if name == constants.PKI_USER: if uid is None: uid = 17 if gid is None: gid = 17 if comment is None: comment = 'CA System User' if name == constants.DS_USER: if comment is None: comment = 'DS System User' super(RedHatTaskNamespace, self).create_system_user(name, group, homedir, shell, uid, gid, comment, create_homedir) def parse_ipa_version(self, version): """ :param version: textual version :return: object implementing proper __cmp__ method for version compare """ return IPAVersion(version) def configure_httpd_service_ipa_conf(self): """Create systemd config for httpd service to work with IPA """ if not os.path.exists(paths.SYSTEMD_SYSTEM_HTTPD_D_DIR): os.mkdir(paths.SYSTEMD_SYSTEM_HTTPD_D_DIR, 0o755) ipautil.copy_template_file( os.path.join(ipautil.SHARE_DIR, 'ipa-httpd.conf.template'), paths.SYSTEMD_SYSTEM_HTTPD_IPA_CONF, dict( KRB5CC_HTTPD=paths.KRB5CC_HTTPD, KDCPROXY_CONFIG=paths.KDCPROXY_CONFIG, IPA_HTTPD_KDCPROXY=paths.IPA_HTTPD_KDCPROXY, POST='-{kdestroy} -A'.format(kdestroy=paths.KDESTROY) ) ) os.chmod(paths.SYSTEMD_SYSTEM_HTTPD_IPA_CONF, 0o644) self.restore_context(paths.SYSTEMD_SYSTEM_HTTPD_IPA_CONF) def remove_httpd_service_ipa_conf(self): """Remove systemd config for httpd service of IPA""" try: os.unlink(paths.SYSTEMD_SYSTEM_HTTPD_IPA_CONF) except OSError as e: root_logger.error( 'Error removing %s: %s', paths.SYSTEMD_SYSTEM_HTTPD_IPA_CONF, e ) def set_hostname(self, hostname): ipautil.run([paths.BIN_HOSTNAMECTL, 'set-hostname', hostname]) tasks = RedHatTaskNamespace()
gpl-3.0
-7,307,145,778,736,525,000
35.251586
127
0.576252
false
clarete/curdling
tests/functional/test_wheel.py
1
2336
# Curdling - Concurrent package manager for Python
#
# Copyright (C) 2014 Lincoln Clarete <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

from curdling.wheel import Wheel
from . import FIXTURE


def test_read_basic_fields():
    "Wheel.from_file() Should parse a `.whl` archive"

    # Given the wheel present in our file system
    wheel_file = FIXTURE('storage2/gherkin-0.1.0-py27-none-any.whl')

    # When I parse it
    wheel = Wheel.from_file(wheel_file)

    # Then I see that the wheel file was successfully read
    wheel.distribution.should.equal('gherkin')
    wheel.version.should.equal('0.1.0')
    wheel.build.should.be.none
    wheel.tags.pyver.should.equal('py27')
    wheel.tags.abi.should.be.none
    wheel.tags.arch.should.be.none


def test_read_wheel_file():
    """Wheel.from_file() Should parse the WHEEL file of the .whl archive

    The information inside of this file will be used as data source for
    the `Wheel.info()` method.
    """
    # Given the wheel present in our file system
    wheel_file = FIXTURE('storage2/gherkin-0.1.0-py27-none-any.whl')

    # When I parse it
    wheel = Wheel.from_file(wheel_file)

    # Then I see that the file WHEEL was correctly parsed
    wheel.info().should.equal({
        'Wheel-Version': '1.0',
        'Generator': 'bdist_wheel (0.21.0)',
        'Root-Is-Purelib': 'true',
        'Tag': ['py27-none-any'],
    })

    # # Then I see it should contain the following
    # files = {
    #     '/', ['blah.py']
    #     'dist-info': [
    #         'DESCRIPTION.rst',
    #         'pydist.json',
    #         'top_level.txt',
    #         'WHEEL',
    #         'METADATA',
    #         'RECORD',
    #     ]
    # }
gpl-3.0
-8,828,636,233,160,780,000
31
72
0.650257
false
pingswept/pysolar
test/test_solar.py
1
10841
#!/usr/bin/python3 # Library for calculating location of the sun # Copyright Brandon Stafford # # This file is part of Pysolar. # # Pysolar is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # Pysolar is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with Pysolar. If not, see <http://www.gnu.org/licenses/>. from pysolar import \ solar, \ constants, \ solartime as stime, \ elevation import datetime import unittest class TestSolar(unittest.TestCase): def setUp(self): self.d = datetime.datetime(2003, 10, 17, 19, 30, 30, tzinfo = datetime.timezone.utc) # only works with Python 3 self.d += datetime.timedelta(seconds = stime.get_delta_t(self.d) - stime.tt_offset - stime.get_leap_seconds(self.d)) # Reda & Andreas say that this time is in "Local Standard Time", which they # define as 7 hours behind UT (not UTC). Hence the adjustment to convert UT # to UTC. self.longitude = -105.1786 self.latitude = 39.742476 self.pressure = 82000.0 # pascals self.elevation = 1830.14 # meters self.temperature = 11.0 + constants.celsius_offset # kelvin self.slope = 30.0 # degrees self.slope_orientation = -10.0 # degrees east from south self.jd = stime.get_julian_solar_day(self.d) self.jc = stime.get_julian_century(self.jd) self.jde = stime.get_julian_ephemeris_day(self.d) self.jce = stime.get_julian_ephemeris_century(self.jde) self.jme = stime.get_julian_ephemeris_millennium(self.jce) self.geocentric_longitude = solar.get_geocentric_longitude(self.jme) self.geocentric_latitude = solar.get_geocentric_latitude(self.jme) self.nutation = solar.get_nutation(self.jce) self.sun_earth_distance = solar.get_sun_earth_distance(self.jme) self.true_ecliptic_obliquity = solar.get_true_ecliptic_obliquity(self.jme, self.nutation) self.aberration_correction = solar.get_aberration_correction(self.sun_earth_distance) self.apparent_sun_longitude = solar.get_apparent_sun_longitude(self.geocentric_longitude, self.nutation, self.aberration_correction) self.apparent_sidereal_time = solar.get_apparent_sidereal_time(self.jd, self.jme, self.nutation) self.geocentric_sun_right_ascension = solar.get_geocentric_sun_right_ascension(self.apparent_sun_longitude, self.true_ecliptic_obliquity, self.geocentric_latitude) self.geocentric_sun_declination = solar.get_geocentric_sun_declination(self.apparent_sun_longitude, self.true_ecliptic_obliquity, self.geocentric_latitude) self.local_hour_angle = solar.get_local_hour_angle(318.5119, self.longitude, self.geocentric_sun_right_ascension) #self.apparent_sidereal_time only correct to 5 sig figs, so override self.equatorial_horizontal_parallax = solar.get_equatorial_horizontal_parallax(self.sun_earth_distance) self.projected_radial_distance = solar.get_projected_radial_distance(self.elevation, self.latitude) self.projected_axial_distance = solar.get_projected_axial_distance(self.elevation, self.latitude) self.topocentric_sun_right_ascension = solar.get_topocentric_sun_right_ascension(self.projected_radial_distance, self.equatorial_horizontal_parallax, self.local_hour_angle, self.apparent_sun_longitude, self.true_ecliptic_obliquity, self.geocentric_latitude) 
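        # The quantities computed below apply the parallax/topocentric corrections
        # for the observer's location on the Earth's surface; the expected values
        # asserted in the tests further down come from Reda and Andreas (2005).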
self.parallax_sun_right_ascension = solar.get_parallax_sun_right_ascension(self.projected_radial_distance, self.equatorial_horizontal_parallax, self.local_hour_angle, self.geocentric_sun_declination) self.topocentric_sun_declination = solar.get_topocentric_sun_declination(self.geocentric_sun_declination, self.projected_axial_distance, self.equatorial_horizontal_parallax, self.parallax_sun_right_ascension, self.local_hour_angle) self.topocentric_local_hour_angle = solar.get_topocentric_local_hour_angle(self.local_hour_angle, self.parallax_sun_right_ascension) self.topocentric_zenith_angle = solar.get_topocentric_zenith_angle(self.latitude, self.topocentric_sun_declination, self.topocentric_local_hour_angle, self.pressure, self.temperature) self.topocentric_azimuth_angle = solar.get_topocentric_azimuth_angle(self.topocentric_local_hour_angle, self.latitude, self.topocentric_sun_declination) self.incidence_angle = solar.get_incidence_angle(self.topocentric_zenith_angle, self.slope, self.slope_orientation, self.topocentric_azimuth_angle) self.pressure_with_elevation = elevation.get_pressure_with_elevation(1567.7) self.temperature_with_elevation = elevation.get_temperature_with_elevation(1567.7) def test_get_julian_solar_day(self): self.assertAlmostEqual(2452930.312847, self.jd, 6) # value from Reda and Andreas (2005) def test_get_julian_ephemeris_day(self): self.assertAlmostEqual(2452930.3136, self.jde, 4) # value not validated def test_get_julian_century(self): self.assertAlmostEqual(0.03792779869191517, self.jc, 12) # value not validated def test_get_julian_ephemeris_millennium(self): self.assertAlmostEqual(0.0037927819143886397, self.jme, 12) # value not validated def test_get_geocentric_longitude(self): # self.assertAlmostEqual(204.0182635175, self.geocentric_longitude, 10) # value from Reda and Andreas (2005) self.assertAlmostEqual(204.0182635175, self.geocentric_longitude, 4) # above fails with more accurate Julian Ephemeris correction def test_get_geocentric_latitude(self): # self.assertAlmostEqual(0.0001011219, self.geocentric_latitude, 9) # value from Reda and Andreas (2005) self.assertAlmostEqual(0.0001011219, self.geocentric_latitude, 8) # above fails with more accurate Julian Ephemeris correction def test_get_nutation(self): self.assertAlmostEqual(0.00166657, self.nutation['obliquity'], 8) # value from Reda and Andreas (2005) self.assertAlmostEqual(-0.00399840, self.nutation['longitude'], 8) # value from Reda and Andreas (2005) def test_get_sun_earth_distance(self): self.assertAlmostEqual(0.9965421031, self.sun_earth_distance, 6) # value from Reda and Andreas (2005) def test_get_true_ecliptic_obliquity(self): self.assertAlmostEqual(23.440465, self.true_ecliptic_obliquity, 6) # value from Reda and Andreas (2005) def test_get_aberration_correction(self): self.assertAlmostEqual(-0.005711359, self.aberration_correction, 9) # value not validated def test_get_apparent_sun_longitude(self): # self.assertAlmostEqual(204.0085537528, self.apparent_sun_longitude, 10) # value from Reda and Andreas (2005) self.assertAlmostEqual(204.0085537528, self.apparent_sun_longitude, 4) # above fails with more accurate Julian Ephemeris correction def test_get_apparent_sidereal_time(self): self.assertAlmostEqual(318.5119, self.apparent_sidereal_time, 2) # value derived from Reda and Andreas (2005) def test_get_geocentric_sun_right_ascension(self): self.assertAlmostEqual(202.22741, self.geocentric_sun_right_ascension, 4) # value from Reda and Andreas (2005) def test_get_geocentric_sun_declination(self): 
self.assertAlmostEqual(-9.31434, self.geocentric_sun_declination, 4) # value from Reda and Andreas (2005) def test_get_local_hour_angle(self): self.assertAlmostEqual(11.105900, self.local_hour_angle, 4) # value from Reda and Andreas (2005) def test_get_projected_radial_distance(self): self.assertAlmostEqual(0.7702006, self.projected_radial_distance, 6) # value not validated def test_get_topocentric_sun_right_ascension(self): self.assertAlmostEqual(202.22741, self.topocentric_sun_right_ascension, 3) # value from Reda and Andreas (2005) def test_get_parallax_sun_right_ascension(self): self.assertAlmostEqual(-0.0003659912761437859, self.parallax_sun_right_ascension, 12) # value not validated def test_get_topocentric_sun_declination(self): self.assertAlmostEqual(-9.316179, self.topocentric_sun_declination, 3) # value from Reda and Andreas (2005) def test_get_topocentric_local_hour_angle(self): self.assertAlmostEqual(11.10629, self.topocentric_local_hour_angle, 4) # value from Reda and Andreas (2005) def test_get_topocentric_zenith_angle(self): self.assertAlmostEqual(50.11162, self.topocentric_zenith_angle, 3) # value from Reda and Andreas (2005) def test_get_topocentric_azimuth_angle(self): # self.assertAlmostEqual(194.34024, self.topocentric_azimuth_angle, 5) # value from Reda and Andreas (2005) self.assertAlmostEqual(194.34024, self.topocentric_azimuth_angle, 4) # above fails with more accurate Julian Ephemeris correction def test_get_incidence_angle(self): self.assertAlmostEqual(25.18700, self.incidence_angle, 3) # value from Reda and Andreas (2005) def testPressureWithElevation(self): self.assertAlmostEqual(83855.90228, self.pressure_with_elevation, 4) def testTemperatureWithElevation(self): self.assertAlmostEqual(277.9600, self.temperature_with_elevation, 4) class TestApi(unittest.TestCase): test_when = datetime.datetime(2016, 12, 19, 23, 0, 0, tzinfo=datetime.timezone.utc) def testGetPosition(self): az, al = solar.get_position(59.6365662,12.5350953, TestApi.test_when) self.assertAlmostEqual(az, 357.1431414) self.assertAlmostEqual(al, -53.7672217) az, al = solar.get_position(-43, 172, TestApi.test_when) self.assertAlmostEqual(az, 50.50035708) self.assertAlmostEqual(al, 63.0922036) # From Greenwich az, al = solar.get_position(51.4826, 0, TestApi.test_when) self.assertAlmostEqual(az, 333.04037976) self.assertAlmostEqual(al, -59.83724345) def testGetAltitude(self): al = solar.get_altitude(-43, 172, TestApi.test_when) self.assertAlmostEqual(al, 63.0922036) def testGetAzimuth(self): az = solar.get_azimuth(-43, 172, TestApi.test_when) self.assertAlmostEqual(az, 50.50035708) def testGetAltitudeFast(self): # location is in NZ, use relevant timezone day = datetime.datetime( 2016, 12, 19, 0, 0, tzinfo=datetime.timezone(datetime.timedelta(hours=12))) for hour in range(7, 19): when = day + datetime.timedelta(hours=hour) al = solar.get_altitude_fast(-43, 172, when) al_expected = solar.get_altitude(-43, 172, when) self.assertAlmostEqual(al, al_expected, delta=1) def testGetAzimuthFast(self): day = datetime.datetime( 2016, 12, 19, 0, 0, tzinfo=datetime.timezone(datetime.timedelta(hours=12))) for hour in range(7, 19): when = day + datetime.timedelta(hours=hour) az = solar.get_azimuth_fast(-43, 172, when) az_expected = solar.get_azimuth(-43, 172, when) self.assertAlmostEqual(az, az_expected, delta=1.5) if __name__ == "__main__": unittest.main(verbosity=2)
gpl-3.0
-8,645,552,175,771,938,000
51.882927
233
0.765889
false
joopert/home-assistant
homeassistant/components/zha/__init__.py
1
4608
"""Support for Zigbee Home Automation devices.""" import logging import voluptuous as vol from homeassistant import config_entries, const as ha_const import homeassistant.helpers.config_validation as cv from homeassistant.helpers.device_registry import CONNECTION_ZIGBEE # Loading the config flow file will register the flow from . import config_flow # noqa: F401 pylint: disable=unused-import from . import api from .core import ZHAGateway from .core.const import ( COMPONENTS, CONF_BAUDRATE, CONF_DATABASE, CONF_DEVICE_CONFIG, CONF_ENABLE_QUIRKS, CONF_RADIO_TYPE, CONF_USB_PATH, DATA_ZHA, DATA_ZHA_CONFIG, DATA_ZHA_DISPATCHERS, DATA_ZHA_GATEWAY, DEFAULT_BAUDRATE, DEFAULT_RADIO_TYPE, DOMAIN, RadioType, ) from .core.registries import establish_device_mappings DEVICE_CONFIG_SCHEMA_ENTRY = vol.Schema({vol.Optional(ha_const.CONF_TYPE): cv.string}) CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.Schema( { vol.Optional(CONF_RADIO_TYPE, default=DEFAULT_RADIO_TYPE): cv.enum( RadioType ), CONF_USB_PATH: cv.string, vol.Optional(CONF_BAUDRATE, default=DEFAULT_BAUDRATE): cv.positive_int, vol.Optional(CONF_DATABASE): cv.string, vol.Optional(CONF_DEVICE_CONFIG, default={}): vol.Schema( {cv.string: DEVICE_CONFIG_SCHEMA_ENTRY} ), vol.Optional(CONF_ENABLE_QUIRKS, default=True): cv.boolean, } ) }, extra=vol.ALLOW_EXTRA, ) # Zigbee definitions CENTICELSIUS = "C-100" # Internal definitions _LOGGER = logging.getLogger(__name__) async def async_setup(hass, config): """Set up ZHA from config.""" hass.data[DATA_ZHA] = {} if DOMAIN not in config: return True conf = config[DOMAIN] hass.data[DATA_ZHA][DATA_ZHA_CONFIG] = conf if not hass.config_entries.async_entries(DOMAIN): hass.async_create_task( hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data={ CONF_USB_PATH: conf[CONF_USB_PATH], CONF_RADIO_TYPE: conf.get(CONF_RADIO_TYPE).value, }, ) ) return True async def async_setup_entry(hass, config_entry): """Set up ZHA. Will automatically load components to support devices found on the network. 
""" establish_device_mappings() for component in COMPONENTS: hass.data[DATA_ZHA][component] = hass.data[DATA_ZHA].get(component, {}) hass.data[DATA_ZHA] = hass.data.get(DATA_ZHA, {}) hass.data[DATA_ZHA][DATA_ZHA_DISPATCHERS] = [] config = hass.data[DATA_ZHA].get(DATA_ZHA_CONFIG, {}) if config.get(CONF_ENABLE_QUIRKS, True): # needs to be done here so that the ZHA module is finished loading # before zhaquirks is imported import zhaquirks # noqa: F401 pylint: disable=unused-import zha_gateway = ZHAGateway(hass, config, config_entry) await zha_gateway.async_initialize() device_registry = await hass.helpers.device_registry.async_get_registry() device_registry.async_get_or_create( config_entry_id=config_entry.entry_id, connections={(CONNECTION_ZIGBEE, str(zha_gateway.application_controller.ieee))}, identifiers={(DOMAIN, str(zha_gateway.application_controller.ieee))}, name="Zigbee Coordinator", manufacturer="ZHA", model=zha_gateway.radio_description, ) for component in COMPONENTS: hass.async_create_task( hass.config_entries.async_forward_entry_setup(config_entry, component) ) api.async_load_api(hass) async def async_zha_shutdown(event): """Handle shutdown tasks.""" await hass.data[DATA_ZHA][DATA_ZHA_GATEWAY].shutdown() await hass.data[DATA_ZHA][DATA_ZHA_GATEWAY].async_update_device_storage() hass.bus.async_listen_once(ha_const.EVENT_HOMEASSISTANT_STOP, async_zha_shutdown) return True async def async_unload_entry(hass, config_entry): """Unload ZHA config entry.""" await hass.data[DATA_ZHA][DATA_ZHA_GATEWAY].shutdown() api.async_unload_api(hass) dispatchers = hass.data[DATA_ZHA].get(DATA_ZHA_DISPATCHERS, []) for unsub_dispatcher in dispatchers: unsub_dispatcher() for component in COMPONENTS: await hass.config_entries.async_forward_entry_unload(config_entry, component) del hass.data[DATA_ZHA] return True
apache-2.0
1,980,362,114,776,705,300
30.135135
88
0.645833
false
pizzapanther/Church-Source
churchsource/people/migrations/0001_initial.py
1
6849
# encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'Household' db.create_table('people_household', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=150)), ('status', self.gf('django.db.models.fields.CharField')(default='ns', max_length=10)), ('anniversary', self.gf('django.db.models.fields.DateField')(null=True, blank=True)), ('active', self.gf('django.db.models.fields.BooleanField')(default=True)), ('barcode', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)), ('notes', self.gf('django.db.models.fields.TextField')(null=True, blank=True)), ('first_visit', self.gf('django.db.models.fields.DateField')(null=True, blank=True)), )) db.send_create_signal('people', ['Household']) # Adding model 'Person' db.create_table('people_person', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('household', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['people.Household'])), ('fname', self.gf('django.db.models.fields.CharField')(max_length=150)), ('mname', self.gf('django.db.models.fields.CharField')(max_length=150, null=True, blank=True)), ('lname', self.gf('django.db.models.fields.CharField')(max_length=150)), ('email', self.gf('django.db.models.fields.EmailField')(max_length=75, null=True, blank=True)), ('gender', self.gf('django.db.models.fields.CharField')(default='ns', max_length=10)), ('role', self.gf('django.db.models.fields.CharField')(default='ns', max_length=10)), ('bdate', self.gf('django.db.models.fields.DateField')(null=True, blank=True)), ('ddate', self.gf('django.db.models.fields.DateField')(null=True, blank=True)), ('allergies', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)), )) db.send_create_signal('people', ['Person']) # Adding model 'Address' db.create_table('people_address', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('household', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['people.Household'])), ('address1', self.gf('django.db.models.fields.CharField')(max_length=255)), ('address2', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)), ('city', self.gf('django.db.models.fields.CharField')(max_length=255)), ('state', self.gf('django.contrib.localflavor.us.models.USStateField')(max_length=2)), ('zipcode', self.gf('django.db.models.fields.CharField')(max_length=25)), ('atype', self.gf('django.db.models.fields.CharField')(default='ns', max_length=10)), ('notes', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)), )) db.send_create_signal('people', ['Address']) def backwards(self, orm): # Deleting model 'Household' db.delete_table('people_household') # Deleting model 'Person' db.delete_table('people_person') # Deleting model 'Address' db.delete_table('people_address') models = { 'people.address': { 'Meta': {'ordering': "('address1',)", 'object_name': 'Address'}, 'address1': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'address2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'atype': ('django.db.models.fields.CharField', [], {'default': "'ns'", 'max_length': '10'}), 'city': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'household': 
('django.db.models.fields.related.ForeignKey', [], {'to': "orm['people.Household']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'notes': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'state': ('django.contrib.localflavor.us.models.USStateField', [], {'max_length': '2'}), 'zipcode': ('django.db.models.fields.CharField', [], {'max_length': '25'}) }, 'people.household': { 'Meta': {'ordering': "('name',)", 'object_name': 'Household'}, 'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'anniversary': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'barcode': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'first_visit': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}), 'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'status': ('django.db.models.fields.CharField', [], {'default': "'ns'", 'max_length': '10'}) }, 'people.person': { 'Meta': {'ordering': "('lname', 'fname')", 'object_name': 'Person'}, 'allergies': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'bdate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'ddate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}), 'fname': ('django.db.models.fields.CharField', [], {'max_length': '150'}), 'gender': ('django.db.models.fields.CharField', [], {'default': "'ns'", 'max_length': '10'}), 'household': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['people.Household']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'lname': ('django.db.models.fields.CharField', [], {'max_length': '150'}), 'mname': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}), 'role': ('django.db.models.fields.CharField', [], {'default': "'ns'", 'max_length': '10'}) } } complete_apps = ['people']
gpl-3.0
422,170,590,105,335,230
63.009346
123
0.572784
false
toway/towaymeetups
mba/views/i/__init__.py
1
1160
#!/usr/bin/python
# coding: utf-8
from datetime import datetime

import deform
import colander
import jinja2

from deform import ValidationFailure
from deform.widget import CheckedPasswordWidget
from pyramid.view import view_config
from pyramid.httpexceptions import HTTPForbidden
from pyramid.httpexceptions import HTTPFound
from pyramid.security import remember
from pyramid.renderers import render_to_response
from pyramid.encode import urlencode
from formencode.validators import Email
from pyramid.request import Response

from kotti import get_settings
from kotti.security import get_principals
from kotti import DBSession
from kotti.security import get_user

from mba import _
from mba.utils.decorators import wrap_user
from mba.views.admin.meetup import ActAddForm, ActEditForm
from mba.views.review import ReviewEditForm, ReviewAddForm
from mba.resources import MbaUser, Act, Review

__author__ = 'sunset'
__date__ = '20150105'
__desc__ = u'Personal center'


def includeme(config):
    config.include("mba.views.i.authentication")
    config.include("mba.views.i.invitationcode")
    config.include("mba.views.i.infobox")
    config.scan(__name__)
gpl-3.0
-5,247,334,569,874,274,000
24.6
58
0.799479
false
zenoss/ZenPacks.zenoss.PostgreSQL
ZenPacks/zenoss/PostgreSQL/parsers/server.py
1
1264
########################################################################### # # This program is part of Zenoss Core, an open source monitoring platform. # Copyright (C) 2011, Zenoss Inc. # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License version 2 or (at your # option) any later version as published by the Free Software Foundation. # # For complete information please visit: http://www.zenoss.com/oss/ # ########################################################################### import json from Products.ZenRRD.CommandParser import CommandParser class server(CommandParser): def processResults(self, cmd, result): data = None try: data = json.loads(cmd.result.output) except ValueError: return dp_map = dict([(dp.id, dp) for dp in cmd.points]) for name, dp in dp_map.items(): if name in data: result.values.append((dp, data[name])) if 'events' in data: for event in data['events']: # Keys must be converted from unicode to str. event = dict((str(k), v) for k, v in event.iteritems()) result.events.append(event)
gpl-2.0
5,135,747,778,605,399,000
33.162162
75
0.55538
false
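The parser above expects the command output to be a JSON object with one key per datapoint plus an optional "events" list. A stand-alone sketch of that mapping with plain Python objects; the datapoint names and event fields are invented, and simple lists stand in for the Zenoss cmd/result types.

import json

raw_output = json.dumps({
    "connections": 12,
    "databaseSize": 1048576,
    "events": [{"severity": 2, "summary": "example event"}],
})

data = json.loads(raw_output)
datapoint_names = ["connections", "databaseSize"]  # stand-in for cmd.points

# same key lookup as processResults(): only datapoints present in the JSON are kept
values = [(name, data[name]) for name in datapoint_names if name in data]
# event keys converted to str, as in the parser
events = [dict((str(k), v) for k, v in ev.items()) for ev in data.get("events", [])]

print(values)   # [('connections', 12), ('databaseSize', 1048576)]
print(events)   # [{'severity': 2, 'summary': 'example event'}]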
BrainiacBug/Python-scripts
robot_txt_finder_EST_top/analyse_robot_txts.py
1
2790
import os from collections import Counter # List for disallows from file disallow = [] def list_all_files(): """Lists all files in robots folder""" os.chdir("robots") robot_files = os.listdir(os.curdir) print "[*] Found number of files:" + str(len(robot_files)) return robot_files def open_txt_file(file_name): """Opens given file :param file_name: file name (str)""" txt_file = open(file_name, "r") for line in txt_file: if "Disallow:" in line: pure_line = line.rstrip('\n') disallow_line = pure_line.split(" ") try: disallow.append(disallow_line[1]) except IndexError: print "[!] Error on line: " + line.rstrip('\n') txt_file.close() def get_all_disallows_from_files(): """Opens all files in folder and gets disallow lines""" robot_files = list_all_files() for item in robot_files: open_txt_file(item) print "[*] All files processed!" print "[*] Items in disallow list: " + str(len(disallow)) os.chdir("..") def print_all_disallows_to_file(): """Makes file with all unique disallows from files""" print_text = "" item_list = Counter(disallow).keys() file_name = "unique_robot_txt_disallows.txt" for item in item_list: print_text = print_text + item + "\n" write_to_file(file_name, print_text) def print_top_20_disallow(): """Makes file with top 20 disallows from files""" print_text = "" item_list = Counter(disallow).most_common(20) file_name = "top_20_robot_txt_disallows.txt" for item in item_list: item_text = "Item: " + item[0] + " :: Count: " + str(item[1]) print_text = print_text + item_text + "\n" write_to_file(file_name, print_text) def print_rare_disallows(): """Makes file with all rare disallows from files""" # count 1=> print_text = "" item_list = Counter(disallow).items() file_name = "rare_robot_txt_disallows.txt" for item in item_list: if item[1] <= 1: print_text = print_text + item[0] + "\n" write_to_file(file_name, print_text) def write_to_file(file_name, file_data): """Write data to file. :param file_name: file name (str), :param file_data: data (str)""" robot_file = open(file_name, "a+") robot_file.write(file_data + "\n") robot_file.close() print "[*] File created: " + file_name if __name__ == '__main__': print "---------------------------------------------" print "- ROBOT.TXT DISALLOW FINDER AND ANALYSER -" print "---------------------------------------------" get_all_disallows_from_files() print_rare_disallows() print_top_20_disallow() print_all_disallows_to_file() print "[*] All done!"
mit
-4,199,506,048,067,817,500
29.326087
69
0.578136
false
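The reporting functions above are thin wrappers around collections.Counter. A tiny self-contained illustration of the three aggregations (unique, top-N, rare) on an in-memory list instead of files read from the robots/ folder; the paths are made up.

from collections import Counter

disallow = ["/admin/", "/login", "/admin/", "/tmp", "/admin/", "/login"]

counts = Counter(disallow)
print(sorted(counts.keys()))                     # unique disallows
print(counts.most_common(2))                     # top entries: [('/admin/', 3), ('/login', 2)]
print([p for p, n in counts.items() if n <= 1])  # "rare" disallows seen only once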
gstiebler/odemis
src/odemis/driver/picoquant.py
1
33026
# -*- coding: utf-8 -*- ''' Created on 14 Apr 2016 @author: Éric Piel Copyright © 2016 Éric Piel, Delmic This file is part of Odemis. Odemis is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. Odemis is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Odemis. If not, see http://www.gnu.org/licenses/. ''' from __future__ import division import Queue from ctypes import * import ctypes import logging import math import numpy from odemis import model, util from odemis.model import HwError import random import threading import time # Based on phdefin.h MAXDEVNUM = 8 HISTCHAN = 65536 # number of histogram channels TTREADMAX = 131072 # 128K event records MODE_HIST = 0 MODE_T2 = 2 MODE_T3 = 3 FEATURE_DLL = 0x0001 FEATURE_TTTR = 0x0002 FEATURE_MARKERS = 0x0004 FEATURE_LOWRES = 0x0008 FEATURE_TRIGOUT = 0x0010 FLAG_FIFOFULL = 0x0003 # T-modes FLAG_OVERFLOW = 0x0040 # Histomode FLAG_SYSERROR = 0x0100 # Hardware problem BINSTEPSMAX = 8 SYNCDIVMIN = 1 SYNCDIVMAX = 8 ZCMIN = 0 # mV ZCMAX = 20 # mV DISCRMIN = 0 # mV DISCRMAX = 800 # mV OFFSETMIN = 0 # ps OFFSETMAX = 1000000000 # ps SYNCOFFSMIN = -99999 # ps SYNCOFFSMAX = 99999 # ps CHANOFFSMIN = -8000 # ps CHANOFFSMAX = 8000 # ps ACQTMIN = 1 # ms ACQTMAX = 360000000 # ms (100*60*60*1000ms = 100h) PHR800LVMIN = -1600 # mV PHR800LVMAX = 2400 # mV HOLDOFFMAX = 210480 # ns class PHError(Exception): def __init__(self, errno, strerror, *args, **kwargs): super(PHError, self).__init__(errno, strerror, *args, **kwargs) self.args = (errno, strerror) self.errno = errno self.strerror = strerror def __str__(self): return self.args[1] class PHDLL(CDLL): """ Subclass of CDLL specific to 'PHLib' library, which handles error codes for all the functions automatically. """ def __init__(self): # TODO: also support loading the Windows DLL on Windows try: # Global so that its sub-libraries can access it CDLL.__init__(self, "libph300.so", RTLD_GLOBAL) except OSError: logging.error("Check that PicoQuant PHLib is correctly installed") raise def at_errcheck(self, result, func, args): """ Analyse the return value of a call and raise an exception in case of error. 
Follows the ctypes.errcheck callback convention """ # everything returns 0 on correct usage, and < 0 on error if result != 0: err_str = create_string_buffer(40) self.PH_GetErrorString(err_str, result) if result in PHDLL.err_code: raise PHError(result, "Call to %s failed with error %s (%d): %s" % (str(func.__name__), PHDLL.err_code[result], result, err_str.value)) else: raise PHError(result, "Call to %s failed with error %d: %s" % (str(func.__name__), result, err_str.value)) return result def __getitem__(self, name): func = super(PHDLL, self).__getitem__(name) # try: # except Exception: # raise AttributeError("Failed to find %s" % (name,)) func.__name__ = name func.errcheck = self.at_errcheck return func err_code = { - 1: "ERROR_DEVICE_OPEN_FAIL", - 2: "ERROR_DEVICE_BUSY", - 3: "ERROR_DEVICE_HEVENT_FAIL", - 4: "ERROR_DEVICE_CALLBSET_FAIL", - 5: "ERROR_DEVICE_BARMAP_FAIL", - 6: "ERROR_DEVICE_CLOSE_FAIL", - 7: "ERROR_DEVICE_RESET_FAIL", - 8: "ERROR_DEVICE_GETVERSION_FAIL", - 9: "ERROR_DEVICE_VERSION_MISMATCH", - 10: "ERROR_DEVICE_NOT_OPEN", - 11: "ERROR_DEVICE_LOCKED", - 16: "ERROR_INSTANCE_RUNNING", - 17: "ERROR_INVALID_ARGUMENT", - 18: "ERROR_INVALID_MODE", - 19: "ERROR_INVALID_OPTION", - 20: "ERROR_INVALID_MEMORY", - 21: "ERROR_INVALID_RDATA", - 22: "ERROR_NOT_INITIALIZED", - 23: "ERROR_NOT_CALIBRATED", - 24: "ERROR_DMA_FAIL", - 25: "ERROR_XTDEVICE_FAIL", - 26: "ERROR_FPGACONF_FAIL", - 27: "ERROR_IFCONF_FAIL", - 28: "ERROR_FIFORESET_FAIL", - 29: "ERROR_STATUS_FAIL", - 32: "ERROR_USB_GETDRIVERVER_FAIL", - 33: "ERROR_USB_DRIVERVER_MISMATCH", - 34: "ERROR_USB_GETIFINFO_FAIL", - 35: "ERROR_USB_HISPEED_FAIL", - 36: "ERROR_USB_VCMD_FAIL", - 37: "ERROR_USB_BULKRD_FAIL", - 64: "ERROR_HARDWARE_F01", - 65: "ERROR_HARDWARE_F02", - 66: "ERROR_HARDWARE_F03", - 67: "ERROR_HARDWARE_F04", - 68: "ERROR_HARDWARE_F05", - 69: "ERROR_HARDWARE_F06", - 70: "ERROR_HARDWARE_F07", - 71: "ERROR_HARDWARE_F08", - 72: "ERROR_HARDWARE_F09", - 73: "ERROR_HARDWARE_F10", - 74: "ERROR_HARDWARE_F11", - 75: "ERROR_HARDWARE_F12", - 76: "ERROR_HARDWARE_F13", - 77: "ERROR_HARDWARE_F14", - 78: "ERROR_HARDWARE_F15", } class PH300(model.Detector): """ Represents a PicoQuant PicoHarp 300. """ def __init__(self, name, role, device=None, children=None, daemon=None, disc_volt=None, zero_cross=None, **kwargs): """ device (None or str): serial number (eg, 1020345) of the device to use or None if any device is fine. children (dict str -> kwargs): the names of the detectors (detector0 and detector1 are valid) to the arguments. disc_volt (2 (0 <= float <= 0.8)): discriminator voltage for the APD 0 and 1 (in V) zero_cross (2 (0 <= float <= 2e-3)): zero cross voltage for the APD0 and 1 (in V) """ if children is None: children = {} if device == "fake": device = None self._dll = FakePHDLL() else: self._dll = PHDLL() self._idx = self._openDevice(device) if disc_volt is None: disc_volt = [0, 0] if zero_cross is None: zero_cross = [0, 0] super(PH300, self).__init__(name, role, daemon=daemon, **kwargs) # TODO: metadata for indicating the range? cf WL_LIST? # TODO: do we need TTTR mode? 
self.Initialise(MODE_HIST) self._swVersion = self.GetLibraryVersion() self._metadata[model.MD_SW_VERSION] = self._swVersion mod, partnum, ver = self.GetHardwareInfo() sn = self.GetSerialNumber() self._hwVersion = "%s %s %s (s/n %s)" % (mod, partnum, ver, sn) self._metadata[model.MD_HW_VERSION] = self._hwVersion self._metadata[model.MD_DET_TYPE] = model.MD_DT_NORMAL logging.info("Opened device %d (%s s/n %s)", self._idx, mod, sn) self.Calibrate() # Do basic set-up for things that should never be needed to change self.SetSyncDiv(1) # 1 = no divider TODO: needs to be a VA? # TODO: needs to be changeable? self.SetOffset(0) # To pass the raw count of each detector, we create children detectors. # It could also go into just separate DataFlow, but then it's difficult # to allow using these DataFlows in a standard way. self._detectors = {} for name, ckwargs in children.items(): if name == "detector0": i = 0 elif name == "detector1": i = 1 else: raise ValueError("") self._detectors[name] = PH300RawDetector(channel=i, parent=self, daemon=daemon, **ckwargs) self.children.value.add(self._detectors[name]) # dwellTime = measurement duration dt_rng = (ACQTMIN * 1e-3, ACQTMAX * 1e-3) # s self.dwellTime = model.FloatContinuous(1, dt_rng, unit="s") # Indicate first dim is time and second dim is (useless) X (in reversed order) self._metadata[model.MD_DIMS] = "XT" self._shape = (HISTCHAN, 1, 2**16) # Histogram is 32 bits, but only return 16 bits info # Set the CFD parameters (in mV) for i, (dv, zc) in enumerate(zip(disc_volt, zero_cross)): self.SetInputCFD(i, int(dv * 1000), int(zc * 1000)) tresbase, bs = self.GetBaseResolution() tres = self.GetResolution() pxd_rng = (tresbase * 1e-12, 2 ** (BINSTEPSMAX - 1) * tresbase * 1e-12) self.pixelDuration = model.FloatContinuous(tres, pxd_rng, unit="s", setter=self._setPixelDuration) self._metadata[model.MD_PIXEL_DUR] = tres res = self._shape[:2] self.resolution = model.ResolutionVA(res, (res, res), readonly=True) self.syncOffset = model.FloatContinuous(0, (SYNCOFFSMIN * 1e-12, SYNCOFFSMAX * 1e-12), unit="s", setter=self._setSyncOffset) # Make sure the device is synchronised and metadata is updated self._setSyncOffset(self.syncOffset.value) # Wrapper for the dataflow self.data = BasicDataFlow(self) # Note: Apparently, the hardware supports reading the data, while it's # still accumulating (ie, the acquisition is still running). # We don't support this feature for now, and if the user needs to see # the data building up, it shouldn't be costly (in terms of overhead or # noise) to just do multiple small acquisitions and do the accumulation # in software. # Alternatively, we could provide a second dataflow that sends the data # while it's building up. 
# Queue to control the acquisition thread: # * "S" to start # * "E" to end # * "T" to terminate self._genmsg = Queue.Queue() self._generator = threading.Thread(target=self._acquire, name="PicoHarp300 acquisition thread") self._generator.start() def _openDevice(self, sn=None): """ sn (None or str): serial number return (0 <= int < 8): device ID raises: HwError if the device doesn't exist or cannot be opened """ sn_str = create_string_buffer(8) for i in range(MAXDEVNUM): try: self._dll.PH_OpenDevice(i, sn_str) except PHError as ex: if ex.errno == -1: # ERROR_DEVICE_OPEN_FAIL == no device with this idx pass else: logging.warning("Failure to open device %d: %s", i, ex) continue if sn is None or sn_str.value == sn: return i else: logging.info("Skipping device %d, with S/N %s", i, sn_str.value) else: # TODO: if a PHError happened indicate the error in the message raise HwError("No PicoHarp300 found, check the device is turned on and connected to the computer") def terminate(self): model.Detector.terminate(self) self.stop_generate() if self._generator: self._genmsg.put("T") self._generator.join(5) self._generator = None self.CloseDevice() def CloseDevice(self): self._dll.PH_CloseDevice(self._idx) def GetLibraryVersion(self): ver_str = create_string_buffer(8) self._dll.PH_GetLibraryVersion(ver_str) return ver_str.value def Initialise(self, mode): """ mode (MODE_*) """ logging.debug("Initializing device %d", self._idx) self._dll.PH_Initialize(self._idx, mode) def GetHardwareInfo(self): mod = create_string_buffer(16) partnum = create_string_buffer(8) ver = create_string_buffer(8) self._dll.PH_GetHardwareInfo(self._idx, mod, partnum, ver) return (mod.value, partnum.value, ver.value) def GetSerialNumber(self): sn_str = create_string_buffer(8) self._dll.PH_GetSerialNumber(self._idx, sn_str) return sn_str.value def Calibrate(self): logging.debug("Calibrating device %d", self._idx) self._dll.PH_Calibrate(self._idx) def GetBaseResolution(self): """ Raw device time resolution, and binning return: res (0<=float): min duration of a bin in the histogram (in ps) binning code (0<=int): binning = 2**bc """ # TODO: check that binning is indeed the binning code: doesn't seem so (always 8?!) res = c_double() bs = c_int() self._dll.PH_GetBaseResolution(self._idx, byref(res), byref(bs)) return res.value, bs.value def GetResolution(self): """ Current time resolution, taking into account the binning return (0<=float): duration of a bin (in ps) """ res = c_double() self._dll.PH_GetResolution(self._idx, byref(res)) return res.value def SetInputCFD(self, channel, level, zc): """ Changes the Constant Fraction Discriminator channel (0 or 1) level (int) CFD discriminator level in millivolts zc (0<=int): CFD zero cross in millivolts """ assert(channel in {0, 1}) assert(DISCRMIN <= level <= DISCRMAX) assert(ZCMIN <= zc <= ZCMAX) self._dll.PH_SetInputCFD(self._idx, channel, level, zc) def SetSyncDiv(self, div): """ Changes the divider of the sync input (channel 0). This allows to reduce the sync input rate so that the period is at least as long as the dead time. Note: the count rate will need 100 ms to be valid again div (1, 2, 4, or 8): input rate divider applied at channel 0 """ assert(SYNCDIVMIN <= div <= SYNCDIVMAX) self._dll.PH_SetSyncDiv(self._idx, div) def SetSyncOffset(self, offset): """ This function can replace an adjustable cable delay. A positive offset corresponds to inserting a cable in the sync input. Note that this offset must not be confused with the histogram acquisition offset. 
offset (int): offset in ps """ assert(SYNCOFFSMIN <= offset <= SYNCOFFSMAX) self._dll.PH_SetSyncOffset(self._idx, offset) def SetOffset(self, offset): """ Changes the acquisition offset. The offset is subtracted from each start-stop measurement before it is used to address the histogram channel to be incremented. Therefore, increasing the offset means shifting the signal towards earlier times. Note: This offset only acts on the difference between ch1 and ch0 in histogramming and T3 mode. Do not confuse it with the input offsets. offset (0<=int): offset in ps """ assert(OFFSETMIN <= offset <= OFFSETMAX) self._dll.PH_SetOffset(self._idx, offset) def SetBinning(self, bc): """ bc (0<=int): binning code. Binning = 2**bc (IOW, 0 for binning 1, 3 for binning 8) """ assert(0 <= bc <= BINSTEPSMAX - 1) self._dll.PH_SetBinning(self._idx, bc) def SetStopOverflow(self, stop, stopcount): """ Make the device stop the whole measurement as soon as one bin reaches the given count (or disable that feature, in which case the bins will get clipped) stop (bool): True if it should stop on reaching the given count stopcount (0<int<=2**16-1): count at which to stop """ assert(0 <= stopcount <= 2**16 - 1) stop_ovfl = 1 if stop else 0 self._dll.PH_SetStopOverflow(self._idx, stop_ovfl, stopcount) def GetCountRate(self, channel): """ Note: need at least 100 ms per reading (otherwise will return the same value) channel (0 <= int <= 1): the input channel return (0<=int): counts/s """ # TODO: check if we need a lock (to avoid multithread access) rate = c_int() self._dll.PH_GetCountRate(self._idx, channel, byref(rate)) return rate.value def ClearHistMem(self, block=0): """ block (0 <= int): block number to clear """ assert(0 <= block) self._dll.PH_ClearHistMem(self._idx, block) def StartMeas(self, tacq): """ tacq (0<int): acquisition time in milliseconds """ assert(ACQTMIN <= tacq <= ACQTMAX) self._dll.PH_StartMeas(self._idx, tacq) def StopMeas(self): self._dll.PH_StopMeas(self._idx) def CTCStatus(self): """ Reports the status of the acquisition (CTC) Return (bool): True if the acquisition time has ended """ ctcstatus = c_int() self._dll.PH_CTCStatus(self._idx, byref(ctcstatus)) return ctcstatus.value > 0 def GetHistogram(self, block=0): """ block (0<=int): only useful if routing return numpy.array of shape (1, res): the histogram """ buf = numpy.empty((1, HISTCHAN), dtype=numpy.uint32) buf_ct = buf.ctypes.data_as(POINTER(c_uint32)) self._dll.PH_GetHistogram(self._idx, buf_ct, block) return buf def GetElapsedMeasTime(self): """ return 0<=float: time since the measurement started (in s) """ elapsed = c_double() # in ms self._dll.PH_GetElapsedMeasTime(self._idx, byref(elapsed)) return elapsed.value * 1e-3 def ReadFiFo(self, count): """ Warning, the device must be initialised in a special mode (T2 or T3) count (int < TTREADMAX): number of values to read return ndarray of uint32: can be shorter than count, even 0 length. each unint32 is a 'record'. The interpretation of the record depends on the mode. """ # From the doc (p. 31 & 32): # * Each T2 mode event record consists of 32 bits. # There are 4 bits for the channel number and 28 bits for the time-tag. # If the time tag overflows, a special overflow marker record is # inserted in the data stream. # * Each T3 mode event record consists of 32 bits. # There are 4 bits for the channel number, 12 bits for the start- # stop time and 16 bits for the sync counter. If the counter overflows, # a special overflow marker record is inserted in the data stream. 
# From the demo programs: markers are recorded as channel 0xf, in which # case the next 12 bits are the marker number. Marker 0 indicates the # counter overflow. # See also https://github.com/tsbischof/libpicoquant assert 0 < count < TTREADMAX buf = numpy.empty((count,), dtype=numpy.uint32) buf_ct = buf.ctypes.data_as(POINTER(c_uint32)) nactual = c_int() self._dll.PH_ReadFiFo(self._idx, buf_ct, count, byref(nactual)) # only return the values which were read # TODO: if it's really smaller (eg, 0), copy the data to avoid holding all the mem return buf[:nactual.value] def _setPixelDuration(self, pxd): # TODO: delay until the end of an acquisition tresbase, bs = self.GetBaseResolution() b = int(pxd * 1e12 / tresbase) # Only accept a power of 2 bs = int(math.log(b, 2)) self.SetBinning(bs) # Update metadata b = 2 ** bs pxd = tresbase * b pxd = self.GetResolution() * 1e-12 # ps -> s self._metadata[model.MD_PIXEL_DUR] = pxd return pxd def _setSyncOffset(self, offset): offset_ps = int(offset * 1e12) self.SetSyncOffset(offset_ps) offset = offset_ps * 1e-12 # convert the round-down in ps back to s self._metadata[model.MD_TIME_OFFSET] = offset return offset # Acquisition methods def start_generate(self): self._genmsg.put("S") def stop_generate(self): self._genmsg.put("E") def _get_acq_msg(self, **kwargs): """ Read one message from the acquisition queue return (str): message raises Queue.Empty: if no message on the queue """ msg = self._genmsg.get(**kwargs) if msg not in ("S", "E", "T"): logging.warning("Acq received unexpected message %s", msg) else: logging.debug("Acq received message %s", msg) return msg def _acq_wait_start(self): """ Blocks until the acquisition should start. Note: it expects that the acquisition is stopped. raise StopIteration: if a terminate message was received """ while True: state = self._get_acq_msg(block=True) if state == "T": raise StopIteration() # Check if there are already more messages on the queue try: state = self._get_acq_msg(block=False) if state == "T": raise StopIteration() except Queue.Empty: pass if state == "S": return def _acq_should_stop(self, timeout=None): """ Indicate whether the acquisition should now stop or can keep running. Note: it expects that the acquisition is running. 
timeout (0<float or None): how long to wait to check (if None, don't wait) return (bool): True if needs to stop, False if can continue raise StopIteration: if a terminate message was received """ try: if timeout is None: state = self._get_acq_msg(block=False) else: state = self._get_acq_msg(timeout=timeout) if state == "E": return True elif state == "T": raise StopIteration() except Queue.Empty: pass return False def _acquire(self): """ Acquisition thread Managed via the .genmsg Queue """ try: while True: # Wait until we have a start (or terminate) message self._acq_wait_start() # Keep acquiring while True: tacq = self.dwellTime.value tstart = time.time() tend = tstart + tacq ttimeout = tstart + tacq * 3 + 1 # Give a big margin for timeout # TODO: only allow to update the setting here (not during acq) md = self._metadata.copy() md[model.MD_ACQ_DATE] = tstart md[model.MD_DWELL_TIME] = tacq logging.debug("Starting new acquisition") # check if any message received before starting again if self._acq_should_stop(): logging.debug("Acquisition stopped") break self.ClearHistMem() self.StartMeas(int(tacq * 1e3)) # Wait for the acquisition to be done or until a stop or # terminate message comes must_stop = False try: now = tstart while now < ttimeout: twait = max(1e-3, min((tend - now) / 2, tacq / 2)) logging.debug("Waiting for %g s", twait) if self._acq_should_stop(twait): must_stop = True break # Is the data ready? if self.CTCStatus(): logging.debug("Acq complete") break now = time.time() else: logging.error("Acquisition timeout after %g s", now - tstart) # TODO: try to reset the hardware? continue finally: # Must always be called, whether the measurement finished or not self.StopMeas() if must_stop: logging.debug("Acquisition stopped") break # Read data and pass it data = self.GetHistogram() da = model.DataArray(data, md) self.data.notify(da) except StopIteration: logging.debug("Acquisition thread requested to terminate") except Exception: logging.exception("Failure in acquisition thread") else: logging.error("Acquisition thread ended without exception") logging.debug("Acquisition thread ended") @classmethod def scan(cls): """ returns (list of 2-tuple): name, kwargs (device) Note: it's obviously not advised to call this function if a device is already under use """ dll = PHDLL() sn_str = create_string_buffer(8) dev = [] for i in range(MAXDEVNUM): try: dll.PH_OpenDevice(i, sn_str) except PHError as ex: if ex.errno == -1: # ERROR_DEVICE_OPEN_FAIL == no device with this idx continue else: logging.warning("Failure to open existing device %d: %s", i, ex) # Still add it dev.append(("PicoHarp 300", {"device": sn_str.value})) return dev class PH300RawDetector(model.Detector): """ Represents a raw detector (eg, APD) accessed via PicoQuant PicoHarp 300. Cannot be directly created. It must be done via PH300 child. 
""" def __init__(self, name, role, channel, parent, **kwargs): """ channel (0 or 1): detector ID of the detector """ self._channel = channel super(PH300RawDetector, self).__init__(name, role, parent=parent, **kwargs) self._shape = (2**31,) # only one point, with (32 bits) int size self.data = BasicDataFlow(self) self._metadata[model.MD_DET_TYPE] = model.MD_DT_NORMAL self._generator = None def terminate(self): self.stop_generate() def start_generate(self): if self._generator is not None: logging.warning("Generator already running") return self._generator = util.RepeatingTimer(100e-3, # Fixed rate at 100ms self._generate, "Raw detector reading") self._generator.start() def stop_generate(self): if self._generator is not None: self._generator.cancel() self._generator = None def _generate(self): """ Read the current detector rate and make it a data """ # update metadata metadata = self._metadata.copy() metadata[model.MD_ACQ_DATE] = time.time() metadata[model.MD_DWELL_TIME] = 100e-3 # s # Read data and make it a DataArray d = self.parent.GetCountRate(self._channel) nd = numpy.array([d], dtype=numpy.int) img = model.DataArray(nd, metadata) # send the new image (if anyone is interested) self.data.notify(img) class BasicDataFlow(model.DataFlow): def __init__(self, detector): """ detector (PH300): the detector that the dataflow corresponds to """ model.DataFlow.__init__(self) self._detector = detector # start/stop_generate are _never_ called simultaneously (thread-safe) def start_generate(self): self._detector.start_generate() def stop_generate(self): self._detector.stop_generate() # Only for testing/simulation purpose # Very rough version that is just enough so that if the wrapper behaves correctly, # it returns the expected values. def _deref(p, typep): """ p (byref object) typep (c_type): type of pointer Use .value to change the value of the object """ # This is using internal ctypes attributes, that might change in later # versions. Ugly! # Another possibility would be to redefine byref by identity function: # byref= lambda x: x # and then dereferencing would be also identity function. return typep.from_address(addressof(p._obj)) def _val(obj): """ return the value contained in the object. Needed because ctype automatically converts the arguments to c_types if they are not already c_type obj (c_type or python object) """ if isinstance(obj, ctypes._SimpleCData): return obj.value else: return obj class FakePHDLL(object): """ Fake PHDLL. It basically simulates one connected device, which returns reasonable values. 
""" def __init__(self): self._idx = 0 self._mode = None self._sn = "10234567" self._base_res = 4 # ps self._bins = 0 # binning power self._syncdiv = 0 # start/ (expected) end time of the current acquisition (or None if not started) self._acq_start = None self._acq_end = None self._last_acq_dur = None # s def PH_OpenDevice(self, i, sn_str): if i == self._idx: sn_str.value = self._sn else: raise PHError(-1, PHDLL.err_code[-1]) # ERROR_DEVICE_OPEN_FAIL def PH_Initialize(self, i, mode): self._mode = mode def PH_CloseDevice(self, i): self._mode = None def PH_GetHardwareInfo(self, i, mod, partnum, ver): mod.value = "FakeHarp 300" partnum.value = "12345" ver.value = "2.0" def PH_GetLibraryVersion(self, ver_str): ver_str.value = "3.00" def PH_GetSerialNumber(self, i, sn_str): sn_str.value = self._sn def PH_Calibrate(self, i): pass def PH_GetCountRate(self, i, channel, p_rate): rate = _deref(p_rate, c_int) rate.value = random.randint(0, 5000) def PH_GetBaseResolution(self, i, p_resolution, p_binsteps): resolution = _deref(p_resolution, c_double) binsteps = _deref(p_binsteps, c_int) resolution.value = self._base_res binsteps.value = self._bins def PH_GetResolution(self, i, p_resolution): resolution = _deref(p_resolution, c_double) resolution.value = self._base_res * (2 ** self._bins) def PH_SetInputCFD(self, i, channel, level, zc): # TODO return def PH_SetSyncDiv(self, i, div): self._syncdiv = _val(div) def PH_SetSyncOffset(self, i, syncoffset): # TODO return def PH_SetStopOverflow(self, i, stop_ovfl, stopcount): return def PH_SetBinning(self, i, binning): self._bins = _val(binning) def PH_SetOffset(self, i, offset): # TODO return def PH_ClearHistMem(self, i, block): self._last_acq_dur = None def PH_StartMeas(self, i, tacq): if self._acq_start is not None: raise PHError(-16, PHDLL.err_code[-16]) self._acq_start = time.time() self._acq_end = self._acq_start + _val(tacq) * 1e-3 def PH_StopMeas(self, i): if self._acq_start is not None: self._last_acq_dur = self._acq_end - self._acq_start self._acq_start = None self._acq_end = None def PH_CTCStatus(self, i, p_ctcstatus): ctcstatus = _deref(p_ctcstatus, c_int) if self._acq_end > time.time(): ctcstatus.value = 0 # 0 if still running else: ctcstatus.value = 1 def PH_GetElapsedMeasTime(self, i, p_elapsed): elapsed = _deref(p_elapsed, c_double) if self._acq_start is None: elapsed.value = 0 else: elapsed.value = min(self._acq_end, time.time()) - self._acq_start def PH_GetHistogram(self, i, p_chcount, block): p = cast(p_chcount, POINTER(c_uint32)) ndbuffer = numpy.ctypeslib.as_array(p, (HISTCHAN,)) # make the max value dependent on the acquisition time if self._last_acq_dur is None: logging.warning("Simulator detected reading empty histogram") maxval = 0 else: dur = min(10, self._last_acq_dur) maxval = max(1, int(2 ** 16 * (dur / 10))) # 10 s -> full scale # Old numpy doesn't support dtype argument for randint ndbuffer[...] = numpy.random.randint(0, maxval + 1, HISTCHAN).astype(numpy.uint32)
gpl-2.0
-2,291,997,670,940,585,200
33.908034
226
0.569997
false
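A hypothetical usage sketch for the driver above (Python 2, requires the odemis packages). Passing device="fake" selects the FakePHDLL simulator handled in PH300.__init__; the component names and roles are invented for the example, and the DataFlow subscription follows the usual Odemis callback signature.

import threading

from odemis.driver.picoquant import PH300

dev = PH300(name="ph300", role="time-correlator", device="fake",
            children={"detector0": {"name": "apd0", "role": "photo-detector0"},
                      "detector1": {"name": "apd1", "role": "photo-detector1"}})
dev.dwellTime.value = 0.1  # s, acquisition length per histogram

done = threading.Event()

def on_data(dataflow, data):
    print(data.shape)      # (1, 65536) histogram; acquisition metadata in data.metadata
    done.set()

dev.data.subscribe(on_data)   # starts the acquisition loop in the driver thread
done.wait(5)
dev.data.unsubscribe(on_data)
dev.terminate()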
wakita-ncch/Radiomics
src/Radiomics/group3_glrl.py
1
3248
import pyximport
import numpy as np
pyximport.install(setup_args={'include_dirs':[np.get_include()]}, inplace=True)
from _glrl_loop import _glrl_vector_loop
from profiling_tools import time


def glrl_vector_loop(image, direction, bin_width):
    # convert pixel intensities into gray levels with the given bin width
    bin_width = int(bin_width)
    image /= bin_width
    return _glrl_vector_loop(image, direction)


class GLRL_Matrix:
    def __init__(self, image, direction, bin_width):
        bin_width = int(bin_width)
        image /= bin_width
        self.image = image
        self.glrl_matrix = _glrl_vector_loop(image, direction).astype(np.float)
        self.Ng = np.arange(1, self.glrl_matrix.shape[0] + 1).astype(np.float)
        self.Nr = np.arange(1, self.glrl_matrix.shape[1] + 1).astype(np.float)
        self.Np = float(len(self.image.ravel()))
        self.jj, self.ii = np.meshgrid(self.Nr, self.Ng)
        self.jj = self.jj.astype(np.float)
        self.ii = self.ii.astype(np.float)
        self.sum_matrix = float(np.sum(self.glrl_matrix))

        #print "Ng: ", self.Ng
        #print "Nr: ", self.Nr
        #print "Np: ", self.Np
        #print "sum_matrix: ", self.sum_matrix
        #print "ii: ", self.ii
        #print "jj: ", self.jj
        #print "SRE: ", self.short_run_emphasis()
        #print "LRE: ", self.long_run_emphasis()
        #print "GLN: ", self.gray_level_non_uniformity()
        #print "RLN: ", self.run_length_non_uniformity()
        #print "RP: ", self.run_percentage()
        #print "LGLRE: ", self.low_gray_level_run_emphasis()
        #print "HGLRE: ", self.high_gray_level_rum_emphasis()
        #print "SRLGLE: ", self.short_run_low_gray_level_emphasis()
        #print "SRHGLE: ", self.short_run_high_gray_level_emphasis()
        #print "LRLGLE: ", self.long_run_low_gray_level_emphasis()
        #print "LRHGLE: ", self.long_run_high_gray_level_emphasis()

    def short_run_emphasis(self):
        return np.sum(self.glrl_matrix / self.jj**2.0) / self.sum_matrix

    def long_run_emphasis(self):
        return np.sum(self.jj ** 2 * self.glrl_matrix) / self.sum_matrix

    def gray_level_non_uniformity(self):
        return np.sum(np.sum(self.glrl_matrix, axis=1)**2) / self.sum_matrix

    def run_length_non_uniformity(self):
        return np.sum(np.sum(self.glrl_matrix, axis=0)**2) / self.sum_matrix

    def run_percentage(self):
        # run percentage: total number of runs divided by the number of pixels
        return self.sum_matrix / self.Np

    def low_gray_level_run_emphasis(self):
        return np.sum(self.glrl_matrix / self.ii**2) / self.sum_matrix

    def high_gray_level_rum_emphasis(self):
        return np.sum(self.ii**2 * self.glrl_matrix) / self.sum_matrix

    def short_run_low_gray_level_emphasis(self):
        return np.sum(self.glrl_matrix / (self.ii**2 * self.jj**2)) / self.sum_matrix

    def short_run_high_gray_level_emphasis(self):
        return np.sum(self.ii**2 * self.glrl_matrix / self.jj**2) / self.sum_matrix

    def long_run_low_gray_level_emphasis(self):
        return np.sum(self.jj**2 * self.glrl_matrix / self.ii**2) / self.sum_matrix

    def long_run_high_gray_level_emphasis(self):
        return np.sum(self.ii**2 * self.jj**2 * self.glrl_matrix) / self.sum_matrix
mit
5,497,817,697,771,615,000
29.942857
85
0.623153
false
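For orientation, a stand-alone numpy illustration of what the run-length matrix encodes and how two of the features above are computed; the 3x3 matrix was built by hand for a single toy row of gray levels and does not use the Cython kernel.

import numpy as np

# toy row of gray levels: 1 1 2 2 2 3 -> runs (1, len 2), (2, len 3), (3, len 1)
# glrl[g-1, l-1] = number of runs of gray level g with run length l
glrl = np.array([[0., 1., 0.],   # gray level 1: one run of length 2
                 [0., 0., 1.],   # gray level 2: one run of length 3
                 [1., 0., 0.]])  # gray level 3: one run of length 1

jj, ii = np.meshgrid(np.arange(1., 4.), np.arange(1., 4.))  # run lengths, gray levels
total_runs = glrl.sum()

sre = np.sum(glrl / jj ** 2) / total_runs   # short run emphasis, ~0.45
lre = np.sum(jj ** 2 * glrl) / total_runs   # long run emphasis, ~4.67
print("SRE %.3f  LRE %.3f" % (sre, lre))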
Bolt64/my_code
Code Snippets/daily_programmer/martian_spell_check.py
1
1701
#!/usr/bin/env python3 def offset_string(string, offset): rows=[ "qwertyuiop", "QWERTYUIOP", "asdfghjkl", "ASDFGHJKL", "zxcvbnm", "ZXCVBNM" ] new_string="" for char in string: alphabet=False for row in rows: if char in row: new_string+=row[(row.index(char)+offset)%len(row)] alphabet=True if not alphabet: new_string+=char return new_string def load_wordlist(filename): wordlist=set() for line in open(filename): wordlist.add(line.strip()) return wordlist def correct_sentence(sentence, wordlist): for word in sentence: if not word.lower() in wordlist: return False return True def get_all_sentences(sentence, offset_range, wordlist): if not sentence: yield [] else: if correct_sentence(sentence, wordlist): yield sentence else: for index,word in enumerate(sentence): if not word.lower() in wordlist: for offset in offset_range: new_word=offset_string(word, offset) if new_word.lower() in wordlist: for others in get_all_sentences(sentence[index+1:], offset_range, wordlist): yield sentence[:index]+[new_word]+others break def main(sentence, offset_range, wordlist_file): wordlist=load_wordlist(wordlist_file) sentence=sentence.split() for corrected in get_all_sentences(sentence, offset_range, wordlist): yield " ".join(corrected)
mit
2,700,639,331,335,054,000
29.927273
104
0.552616
false
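A hypothetical usage sketch, assuming the script above is importable as martian_spell_check (the module name is inferred from its path). It garbles "hello" by shifting every letter one key to the right on its keyboard row, then recovers it against a tiny in-memory wordlist instead of a wordlist file.

from martian_spell_check import offset_string, get_all_sentences

print(offset_string("hello", 1))   # -> "jraap": each letter moved one key right, wrapping per row
wordlist = {"hello", "world"}      # stand-in for load_wordlist(filename)
garbled = "jraap world".split()
for fixed in get_all_sentences(garbled, range(-2, 3), wordlist):
    print(" ".join(fixed))         # -> "hello world"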
ShashaQin/frappe
frappe/translate.py
1
19876
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # MIT License. See license.txt from __future__ import unicode_literals """ frappe.translate ~~~~~~~~~~~~~~~~ Translation tools for frappe """ import frappe, os, re, codecs, json from frappe.utils.jinja import render_include from frappe.utils import strip from jinja2 import TemplateError import itertools, operator def guess_language(lang_list=None): """Set `frappe.local.lang` from HTTP headers at beginning of request""" lang_codes = frappe.request.accept_languages.values() if not lang_codes: return frappe.local.lang guess = None if not lang_list: lang_list = get_all_languages() or [] for l in lang_codes: code = l.strip() if code in lang_list or code == "en": guess = code break # check if parent language (pt) is setup, if variant (pt-BR) if "-" in code: code = code.split("-")[0] if code in lang_list: guess = code break return guess or frappe.local.lang def get_user_lang(user=None): """Set frappe.local.lang from user preferences on session beginning or resumption""" if not user: user = frappe.session.user # via cache lang = frappe.cache().hget("lang", user) if not lang: # if defined in user profile user_lang = frappe.db.get_value("User", user, "language") if user_lang and user_lang!="Loading...": lang = get_lang_dict().get(user_lang, user_lang) or frappe.local.lang else: default_lang = frappe.db.get_default("lang") lang = default_lang or frappe.local.lang frappe.cache().hset("lang", user, lang or "en") return lang def set_default_language(language): """Set Global default language""" lang = get_lang_dict().get(language, language) frappe.db.set_default("lang", lang) frappe.local.lang = lang def get_all_languages(): """Returns all language codes ar, ch etc""" return [a.split()[0] for a in get_lang_info()] def get_lang_dict(): """Returns all languages in dict format, full name is the key e.g. `{"english":"en"}`""" return dict([[a[1], a[0]] for a in [a.split(None, 1) for a in get_lang_info()]]) def get_language_from_code(lang): return dict(a.split(None, 1) for a in get_lang_info()).get(lang) def get_lang_info(): """Returns a listified version of `apps/languages.txt`""" return frappe.cache().get_value("langinfo", lambda:frappe.get_file_items(os.path.join(frappe.local.sites_path, "languages.txt"))) def get_dict(fortype, name=None): """Returns translation dict for a type of object. :param fortype: must be one of `doctype`, `page`, `report`, `include`, `jsfile`, `boot` :param name: name of the document for which assets are to be returned. 
""" fortype = fortype.lower() cache = frappe.cache() asset_key = fortype + ":" + (name or "-") translation_assets = cache.hget("translation_assets", frappe.local.lang) or {} if not asset_key in translation_assets: if fortype=="doctype": messages = get_messages_from_doctype(name) elif fortype=="page": messages = get_messages_from_page(name) elif fortype=="report": messages = get_messages_from_report(name) elif fortype=="include": messages = get_messages_from_include_files() elif fortype=="jsfile": messages = get_messages_from_file(name) elif fortype=="boot": messages = get_messages_from_include_files() messages += frappe.db.sql("select 'DocType:', name from tabDocType") messages += frappe.db.sql("select 'Role:', name from tabRole") messages += frappe.db.sql("select 'Module:', name from `tabModule Def`") translation_assets[asset_key] = make_dict_from_messages(messages) translation_assets[asset_key].update(get_dict_from_hooks(fortype, name)) cache.hset("translation_assets", frappe.local.lang, translation_assets) return translation_assets[asset_key] def get_dict_from_hooks(fortype, name): translated_dict = {} hooks = frappe.get_hooks("get_translated_dict") for (hook_fortype, fortype_name) in hooks: if hook_fortype == fortype and fortype_name == name: for method in hooks[(hook_fortype, fortype_name)]: translated_dict.update(frappe.get_attr(method)()) return translated_dict def add_lang_dict(code): """Extracts messages and returns Javascript code snippet to be appened at the end of the given script :param code: Javascript code snippet to which translations needs to be appended.""" messages = extract_messages_from_code(code) messages = [message for pos, message in messages] code += "\n\n$.extend(frappe._messages, %s)" % json.dumps(make_dict_from_messages(messages)) return code def make_dict_from_messages(messages, full_dict=None): """Returns translated messages as a dict in Language specified in `frappe.local.lang` :param messages: List of untranslated messages """ out = {} if full_dict==None: full_dict = get_full_dict(frappe.local.lang) for m in messages: if m[1] in full_dict: out[m[1]] = full_dict[m[1]] return out def get_lang_js(fortype, name): """Returns code snippet to be appended at the end of a JS script. :param fortype: Type of object, e.g. `DocType` :param name: Document name """ return "\n\n$.extend(frappe._messages, %s)" % json.dumps(get_dict(fortype, name)) def get_full_dict(lang): """Load and return the entire translations dictionary for a language from :meth:`frape.cache` :param lang: Language Code, e.g. `hi` """ if not lang: return {} # found in local, return! 
if frappe.local.lang_full_dict is not None: return frappe.local.lang_full_dict frappe.local.lang_full_dict = frappe.cache().hget("lang_full_dict", lang) if frappe.local.lang_full_dict is None: frappe.local.lang_full_dict = load_lang(lang) # only cache file translations in this frappe.cache().hset("lang_full_dict", lang, frappe.local.lang_full_dict) try: # get user specific transaltion data user_translations = get_user_translations(lang) except Exception: user_translations = None if user_translations: frappe.local.lang_full_dict.update(user_translations) return frappe.local.lang_full_dict def load_lang(lang, apps=None): """Combine all translations from `.csv` files in all `apps`""" out = {} for app in (apps or frappe.get_all_apps(True)): path = os.path.join(frappe.get_pymodule_path(app), "translations", lang + ".csv") out.update(get_translation_dict_from_file(path, lang, app)) return out def get_translation_dict_from_file(path, lang, app): """load translation dict from given path""" cleaned = {} if os.path.exists(path): csv_content = read_csv_file(path) for item in csv_content: if len(item)==3: # with file and line numbers cleaned[item[1]] = strip(item[2]) elif len(item)==2: cleaned[item[0]] = strip(item[1]) else: raise Exception("Bad translation in '{app}' for language '{lang}': {values}".format( app=app, lang=lang, values=repr(item).encode("utf-8") )) return cleaned def get_user_translations(lang): out = frappe.cache().hget('lang_user_translations', lang) if out is None: out = {} for fields in frappe.get_all('Translation', fields= ["source_name", "target_name"],filters={'language_code': lang}): out.update({fields.source_name: fields.target_name}) frappe.cache().hset('lang_user_translations', lang, out) return out # def get_user_translation_key(): # return 'lang_user_translations:{0}'.format(frappe.local.site) def clear_cache(): """Clear all translation assets from :meth:`frappe.cache`""" cache = frappe.cache() cache.delete_key("langinfo") cache.delete_key("lang_full_dict") cache.delete_key("translation_assets") def get_messages_for_app(app): """Returns all messages (list) for a specified `app`""" messages = [] modules = ", ".join(['"{}"'.format(m.title().replace("_", " ")) \ for m in frappe.local.app_modules[app]]) # doctypes if modules: for name in frappe.db.sql_list("""select name from tabDocType where module in ({})""".format(modules)): messages.extend(get_messages_from_doctype(name)) # pages for name, title in frappe.db.sql("""select name, title from tabPage where module in ({})""".format(modules)): messages.append((None, title or name)) messages.extend(get_messages_from_page(name)) # reports for name in frappe.db.sql_list("""select tabReport.name from tabDocType, tabReport where tabReport.ref_doctype = tabDocType.name and tabDocType.module in ({})""".format(modules)): messages.append((None, name)) messages.extend(get_messages_from_report(name)) for i in messages: if not isinstance(i, tuple): raise Exception # app_include_files messages.extend(get_all_messages_from_js_files(app)) # server_messages messages.extend(get_server_messages(app)) return deduplicate_messages(messages) def get_messages_from_doctype(name): """Extract all translatable messages for a doctype. 
Includes labels, Python code, Javascript code, html templates""" messages = [] meta = frappe.get_meta(name) messages = [meta.name, meta.module] if meta.description: messages.append(meta.description) # translations of field labels, description and options for d in meta.get("fields"): messages.extend([d.label, d.description]) if d.fieldtype=='Select' and d.options: options = d.options.split('\n') if not "icon" in options[0]: messages.extend(options) # translations of roles for d in meta.get("permissions"): if d.role: messages.append(d.role) messages = [message for message in messages if message] messages = [('DocType: ' + name, message) for message in messages if is_translatable(message)] # extract from js, py files doctype_file_path = frappe.get_module_path(meta.module, "doctype", meta.name, meta.name) messages.extend(get_messages_from_file(doctype_file_path + ".js")) messages.extend(get_messages_from_file(doctype_file_path + "_list.js")) messages.extend(get_messages_from_file(doctype_file_path + "_list.html")) messages.extend(get_messages_from_file(doctype_file_path + "_calendar.js")) return messages def get_messages_from_page(name): """Returns all translatable strings from a :class:`frappe.core.doctype.Page`""" return _get_messages_from_page_or_report("Page", name) def get_messages_from_report(name): """Returns all translatable strings from a :class:`frappe.core.doctype.Report`""" report = frappe.get_doc("Report", name) messages = _get_messages_from_page_or_report("Report", name, frappe.db.get_value("DocType", report.ref_doctype, "module")) # TODO position here! if report.query: messages.extend([(None, message) for message in re.findall('"([^:,^"]*):', report.query) if is_translatable(message)]) messages.append((None,report.report_name)) return messages def _get_messages_from_page_or_report(doctype, name, module=None): if not module: module = frappe.db.get_value(doctype, name, "module") doc_path = frappe.get_module_path(module, doctype, name) messages = get_messages_from_file(os.path.join(doc_path, frappe.scrub(name) +".py")) if os.path.exists(doc_path): for filename in os.listdir(doc_path): if filename.endswith(".js") or filename.endswith(".html"): messages += get_messages_from_file(os.path.join(doc_path, filename)) return messages def get_server_messages(app): """Extracts all translatable strings (tagged with :func:`frappe._`) from Python modules inside an app""" messages = [] for basepath, folders, files in os.walk(frappe.get_pymodule_path(app)): for dontwalk in (".git", "public", "locale"): if dontwalk in folders: folders.remove(dontwalk) for f in files: if f.endswith(".py") or f.endswith(".html") or f.endswith(".js"): messages.extend(get_messages_from_file(os.path.join(basepath, f))) return messages def get_messages_from_include_files(app_name=None): """Returns messages from js files included at time of boot like desk.min.js for desk and web""" messages = [] for file in (frappe.get_hooks("app_include_js", app_name=app_name) or []) + (frappe.get_hooks("web_include_js", app_name=app_name) or []): messages.extend(get_messages_from_file(os.path.join(frappe.local.sites_path, file))) return messages def get_all_messages_from_js_files(app_name=None): """Extracts all translatable strings from app `.js` files""" messages = [] for app in ([app_name] if app_name else frappe.get_installed_apps()): if os.path.exists(frappe.get_app_path(app, "public")): for basepath, folders, files in os.walk(frappe.get_app_path(app, "public")): if "frappe/public/js/lib" in basepath: continue for fname in files: 
if fname.endswith(".js") or fname.endswith(".html"): messages.extend(get_messages_from_file(os.path.join(basepath, fname))) return messages def get_messages_from_file(path): """Returns a list of transatable strings from a code file :param path: path of the code file """ apps_path = get_bench_dir() if os.path.exists(path): with open(path, 'r') as sourcefile: return [(os.path.relpath(" +".join([path, str(pos)]), apps_path), message) for pos, message in extract_messages_from_code(sourcefile.read(), path.endswith(".py"))] else: # print "Translate: {0} missing".format(os.path.abspath(path)) return [] def extract_messages_from_code(code, is_py=False): """Extracts translatable srings from a code file :param code: code from which translatable files are to be extracted :param is_py: include messages in triple quotes e.g. `_('''message''')`""" try: code = render_include(code) except TemplateError: # Exception will occur when it encounters John Resig's microtemplating code pass messages = [] messages += [(m.start(), m.groups()[0]) for m in re.compile('_\("([^"]*)"').finditer(code)] messages += [(m.start(), m.groups()[0]) for m in re.compile("_\('([^']*)'").finditer(code)] if is_py: messages += [(m.start(), m.groups()[0]) for m in re.compile('_\("{3}([^"]*)"{3}.*\)').finditer(code)] messages = [(pos, message) for pos, message in messages if is_translatable(message)] return pos_to_line_no(messages, code) def is_translatable(m): if re.search("[a-z]", m) and not m.startswith("icon-") and not m.endswith("px") and not m.startswith("eval:"): return True return False def pos_to_line_no(messages, code): ret = [] messages = sorted(messages, key=lambda x: x[0]) newlines = [m.start() for m in re.compile('\\n').finditer(code)] line = 1 newline_i = 0 for pos, message in messages: while newline_i < len(newlines) and pos > newlines[newline_i]: line+=1 newline_i+= 1 ret.append((line, message)) return ret def read_csv_file(path): """Read CSV file and return as list of list :param path: File path""" from csv import reader with codecs.open(path, 'r', 'utf-8') as msgfile: data = msgfile.read() # for japanese! #wtf data = data.replace(chr(28), "").replace(chr(29), "") data = reader([r.encode('utf-8') for r in data.splitlines()]) newdata = [[unicode(val, 'utf-8') for val in row] for row in data] return newdata def write_csv_file(path, app_messages, lang_dict): """Write translation CSV file. :param path: File path, usually `[app]/translations`. :param app_messages: Translatable strings for this app. :param lang_dict: Full translated dict. """ app_messages.sort(lambda x,y: cmp(x[1], y[1])) from csv import writer with open(path, 'wb') as msgfile: w = writer(msgfile, lineterminator='\n') for p, m in app_messages: t = lang_dict.get(m, '') # strip whitespaces t = re.sub('{\s?([0-9]+)\s?}', "{\g<1>}", t) w.writerow([p.encode('utf-8') if p else '', m.encode('utf-8'), t.encode('utf-8')]) def get_untranslated(lang, untranslated_file, get_all=False): """Returns all untranslated strings for a language and writes in a file :param lang: Language code. :param untranslated_file: Output file path. 
:param get_all: Return all strings, translated or not.""" clear_cache() apps = frappe.get_all_apps(True) messages = [] untranslated = [] for app in apps: messages.extend(get_messages_for_app(app)) messages = deduplicate_messages(messages) def escape_newlines(s): return (s.replace("\\\n", "|||||") .replace("\\n", "||||") .replace("\n", "|||")) if get_all: print str(len(messages)) + " messages" with open(untranslated_file, "w") as f: for m in messages: # replace \n with ||| so that internal linebreaks don't get split f.write((escape_newlines(m[1]) + os.linesep).encode("utf-8")) else: full_dict = get_full_dict(lang) for m in messages: if not full_dict.get(m[1]): untranslated.append(m[1]) if untranslated: print str(len(untranslated)) + " missing translations of " + str(len(messages)) with open(untranslated_file, "w") as f: for m in untranslated: # replace \n with ||| so that internal linebreaks don't get split f.write((escape_newlines(m) + os.linesep).encode("utf-8")) else: print "all translated!" def update_translations(lang, untranslated_file, translated_file): """Update translations from a source and target file for a given language. :param lang: Language code (e.g. `en`). :param untranslated_file: File path with the messages in English. :param translated_file: File path with messages in language to be updated.""" clear_cache() full_dict = get_full_dict(lang) def restore_newlines(s): return (s.replace("|||||", "\\\n") .replace("| | | | |", "\\\n") .replace("||||", "\\n") .replace("| | | |", "\\n") .replace("|||", "\n") .replace("| | |", "\n")) translation_dict = {} for key, value in zip(frappe.get_file_items(untranslated_file, ignore_empty_lines=False), frappe.get_file_items(translated_file, ignore_empty_lines=False)): # undo hack in get_untranslated translation_dict[restore_newlines(key)] = restore_newlines(value) full_dict.update(translation_dict) for app in frappe.get_all_apps(True): write_translations_file(app, lang, full_dict) def import_translations(lang, path): """Import translations from file in standard format""" clear_cache() full_dict = get_full_dict(lang) full_dict.update(get_translation_dict_from_file(path, lang, 'import')) for app in frappe.get_all_apps(True): write_translations_file(app, lang, full_dict) def rebuild_all_translation_files(): """Rebuild all translation files: `[app]/translations/[lang].csv`.""" for lang in get_all_languages(): for app in frappe.get_all_apps(): write_translations_file(app, lang) def write_translations_file(app, lang, full_dict=None, app_messages=None): """Write a translation file for a given language. :param app: `app` for which translations are to be written. :param lang: Language code. :param full_dict: Full translated language dict (optional). :param app_messages: Source strings (optional). 
""" if not app_messages: app_messages = get_messages_for_app(app) if not app_messages: return tpath = frappe.get_pymodule_path(app, "translations") frappe.create_folder(tpath) write_csv_file(os.path.join(tpath, lang + ".csv"), app_messages, full_dict or get_full_dict(lang)) def send_translations(translation_dict): """Append translated dict in `frappe.local.response`""" if "__messages" not in frappe.local.response: frappe.local.response["__messages"] = {} frappe.local.response["__messages"].update(translation_dict) def deduplicate_messages(messages): ret = [] op = operator.itemgetter(1) messages = sorted(messages, key=op) for k, g in itertools.groupby(messages, op): ret.append(g.next()) return ret def get_bench_dir(): return os.path.join(frappe.__file__, '..', '..', '..', '..') def rename_language(old_name, new_name): language_in_system_settings = frappe.db.get_single_value("System Settings", "language") if language_in_system_settings == old_name: frappe.db.set_value("System Settings", "System Settings", "language", new_name) frappe.db.sql("""update `tabUser` set language=%(new_name)s where language=%(old_name)s""", { "old_name": old_name, "new_name": new_name })
mit
4,988,395,309,964,704,000
32.071547
139
0.691839
false
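To make the extraction step above concrete, here is a stand-alone sketch that applies the same _("...") / _('...') regex patterns used by extract_messages_from_code, with the same filtering idea as is_translatable, to a small code snippet; it does not require a Frappe site.

import re

code = '''
msg = _("Hello World")
err = _('Something went wrong')
icon = _("icon-user")
'''

messages = re.findall(r'_\("([^"]*)"', code) + re.findall(r"_\('([^']*)'", code)

def translatable(m):
    return bool(re.search("[a-z]", m)) and not m.startswith("icon-") \
        and not m.endswith("px") and not m.startswith("eval:")

print([m for m in messages if translatable(m)])   # ['Hello World', 'Something went wrong']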
anqxyr/jarvis
jarvis/autoban.py
1
3248
#!/usr/bin/env python3 ############################################################################### # Module Imports ############################################################################### import arrow import collections import fnmatch import pyscp import re import threading from . import core, lex ############################################################################### PROFANITY = [ 'bitch', 'fuck', 'asshole', 'penis', 'vagina', 'nigger', 'retard', 'faggot', 'chink', 'shit', 'hitler', 'douche', 'bantest'] ############################################################################### # Helper Functions ############################################################################### Ban = collections.namedtuple('Ban', 'names hosts status reason thread') def get_ban_list(): wiki = pyscp.wikidot.Wiki('05command') soup = wiki('chat-ban-page')._soup tables = soup('table', class_='wiki-content-table') bans = {} for table in tables: chats = table('tr')[0].text.strip().split() rows = table('tr')[2:] for chat in chats: bans[chat] = list(map(parse_ban, rows)) return bans def parse_ban(row): names, hosts, status, reason, thread = [i.text for i in row('td')] names = [i for i in names.strip().lower().split() if 'generic' not in i] hosts = [fnmatch.translate(i) for i in hosts.strip().split()] hosts = [re.compile(i).match for i in hosts] return Ban(names, hosts, status, reason, thread) BANS = get_ban_list() def kick_user(inp, name, message): message = str(message) inp.raw(['KICK', inp.channel, name], message) def ban_user(inp, target, length): inp.raw(['MODE', inp.channel, '+b', target]) t = threading.Timer( length, lambda: inp.raw(['MODE', inp.channel, '-b', target])) t.start() ############################################################################### # Commands ############################################################################### @core.require(channel=core.config.irc.sssc) @core.command def updatebans(inp): """Update the ban list.""" global BANS try: BANS = get_ban_list() return lex.updatebans.updated except: return lex.updatebans.failed def autoban(inp, name, host): inp.user = 'OP Alert' if any(word in name.lower() for word in PROFANITY): kick_user(inp, name, lex.autoban.kick.name) ban_user(inp, host, 10) ban_user(inp, name, 900) return lex.autoban.name(user=name) banlist = BANS.get(inp.channel) if not banlist: return # find if the user is in the banlist bans = [ b for b in banlist if name.lower() in b.names or any(pat(host) for pat in b.hosts)] for ban in bans: try: # check if the ban has expired if arrow.get(ban.status, ['M/D/YYYY', 'YYYY-MM-DD']) < arrow.now(): continue except arrow.parser.ParserError: # if we can't parse the time, it's perma pass kick_user(inp, name, lex.autoban.kick.banlist(reason=ban.reason)) ban_user(inp, host, 900) return lex.autoban.banlist(user=name, truename=ban.names[0])
mit
-8,457,858,097,561,709,000
29.074074
79
0.502155
false
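A stand-alone sketch of the host matching that parse_ban above builds with fnmatch.translate; the hostmask patterns and hostnames are invented for illustration.

import fnmatch
import re

patterns = ["*.example-isp.net", "198.51.100.*"]
matchers = [re.compile(fnmatch.translate(p)).match for p in patterns]

for host in ["user-42.example-isp.net", "203.0.113.7"]:
    banned = any(match(host) for match in matchers)
    print(host, "banned" if banned else "ok")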
encukou/freeipa
ipapython/dogtag.py
1
8227
# Authors: Rob Crittenden <[email protected]> # # Copyright (C) 2009 Red Hat # see file 'COPYING' for use and warranty information # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # import collections import gzip import io import logging from urllib.parse import urlencode import xml.dom.minidom import zlib import six # pylint: disable=ipa-forbidden-import from ipalib import api, errors from ipalib.util import create_https_connection from ipalib.errors import NetworkError from ipalib.text import _ # pylint: enable=ipa-forbidden-import from ipapython import ipautil # Python 3 rename. The package is available in "six.moves.http_client", but # pylint cannot handle classes from that alias try: import httplib except ImportError: # pylint: disable=import-error import http.client as httplib if six.PY3: unicode = str logger = logging.getLogger(__name__) Profile = collections.namedtuple('Profile', ['profile_id', 'description', 'store_issued']) INCLUDED_PROFILES = { Profile(u'caIPAserviceCert', u'Standard profile for network services', True), Profile(u'IECUserRoles', u'User profile that includes IECUserRoles extension from request', True), Profile(u'KDCs_PKINIT_Certs', u'Profile for PKINIT support by KDCs', False), Profile(u'acmeServerCert', u'ACME service certificate profile', False), } DEFAULT_PROFILE = u'caIPAserviceCert' KDC_PROFILE = u'KDCs_PKINIT_Certs' if six.PY3: gzip_decompress = gzip.decompress # pylint: disable=no-member else: # note: gzip.decompress available in Python >= 3.2 def gzip_decompress(data): with gzip.GzipFile(fileobj=io.BytesIO(data)) as f: return f.read() def error_from_xml(doc, message_template): try: item_node = doc.getElementsByTagName("Error") reason = item_node[0].childNodes[0].data return errors.RemoteRetrieveError(reason=reason) except Exception as e: return errors.RemoteRetrieveError(reason=message_template % e) def get_ca_certchain(ca_host=None): """ Retrieve the CA Certificate chain from the configured Dogtag server. 
""" if ca_host is None: ca_host = api.env.ca_host chain = None conn = httplib.HTTPConnection( ca_host, api.env.ca_install_port or 8080) conn.request("GET", "/ca/ee/ca/getCertChain") res = conn.getresponse() doc = None if res.status == 200: data = res.read() conn.close() try: doc = xml.dom.minidom.parseString(data) try: item_node = doc.getElementsByTagName("ChainBase64") chain = item_node[0].childNodes[0].data except IndexError: raise error_from_xml( doc, _("Retrieving CA cert chain failed: %s")) finally: if doc: doc.unlink() else: raise errors.RemoteRetrieveError( reason=_("request failed with HTTP status %d") % res.status) return chain def _parse_ca_status(body): doc = xml.dom.minidom.parseString(body) try: item_node = doc.getElementsByTagName("XMLResponse")[0] item_node = item_node.getElementsByTagName("Status")[0] return item_node.childNodes[0].data except IndexError: raise error_from_xml(doc, _("Retrieving CA status failed: %s")) def ca_status(ca_host=None): """Return the status of the CA, and the httpd proxy in front of it The returned status can be: - running - starting - Service Temporarily Unavailable """ if ca_host is None: ca_host = api.env.ca_host status, _headers, body = http_request( ca_host, 8080, '/ca/admin/ca/getStatus', # timeout: CA sometimes forgot to answer, we have to try again timeout=api.env.http_timeout) if status == 503: # Service temporarily unavailable return status elif status != 200: raise errors.RemoteRetrieveError( reason=_("Retrieving CA status failed with status %d") % status) return _parse_ca_status(body) def https_request( host, port, url, cafile, client_certfile, client_keyfile, method='POST', headers=None, body=None, **kw): """ :param method: HTTP request method (defalut: 'POST') :param url: The path (not complete URL!) to post to. :param body: The request body (encodes kw if None) :param kw: Keyword arguments to encode into POST body. :return: (http_status, http_headers, http_body) as (integer, dict, str) Perform a client authenticated HTTPS request """ def connection_factory(host, port): return create_https_connection( host, port, cafile=cafile, client_certfile=client_certfile, client_keyfile=client_keyfile, tls_version_min=api.env.tls_version_min, tls_version_max=api.env.tls_version_max) if body is None: body = urlencode(kw) return _httplib_request( 'https', host, port, url, connection_factory, body, method=method, headers=headers) def http_request(host, port, url, timeout=None, **kw): """ :param url: The path (not complete URL!) to post to. :param timeout: Timeout in seconds for waiting for reply. :param kw: Keyword arguments to encode into POST body. :return: (http_status, http_headers, http_body) as (integer, dict, str) Perform an HTTP request. """ body = urlencode(kw) if timeout is None: conn_opt = {} else: conn_opt = {"timeout": timeout} return _httplib_request( 'http', host, port, url, httplib.HTTPConnection, body, connection_options=conn_opt) def _httplib_request( protocol, host, port, path, connection_factory, request_body, method='POST', headers=None, connection_options=None): """ :param request_body: Request body :param connection_factory: Connection class to use. Will be called with the host and port arguments. :param method: HTTP request method (default: 'POST') :param connection_options: a dictionary that will be passed to connection_factory as keyword arguments. Perform a HTTP(s) request. 
""" if connection_options is None: connection_options = {} uri = u'%s://%s%s' % (protocol, ipautil.format_netloc(host, port), path) logger.debug('request %s %s', method, uri) logger.debug('request body %r', request_body) headers = headers or {} if ( method == 'POST' and 'content-type' not in (str(k).lower() for k in headers) ): headers['content-type'] = 'application/x-www-form-urlencoded' try: conn = connection_factory(host, port, **connection_options) conn.request(method, path, body=request_body, headers=headers) res = conn.getresponse() http_status = res.status http_headers = res.msg http_body = res.read() conn.close() except Exception as e: logger.debug("httplib request failed:", exc_info=True) raise NetworkError(uri=uri, error=str(e)) encoding = res.getheader('Content-Encoding') if encoding == 'gzip': http_body = gzip_decompress(http_body) elif encoding == 'deflate': http_body = zlib.decompress(http_body) logger.debug('response status %d', http_status) logger.debug('response headers %s', http_headers) logger.debug('response body (decoded): %r', http_body) return http_status, http_headers, http_body
gpl-3.0
6,374,223,435,190,193,000
31.389764
102
0.650055
false
guy-mograbi-at-gigaspaces/widget-dd-checks
build/vagrant/synced_folder/checks.d/pool_size.py
1
1677
import os import json import requests import re import unicodedata from checks import AgentCheck # http://code.activestate.com/recipes/577257-slugify-make-a-string-usable-in-a-url-or-filename/ _slugify_strip_re = re.compile(r'[^\w\s-]') _slugify_hyphenate_re = re.compile(r'[-\s]+') def _slugify(value): if not isinstance(value, unicode): value = unicode(value) value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore') value = unicode(_slugify_strip_re.sub('', value).strip().lower()) return _slugify_hyphenate_re.sub('-', value) # curl -H "AccountUuid: " # url = def get_pool_status( self, instance ): endpoint = instance['endpoint'] headers = dict( AccountUuid=instance['account_uuid'] ) resp = requests.get(url= endpoint + 'admin/pools', headers=headers) pools = json.loads(resp.text) for pool in pools: slug_name = _slugify(pool['poolSettings']['name']) pool_id_str = str(pool['id']) # print pool_id_str + '::' + slug_name status = json.loads(requests.get(url= endpoint + 'admin/pools/' + pool_id_str + '/status', headers=headers).text)[pool_id_str] if status is not None: # print ( slug_name + '::' + json.dumps(status) ) for stat in status['countPerNodeStatus']: value = status['countPerNodeStatus'][stat] # value_str = str(value) # print(slug_name + "__" + stat + '::' + value_str ) self.gauge(str('poolsize.' + slug_name + '.' + stat), value) class HelloCheck(AgentCheck): def check(self, instance): get_pool_status(self,instance) # print(data)
mit
-7,010,372,455,501,986,000
29.509091
134
0.613596
false
omniscale/gbi-server
app/gbi_server/views/admin_services.py
1
9888
# This file is part of the GBI project. # Copyright (C) 2015 Omniscale GmbH & Co. KG <http://omniscale.com> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from flask import render_template, flash, redirect, \ url_for, request, current_app, jsonify from flask.ext.babel import gettext as _ from sqlalchemy.exc import IntegrityError from geoalchemy2.shape import from_shape, to_shape from shapely.geometry import asShape from shapely.geometry import mapping import json from gbi_server.views.admin import admin from gbi_server.extensions import db from gbi_server.model import WMTS, WMS, WFS from gbi_server.forms.admin import WMTSForm, WMSForm, WFSForm from gbi_server.lib.external_wms import write_mapproxy_config from gbi_server.lib.capabilites import parse_capabilities_url @admin.route('/admin/wmts/list', methods=["GET"]) def wmts_list(): return render_template('admin/wmts_list.html', wmts=WMTS.query.all()) @admin.route('/admin/wmts/edit', methods=["GET", "POST"]) @admin.route('/admin/wmts/edit/<int:id>', methods=["GET", "POST"]) def wmts_edit(id=None): wmts = WMTS.by_id(id) if id else None if wmts: form = WMTSForm(request.form, wmts) else: form = WMTSForm(request.form) if form.validate_on_submit(): if not wmts: wmts = WMTS() db.session.add(wmts) wmts.url = form.data['url'] wmts.username = form.data['username'] wmts.password = form.data['password'] wmts.is_protected = form.data['is_protected'] wmts.name = form.data['name'] wmts.title = form.data['title'] wmts.format = form.data['format'] wmts.max_tiles = form.data['max_tiles'] or None try: view_coverage = json.loads(form.data['view_coverage']) only_first_geometry = False view_geometry = None # check if we have a feature colleciton than load only first geometry if 'features' in view_coverage: for feature in view_coverage['features']: if 'geometry' in feature: if view_geometry: only_first_geometry = True break view_geometry = feature['geometry'] if view_geometry: view_coverage = view_geometry if only_first_geometry: flash(_('Only the first geometry was used for view coverage'), 'success') wmts.view_coverage = from_shape(asShape(view_coverage), srid=4326) except: db.session.rollback() flash(_('Geometry not correct'), 'error') return render_template('admin/wmts_edit.html', form=form, id=id) wmts.view_level_start = form.data['view_level_start'] wmts.view_level_end = form.data['view_level_end'] wmts.is_background_layer = form.data['is_background_layer'] wmts.is_overlay = form.data['is_transparent'] wmts.is_transparent = form.data['is_transparent'] wmts.is_visible = form.data['is_visible'] wmts.is_public = form.data['is_public'] wmts.is_accessible = form.data['is_accessible'] try: db.session.commit() write_mapproxy_config(current_app) flash(_('Saved WMTS'), 'success') return redirect(url_for('admin.wmts_list')) except IntegrityError: db.session.rollback() flash(_('WMTS with this name already exist'), 'error') # load wmts_coverage as json if wmts: view_coverage = to_shape(wmts.view_coverage) form.view_coverage.data = json.dumps(mapping(view_coverage)) return 
render_template('admin/wmts_edit.html', form=form, id=id) @admin.route('/admin/wmts/remove/<int:id>', methods=["POST"]) def wmts_remove(id): wmts = WMTS.by_id(id) db.session.delete(wmts) db.session.commit() flash(_('WMTS removed'), 'success') return redirect(url_for('admin.wmts_list')) @admin.route('/admin/wms/capabilities', methods=["GET"]) def wms_capabilities(): url = request.args.get('url', False) if not url: return jsonify(error=_('Need url for capabilities')) try: data = parse_capabilities_url(url) except: data = {'error': 'load capabilities not possible'} return jsonify(data=data) @admin.route('/admin/wms/list', methods=["GET"]) def wms_list(): return render_template('admin/wms_list.html', wms=WMS.query.all()) @admin.route('/admin/wms/edit', methods=["GET", "POST"]) @admin.route('/admin/wms/edit/<int:id>', methods=["GET", "POST"]) def wms_edit(id=None): wms = WMS.by_id(id) if id else None if wms: form = WMSForm(request.form, wms) else: form = WMSForm(request.form) if form.validate_on_submit(): if not wms: wms = WMS() db.session.add(wms) wms.url = form.data['url'] wms.username = form.data['username'] wms.password = form.data['password'] wms.is_protected = form.data['is_protected'] wms.name = form.data['name'] wms.title = form.data['title'] wms.layer = form.data['layer'] wms.format = form.data['format'] wms.srs = form.data['srs'] wms.max_tiles = form.data['max_tiles'] or None wms.version = form.data['version'] try: view_coverage = json.loads(form.data['view_coverage']) only_first_geometry = False view_geometry = None # check if we have a feature colleciton than load only first geometry if 'features' in view_coverage: for feature in view_coverage['features']: if 'geometry' in feature: if view_geometry: only_first_geometry = True break view_geometry = feature['geometry'] if view_geometry: view_coverage = view_geometry if only_first_geometry: flash(_('Only the first geometry was used for view coverage'), 'success') wms.view_coverage = from_shape(asShape(view_coverage), srid=4326) except: db.session.rollback() flash(_('Geometry not correct'), 'error') return render_template('admin/wmts_edit.html', form=form, id=id) wms.view_level_start = form.data['view_level_start'] wms.view_level_end = form.data['view_level_end'] wms.is_background_layer = form.data['is_background_layer'] wms.is_overlay = form.data['is_transparent'] wms.is_transparent = form.data['is_transparent'] wms.is_visible = form.data['is_visible'] wms.is_public = form.data['is_public'] # we only support WMS with direct access wms.is_accessible = True try: db.session.commit() write_mapproxy_config(current_app) flash(_('Saved WMS'), 'success') return redirect(url_for('admin.wms_list')) except IntegrityError, ex: print ex db.session.rollback() flash(_('WMS with this name already exist'), 'error') # load wmts_coverage as json if wms: view_coverage = to_shape(wms.view_coverage) form.view_coverage.data = json.dumps(mapping(view_coverage)) return render_template('admin/wms_edit.html', form=form, id=id) @admin.route('/admin/wms/remove/<int:id>', methods=["POST"]) def wms_remove(id): wms = WMS.by_id(id) db.session.delete(wms) db.session.commit() flash(_('WMS removed'), 'success') return redirect(url_for('admin.wms_list')) @admin.route('/admin/wfs/list', methods=["GET"]) def wfs_list(): return render_template('admin/wfs_list.html', wfs=WFS.query.all()) @admin.route('/admin/wfs/edit', methods=["GET", "POST"]) @admin.route('/admin/wfs/edit/<int:id>', methods=["GET", "POST"]) def wfs_edit(id=None): wfs = 
db.session.query(WFS).filter_by(id=id).first() if id else None form = WFSForm(request.form, wfs) if form.validate_on_submit(): if not wfs: wfs = WFS() db.session.add(wfs) wfs.name = form.data['name'] wfs.host = form.data['host'] wfs.url = form.data['url'] wfs.geometry = form.data['geometry'] wfs.layer = form.data['layer'] wfs.srs = form.data['srs'] wfs.ns_prefix = form.data['ns_prefix'] wfs.ns_uri = form.data['ns_uri'] wfs.search_property = form.data['search_property'] wfs.max_features = form.data['max_features'] wfs.username = form.data['username'] wfs.password = form.data['password'] wfs.is_protected = form.data['is_protected'] try: db.session.commit() flash(_('Saved WFS'), 'success') return redirect(url_for('admin.wfs_list')) except IntegrityError: db.session.rollback() flash(_('WFS with this name already exist'), 'error') return render_template('admin/wfs_edit.html', form=form, id=id) @admin.route('/admin/wfs/remove/<int:id>', methods=["POST"]) def wfs_remove(id): wfs = WFS.by_id(id) db.session.delete(wfs) db.session.commit() flash(_('WFS removed'), 'success') return redirect(url_for('admin.wfs_list'))
apache-2.0
5,397,853,354,094,718,000
34.568345
89
0.605583
false
crcresearch/osf.io
osf/models/preprint_service.py
1
9785
# -*- coding: utf-8 -*- import urlparse from dirtyfields import DirtyFieldsMixin from django.db import models from django.utils import timezone from django.utils.functional import cached_property from django.contrib.contenttypes.fields import GenericRelation from framework.postcommit_tasks.handlers import enqueue_postcommit_task from framework.exceptions import PermissionsError from osf.models import NodeLog, Subject from osf.models.validators import validate_subject_hierarchy from osf.utils.fields import NonNaiveDateTimeField from website.preprints.tasks import on_preprint_updated, get_and_set_preprint_identifiers from website.project.licenses import set_license from website.util import api_v2_url from website.util.permissions import ADMIN from website import settings, mails from reviews.models.mixins import ReviewableMixin from reviews.workflow import States from osf.models.base import BaseModel, GuidMixin from osf.models.identifiers import IdentifierMixin, Identifier class PreprintService(DirtyFieldsMixin, GuidMixin, IdentifierMixin, ReviewableMixin, BaseModel): provider = models.ForeignKey('osf.PreprintProvider', on_delete=models.SET_NULL, related_name='preprint_services', null=True, blank=True, db_index=True) node = models.ForeignKey('osf.AbstractNode', on_delete=models.SET_NULL, related_name='preprints', null=True, blank=True, db_index=True) is_published = models.BooleanField(default=False, db_index=True) date_published = NonNaiveDateTimeField(null=True, blank=True) original_publication_date = NonNaiveDateTimeField(null=True, blank=True) license = models.ForeignKey('osf.NodeLicenseRecord', on_delete=models.SET_NULL, null=True, blank=True) subjects = models.ManyToManyField(blank=True, to='osf.Subject', related_name='preprint_services') identifiers = GenericRelation(Identifier, related_query_name='preprintservices') preprint_doi_created = NonNaiveDateTimeField(default=None, null=True, blank=True) class Meta: unique_together = ('node', 'provider') permissions = ( ('view_preprintservice', 'Can view preprint service details in the admin app.'), ) def __unicode__(self): return '{} preprint (guid={}) of {}'.format('published' if self.is_published else 'unpublished', self._id, self.node.__unicode__() if self.node else None) @property def verified_publishable(self): return self.is_published and self.node.is_preprint and not self.node.is_deleted @property def primary_file(self): if not self.node: return return self.node.preprint_file @property def article_doi(self): if not self.node: return return self.node.preprint_article_doi @property def preprint_doi(self): return self.get_identifier_value('doi') @property def is_preprint_orphan(self): if not self.node: return return self.node.is_preprint_orphan @cached_property def subject_hierarchy(self): return [ s.object_hierarchy for s in self.subjects.exclude(children__in=self.subjects.all()) ] @property def deep_url(self): # Required for GUID routing return '/preprints/{}/'.format(self._primary_key) @property def url(self): if (self.provider.domain_redirect_enabled and self.provider.domain) or self.provider._id == 'osf': return '/{}/'.format(self._id) return '/preprints/{}/{}/'.format(self.provider._id, self._id) @property def absolute_url(self): return urlparse.urljoin( self.provider.domain if self.provider.domain_redirect_enabled else settings.DOMAIN, self.url ) @property def absolute_api_v2_url(self): path = '/preprints/{}/'.format(self._id) return api_v2_url(path) def has_permission(self, *args, **kwargs): return self.node.has_permission(*args, 
**kwargs) def get_subjects(self): ret = [] for subj_list in self.subject_hierarchy: subj_hierarchy = [] for subj in subj_list: if subj: subj_hierarchy += ({'id': subj._id, 'text': subj.text}, ) if subj_hierarchy: ret.append(subj_hierarchy) return ret def set_subjects(self, preprint_subjects, auth): if not self.node.has_permission(auth.user, ADMIN): raise PermissionsError('Only admins can change a preprint\'s subjects.') old_subjects = list(self.subjects.values_list('id', flat=True)) self.subjects.clear() for subj_list in preprint_subjects: subj_hierarchy = [] for s in subj_list: subj_hierarchy.append(s) if subj_hierarchy: validate_subject_hierarchy(subj_hierarchy) for s_id in subj_hierarchy: self.subjects.add(Subject.load(s_id)) self.save(old_subjects=old_subjects) def set_primary_file(self, preprint_file, auth, save=False): if not self.node.has_permission(auth.user, ADMIN): raise PermissionsError('Only admins can change a preprint\'s primary file.') if preprint_file.node != self.node or preprint_file.provider != 'osfstorage': raise ValueError('This file is not a valid primary file for this preprint.') existing_file = self.node.preprint_file self.node.preprint_file = preprint_file # only log if updating the preprint file, not adding for the first time if existing_file: self.node.add_log( action=NodeLog.PREPRINT_FILE_UPDATED, params={ 'preprint': self._id }, auth=auth, save=False ) if save: self.save() self.node.save() def set_published(self, published, auth, save=False): if not self.node.has_permission(auth.user, ADMIN): raise PermissionsError('Only admins can publish a preprint.') if self.is_published and not published: raise ValueError('Cannot unpublish preprint.') self.is_published = published if published: if not (self.node.preprint_file and self.node.preprint_file.node == self.node): raise ValueError('Preprint node is not a valid preprint; cannot publish.') if not self.provider: raise ValueError('Preprint provider not specified; cannot publish.') if not self.subjects.exists(): raise ValueError('Preprint must have at least one subject to be published.') self.date_published = timezone.now() self.node._has_abandoned_preprint = False # In case this provider is ever set up to use a reviews workflow, put this preprint in a sensible state self.reviews_state = States.ACCEPTED.value self.date_last_transitioned = self.date_published self.node.add_log( action=NodeLog.PREPRINT_INITIATED, params={ 'preprint': self._id }, auth=auth, save=False, ) if not self.node.is_public: self.node.set_privacy( self.node.PUBLIC, auth=None, log=True ) # This should be called after all fields for EZID metadta have been set enqueue_postcommit_task(get_and_set_preprint_identifiers, (), {'preprint_id': self._id}, celery=True) self._send_preprint_confirmation(auth) if save: self.node.save() self.save() def set_preprint_license(self, license_detail, auth, save=False): license_record, license_changed = set_license(self, license_detail, auth, node_type='preprint') if license_changed: self.node.add_log( action=NodeLog.PREPRINT_LICENSE_UPDATED, params={ 'preprint': self._id, 'new_license': license_record.node_license.name }, auth=auth, save=False ) if save: self.save() def set_identifier_values(self, doi, ark, save=False): self.set_identifier_value('doi', doi) self.set_identifier_value('ark', ark) self.preprint_doi_created = timezone.now() if save: self.save() def save(self, *args, **kwargs): first_save = not bool(self.pk) saved_fields = self.get_dirty_fields() or [] old_subjects = kwargs.pop('old_subjects', []) ret = 
super(PreprintService, self).save(*args, **kwargs) if (not first_save and 'is_published' in saved_fields) or self.is_published: enqueue_postcommit_task(on_preprint_updated, (self._id,), {'old_subjects': old_subjects}, celery=True) return ret def _send_preprint_confirmation(self, auth): # Send creator confirmation email if self.provider._id == 'osf': email_template = getattr(mails, 'PREPRINT_CONFIRMATION_DEFAULT') else: email_template = getattr(mails, 'PREPRINT_CONFIRMATION_BRANDED')(self.provider) mails.send_mail( auth.user.username, email_template, user=auth.user, node=self.node, preprint=self )
apache-2.0
9,132,083,858,299,472,000
36.205323
162
0.6093
false
ltowarek/rpi-dehumidifier
rpi-dehumidifier.py
1
1035
import time import lcddriver import RPi.GPIO as GPIO from Adafruit_BME280 import BME280, BME280_OSAMPLE_8 RELAY_PIN = 21 LIMIT = 60 try: GPIO.setmode(GPIO.BCM) GPIO.setup(RELAY_PIN, GPIO.OUT) lcd = lcddriver.lcd() while True: sensor = BME280(mode=BME280_OSAMPLE_8) degrees = sensor.read_temperature() pascals = sensor.read_pressure() hectopascals = pascals / 100 humidity = sensor.read_humidity() print 'Timestamp = {0:0.3f}'.format(sensor.t_fine) print 'Temp = {0:0.3f} deg C'.format(degrees) print 'Pressure = {0:0.2f} hPa'.format(hectopascals) print 'Humidity = {0:0.2f} %'.format(humidity) lcd.lcd_clear() lcd.lcd_display_string('Humidity:', 1) lcd.lcd_display_string('{0:0.2f}%'.format(humidity), 2) if humidity > LIMIT: GPIO.output(RELAY_PIN, GPIO.LOW) else: GPIO.output(RELAY_PIN, GPIO.HIGH) time.sleep(3) except KeyboardInterrupt: GPIO.cleanup()
gpl-2.0
7,420,419,066,889,055,000
24.875
63
0.608696
false
ajrichards/GenesDI
genesdi/View.py
1
40260
''' A. Richards ''' import os,re,sys,time sys.path.append(os.path.join(".","MyWidgets")) #from PIL import Image from ImageTk import PhotoImage import Tkinter as tk import tkMessageBox from tkFileDialog import askopenfilename import numpy as np from FileControls import * from BuildingBlocks import * from DataProcessingSettings import DataProcessingSettings from QualityAssuranceSettings import QualityAssuranceSettings from ModelSettings import ModelSettings from ResultsNavigationSettings import ResultsNavigationSettings from DataProcessingLeft import DataProcessingLeft from ResolutionSelector import ResolutionSelector from QualityAssuranceLeft import QualityAssuranceLeft from ResultsNavigationLeft import ResultsNavigationLeft from ExistingProjectsMenu import ExistingProjectsMenu from ImageButtonMagic import ImageButtonMagic from Statebar import Statebar class View: def __init__(self,master,controller,model): self.master = master self.controller = controller self.log = self.controller.log self.model = model self.messageBoard = None self.centerAreaFrame = None self.leftAreaFrame = None self.dataProcessingSettings = None self.topAreaFrame = None self.stateBar = None self.stateList = ['Data Processing', 'Quality Assurance', 'Model', 'Results Navigation'] self.bgcolor = self.master.cget("bg") #"#DDDDFF" self.fgcolor = "#FFFF99" #"#CCCC66" self.lineNumber = 0 # data processing variables self.selectedTransform = 'option 1' self.selectedCompensation = 'option 1' self.subsampleStates = None # results navigation variables self.resultsMode = 'model select' self.selectedResultsChannel1 = None self.selectedResultsChannel2 = None self.selectedComponents = None ## change the window title (default is Tk) self.master.wm_title(self.controller.appName) ## set area to screen size self.set_area_to_screen_size() ### create status bar self.status = StatusBar(self.master,self.controller) ### create the top area self.topArea = tk.Frame(self.master) self.topArea.pack(fill=tk.X,side=tk.TOP) self.topArea.config(bg=self.bgcolor) ### create frame pieces self.render_menus() self.render_rt_side() self.render_main_canvas() self.render_left_side() def set_area_to_screen_size(self): self.w,self.h = float(self.master.winfo_screenwidth()),float(self.master.winfo_screenheight()) ## check for dual screens if self.w > 2000: self.w = self.w / 2.0 #self.controller.root.overrideredirect(1) # if you also want to get rid of the titlebar self.master.geometry("%dx%d+0+0"%(self.w,self.h)) def set_state(self,state,progressbarMsg=None,img=None,comparisonStates=None,resultsNavMode=None): self.log.log['currentState'] = state ## save project if self.controller.homeDir: self.log.write() print 'saving project at', self.log.log['currentState'] ## keep track of the highest state if self.stateList.__contains__(self.log.log['currentState']): if self.stateList.index(self.log.log['currentState']) > self.log.log['highestState']: self.log.log['highestState'] = self.stateList.index(self.log.log['currentState']) if self.log.log['currentState'] == 'Progressbar': self.render_main_canvas(canvasState='progressbar',progressbarMsg=progressbarMsg) elif self.log.log['currentState'] == 'configuration': self.render_main_canvas(canvasState='configuration') elif self.log.log['currentState'] == 'projects menu': self.render_main_canvas(canvasState='projects menu') elif self.log.log['currentState'] == 'initial': self.render_main_canvas(canvasState='initial') elif self.log.log['currentState'] == 'Data Processing': self.render_main_canvas(canvasState='processing') elif 
self.log.log['currentState'] == 'Quality Assurance': self.render_main_canvas(canvasState='qa',img=img,comparisonStates=comparisonStates) elif self.log.log['currentState'] == 'Model': self.render_main_canvas(canvasState='model') elif self.log.log['currentState'] == 'Results Navigation': self.render_main_canvas(canvasState='results',resultsNavMode=None) else: print "ERROR: invalid state specified: %s" %state self.render_state() ############################################################################## # # image manipulation functions # ############################################################################## def render_state(self): if self.stateBar != None: self.stateBar.destroy() self.stateBar = Statebar(self.topArea,self.stateList,self.log.log['currentState'],handleTransition=self.handle_state_transition) self.stateBar.config(bg=self.bgcolor) self.stateBar.pack(side=tk.TOP,fill=tk.X,pady=2,padx=5) def mount_image(self,fileName,recreate=True): if fileName != "experiment_icon": fileName = os.path.join(self.controller.homeDir,'figs',fileName) if os.path.isfile(fileName+".png") == False: print "ERROR: Bad specified image file name" self.i = PhotoImage(file = fileName + ".gif") else: self.i = PhotoImage(file = fileName + ".gif") else: self.i = PhotoImage(file = fileName + ".gif") imageX = 0.5*self.canvasWidth imageY = 0.45*self.canvasHeight self.canvas.delete("image") self.canvas.pack(fill=tk.BOTH,side=tk.TOP,anchor=tk.N) self.canvas.create_image(imageX,imageY,image=self.i,tags="image") self.canvas.config(bg=self.bgcolor) def unmount_image(self): self.canvas.delete(image) ############################################################################## # # menu functions # ############################################################################## def render_menus(self): ## initialize menu and menu commands as none self.filemenu = None self.helpmenu = None self.actionmenu = None ## load appropriate menus print 'loading menu' self.menu = tk.Menu(self.master) self.master.config(menu=self.menu) ## file menu self.filemenu = tk.Menu(self.menu) self.menu.add_cascade(label="File", menu = self.filemenu) self.filemenu.add_command(label="Create new project", command = self.create_new_project) self.filemenu.add_command(label="Open existing project", command = self.open_existing_projects_menu) self.filemenu.add_command(label="Export project", command = self.callback) self.filemenu.add_command(label="View project logfile", command = self.callback) if self.log.log['currentState'] in ['Quality Assurance','Model','Results Navigation']: self.filemenu.add_command(label="Export image", command = self.controller.saveas_image) self.filemenu.add_separator() if self.log.log['currentState'] in ['Data Processing','Quality Assurance','Model Preparation','Results Navigation']: self.filemenu.add_command(label="Close current project", command = self.topArea.quit) self.filemenu.add_command(label="Exit", command = self.topArea.quit) ## view menu self.viewmenu = tk.Menu(self.master) self.menu.add_cascade(label="View", menu=self.viewmenu) self.viewmenu.add_command(label="Set the default screen resolution", command=self.set_default_screen_resolution) ## action menu if self.log.log['currentState'] in ['Data Processing','Quality Assurance','Model','Results Navigation']: self.actionmenu = tk.Menu(self.menu) self.menu.add_cascade(label="Action", menu = self.actionmenu) self.actionmenu.add_command(label="action1", command = self.callback) if self.log.log['currentState'] in ['Data Processing']: 
self.actionmenu.add_command(label="Load additional file", command = self.controller.load_additional_fcs_files) ## help menu self.helpmenu = tk.Menu(self.menu) self.menu.add_cascade(label="Help", menu = self.helpmenu) self.helpmenu.add_command(label="User Manual", command = self.controller.show_documentation) self.helpmenu.add_command(label="About...", command = self.callback) def render_rt_side(self): ### create the right side self.rightAreaFrame = tk.Frame(self.master) #self.rightAreaFrame.pack_propagate(0) self.rightAreaFrame["bg"] = self.bgcolor self.rightArea = tk.Canvas(self.rightAreaFrame, width = 0.02 * self.w, height = 0.95 * self.h) self.rightArea.configure(background = self.bgcolor) self.rightAreaFrame.pack(side = tk.RIGHT) self.rightArea.pack() ############################################################################## # # main canvas functions # ############################################################################## def open_existing_projects_menu(self): self.set_state('projects menu') def open_existing_project(self): selectedProject = self.existingProjectsMenu.get_selected_project() if selectedProject != None: self.controller.projectID = selectedProject self.controller.homeDir = os.path.join(".","projects",self.controller.projectID) self.log.initialize(self.controller.projectID,self.controller.homeDir,load=True) self.controller.handle_subsampling() self.set_state(self.log.log['currentState']) print "the current state: ", self.log.log['currentState'] else: self.display_warning("You must select an existing project first") def render_main_canvas(self,canvasState='initial',progressbarMsg=None,img=None,comparisonStates=None,useCurrentStates=False,resultsNavMode=None): ### create the center if self.centerAreaFrame != None: self.centerAreaFrame.destroy() self.canvasWidth = 0.72 * self.w # 0.72 self.canvasHeight = 0.95 * self.h # 0.85 remember to change them all self.controller.mainAreaWidth = self.canvasWidth self.controller.mainAreaHeight = self.canvasHeight self.textSpacing = 0.02* self.canvasHeight self.centerAreaFrame = tk.Frame(self.master,height=self.canvasHeight,width=self.canvasWidth) self.centerAreaFrame["bg"] = self.bgcolor self.centerAreaFrame.pack_propagate(0) # don't shrink self.centerAreaFrame.pack(side = tk.RIGHT) self.canvas = tk.Canvas(self.centerAreaFrame,height=self.canvasHeight,width=self.canvasWidth) if canvasState != 'initial' and self.controller.homeDir != None: fileNames = get_fcs_file_names(self.controller.homeDir) if canvasState == 'initial': self.mount_image("experiment_icon") print 'rendering main canvas -- initial' self.canvas.pack() elif canvasState == 'configuration': msg = "Your current screen geometry is %sx%s\nHere you may select another resolution\n\n"%(int(self.w), int(self.h)) self.resolutionSelector = ResolutionSelector(self.centerAreaFrame,msg,bg=self.bgcolor,fg=self.fgcolor,command=self.change_default_screen_resolution) elif canvasState == 'progressbar': self.progressbar = Progressbar(self.centerAreaFrame,withLabel=progressbarMsg,bg=self.bgcolor,fg=self.fgcolor) self.progressbar.pack(side=tk.TOP,fill=tk.BOTH,pady=0.2*self.canvasHeight,anchor=tk.S) elif canvasState == 'processing': dataProcessing = True masterChannelList = self.model.get_master_channel_list() if useCurrentStates == False: self.set_selected_channel_states() self.dataProcessingSettings = DataProcessingSettings(self.centerAreaFrame,fileNames,masterChannelList,width=self.canvasWidth, bg=self.bgcolor,fg=self.fgcolor,showNameBtn=self.display_file_by_button, 
loadbtnFn=self.controller.load_additional_fcs_files,selectAllFn=self.handle_select_all_channels, channelStates=self.controller.log.log['processingChannels'],contbtnFn=self.processing_to_qa) elif canvasState == 'qa': imgNames = get_img_file_names(self.controller.homeDir) imgNames = [image[:-4] for image in imgNames] self.canvas = tk.Canvas(self.centerAreaFrame,height=self.canvasHeight,width=self.canvasWidth) if img != None: self.mount_image(img) # img self.canvas.pack(anchor=tk.N,side=tk.TOP) else: mode = 'view all' self.qualityAssuranceSettings = QualityAssuranceSettings(self.centerAreaFrame,mode,fileNames,imgNames,self.controller.homeDir, imgHandler = self.show_selected_image, width=self.canvasWidth, bg=self.bgcolor,fg=self.fgcolor,allStates=comparisonStates, contbtnFn=self.qa_to_model,selectAllCmd=None) self.qualityAssuranceSettings.pack() elif canvasState == 'model': self.modelSettings = ModelSettings(self.centerAreaFrame,fileNames,width=self.canvasWidth, bg=self.bgcolor,fg=self.fgcolor,runModelBtnFn=self.model_to_results_navigation, contbtnFn=self.model_to_results_navigation) elif canvasState == 'results': modelList = get_models_run(self.controller.homeDir) if self.resultsMode in ['model select']: self.resultsNavigationSettings = ResultsNavigationSettings(self.centerAreaFrame,modelList,self.resultsMode,width=self.canvasWidth, dblClickBtnFn=self.show_model_log,bg=self.bgcolor,fg=self.fgcolor, loadbtnFn=None,contBtnFn=self.handle_results_navigation_settings) self.resultsSelectCanvas = tk.Canvas(self.centerAreaFrame,width=0.5*self.canvasWidth,height=0.5*self.canvasHeight) self.resultsSelectCanvas.configure(background=self.bgcolor) self.resultsSelectCanvas.pack(side=tk.LEFT) else: if self.selectedResultsChannel1 == None or self.selectedResultsChannel2 == None: fileName = self.log.log['selectedFile'] imgDir = os.path.join(self.controller.homeDir,'figs',re.sub(fileName[:-4]+"\_","",self.log.log['selectedModel'])) imageHandler = self.show_selected_image self.ibm = ImageButtonMagic(self.centerAreaFrame,imgDir,imageHandler) self.ibm.config(bg=self.bgcolor) self.ibm.pack() else: ### get model if not self.statModel: self.statModel,self.modelClasses = self.model.load_model_results_pickle(self.log.log['selectedModel']) try: self.statModel except: self.statModel = None self.statModelClasses = None self.mplCanvas = self.model.make_scatter_plot(self.log.log['selectedFile'],self.selectedResultsChannel1, self.selectedResultsChannel2,labels=self.statModelClasses, root=self.centerAreaFrame,width=self.canvasWidth,height=self.canvasHeight,getCanvas=True) self.mplCanvas._tkcanvas.pack(fill=tk.BOTH,expand=1) elif canvasState == 'projects menu': existingProjects = get_project_names() self.existingProjectsMenu = ExistingProjectsMenu(self.centerAreaFrame,existingProjects,bg=self.bgcolor,fg=self.fgcolor, command=self.open_existing_project,loadBtnFn=self.open_existing_project) self.existingProjectsMenu.pack() ### finally render the left side self.render_left_side() def display_file_by_button(self,ind): fileList = get_fcs_file_names(self.controller.homeDir) if ind == 0: self.status.set("Does that look like a file to you?") else: self.status.set(fileList[ind-1]) def handle_select_all_channels(self): self.set_state("Data Processing") def set_selected_channel_states(self): masterList = self.model.get_master_channel_list() fileList = get_fcs_file_names(self.controller.homeDir) ## create a list of list to represent the channel states (each list is one row) channelStates = [ 
[0]+np.zeros(len(masterList),dtype=int).tolist()+[0,0,0] for c in range(len(fileList))] ## if applicable get selected channels if self.dataProcessingSettings != None: selectedChannels = self.dataProcessingSettings.get_selected_channels() for row in range(len(selectedChannels)): for col in range(len(selectedChannels[0])): state = selectedChannels[row][col] if state != 0: channelStates[row][col] = state ## check to see if select all was used selectedRow = None if self.dataProcessingSettings != None: currentChannels = self.log.log['processingChannels'] for row in range(len(selectedChannels)): stateSelected = selectedChannels[row][0] stateCurrent = currentChannels[row][0] if stateSelected != stateCurrent: selectedRow = row if selectedRow != None: if currentChannels[selectedRow][0] == 1: channelStates[selectedRow] = [0 for i in range(len(currentChannels[row]))] elif currentChannels[selectedRow][0] == 0: channelStates[selectedRow] = [1 for i in range(len(currentChannels[row]))] ## ready unavailable channels for disabling (-1) for f in range(len(fileList)): file = fileList[f] channels = self.model.get_file_channel_list(file) masterIndices = [np.where(np.array(masterList) == ch)[0][0] for ch in channels] disabledChannels = list(set(range(len(masterList))).difference(set(masterIndices))) for c in range(len(masterList)): if c in disabledChannels: channelStates[f][c+1] = -1 self.controller.log.log['processingChannels'] = channelStates ############################################################################## # # messageboard, fileselector # ############################################################################## def render_left_side(self,selectAllCompares=False): ### create the left side if self.leftAreaFrame != None: self.leftAreaFrame.destroy() self.leftAreaFrame = tk.Frame(self.master,height=self.canvasHeight) self.leftAreaFrame["bg"] = self.bgcolor self.leftAreaFrame.pack(side = tk.RIGHT,expand=tk.Y) ## to be carried out everytime the left are frame is rendered if self.controller.homeDir: fileList = get_fcs_file_names(self.controller.homeDir) imgNames = get_img_file_names(self.controller.homeDir) if self.log.log['selectedFile'] == None: self.log.log['selectedFile'] = fileList[0] fileStates = [tk.NORMAL for i in fileList] fileStates[fileList.index(self.log.log['selectedFile'])] = tk.ACTIVE ## data processing if self.log.log['currentState'] == 'Data Processing': self.dataProcessingLeft = DataProcessingLeft(self.leftAreaFrame,fileList,fileStates,bg=self.bgcolor,fg=self.fgcolor,subsampleStates=self.subsampleStates, selectedTransformCmd=self.set_selected_transform,subsampleFn=self.set_subsample, selectedCompCmd=self.set_selected_compensation,selectedCompensation=self.selectedCompensation, selectedTransform = self.selectedTransform,uploadBtnFn=None,uploadFcsFn=self.controller.load_additional_fcs_files, rmBtnFn=self.rm_selected_file,fileSelectorResponse=self.file_selector_response) self.dataProcessingLeft.pack(side=tk.TOP) ## quality assurance elif self.log.log['currentState'] == 'Quality Assurance': self.qualityAssuranceLeft = QualityAssuranceLeft(self.leftAreaFrame,imgNames,fileList,fileStates,fileSelectorResponse=self.file_selector_response, bg=self.bgcolor,fg=self.fgcolor,viewSelectedCmd=self.show_selected_image, contbtnFn=self.qa_to_model,returnBtnFn=self.close_image_viewer,viewAllFn=self.handle_view_all) self.qualityAssuranceLeft.pack(side=tk.TOP) ## results navigation elif self.log.log['currentState'] == 'Results Navigation': modelNames = get_models_run(self.controller.homeDir) 
channelList = self.model.get_file_channel_list(self.log.log['selectedFile']) model = None # get file specific indices try: self.statModel,self.statModelClasses = self.model.load_model_results_pickle(self.log.log['selectedModel']) channelsSelected = self.log.log['resultsChannels'] fileSpecificIndices = [channelList.index(i) for i in channelsSelected] except: fileSpecificIndices = None self.statModel = None self.statModelClasses = None channelsSelected = None if self.log.log['componentStates'] != None: componentStates = self.log.log['componentStates'] else: componentStates = None self.resultsNavigationLeft = ResultsNavigationLeft(self.leftAreaFrame,channelList,self.resultsMode,fileList,fileStates,self.statModel, selectedInds = fileSpecificIndices,fileSelectorResponse=self.file_selector_response, bg=self.bgcolor,fg=self.fgcolor,viewSelectedCmd=self.show_selected_image, contbtnFn=self.qa_to_model,componentStates = componentStates,viewAllFn=self.handle_view_all, returnBtnFn=self.close_image_viewer,rerenderFn=self.handle_results_navigation_figures) self.resultsNavigationLeft.pack(side=tk.TOP) self.messageBoard = tk.Canvas(self.leftAreaFrame,width=0.3*self.w,height=self.canvasHeight) self.messageBoard.configure(background = self.bgcolor) self.messageBoard.pack(side=tk.LEFT,expand=tk.Y) ## this function is called from the fileselector widget def file_selector_response(self): if self.log.log['currentState'] == "Data Processing": self.lineNumber = 0 self.log.log['selectedFile'] = self.dataProcessingLeft.get_selected_file() file = self.model.pyfcm_load_fcs_file(self.log.log['selectedFile']) n,d = np.shape(file) self.render_left_side() self.message_board_display("file - %s "%self.log.log['selectedFile']) self.message_board_display("observations - %s "%n) self.message_board_display("dimensions - %s "%d) else: print "resonse method not available yet" ######################################################################### # # Results navigation handles # ######################################################################### def handle_results_navigation_settings(self): selectedModel = self.resultsNavigationSettings.get_selected_model() fileList = get_fcs_file_names(self.controller.homeDir) fileName = None if selectedModel != None: for file in fileList: m = re.search(re.sub("[\.fcs|\.pickle]","",file),selectedModel) if m: fileName = m.group(0) if selectedModel != None: self.log.log['selectedModel'] = selectedModel self.log.log['selectedFile'] = fileName + ".fcs" self.resultsMode = 'mixture components' self.set_state('Results Navigation') elif selectedModel == None: self.display_warning("You must select a model before navigating its results") def handle_results_navigation_figures(self): imageChannels = self.resultsNavigationLeft.get_image_channels() self.log.log['componentStates'] = self.resultsNavigationLeft.get_component_states() if not self.statModel: self.statModel,self.statModelClasses = self.model.load_model_results_pickle(self.log.log['selectedModel']) if imageChannels != None: self.selectedResultsChannel1 = imageChannels[0] self.selectedResultsChannel2 = imageChannels[1] self.log.log['resultsChannels'] = [imageChannels[0],imageChannels[1]] print "setting results channels to", self.log.log['resultsChannels'] self.set_state('Results Navigation') else: self.display_warning("You must select a two channels file before viewing a plot") def handle_results_navigation_model_summary(self): print 'handling results naviation model summary' def show_model_log(self): selectedModel = 
self.resultsNavigationSettings.get_selected_model() fileList = get_fcs_file_names(self.controller.homeDir) fileName = None for file in fileList: m = re.search(re.sub("[\.fcs|\.pickle]","",file),selectedModel) if m: fileName = m.group(0) lineCount = 1 whiteSpace = self.h / 70.0 modelLogFile = self.log.read_model_log(selectedModel) self.resultsSelectCanvas.delete(tk.ALL) logKeys = modelLogFile.keys() logKeys.sort() for key in logKeys: item = modelLogFile[key] self.resultsSelectCanvas.create_text(0.05*self.w,lineCount*whiteSpace,text="%s - %s"%(key,item),fill="black",anchor=tk.W) lineCount+=1 self.status.set("displaying results for %s"%selectedModel) ######################################################################### # # Data processing handles # ######################################################################### def rm_selected_file(self): if self.log.log['selectedFile'] == None: self.display_warning("You must select a file before removing it") else: selectedFile = os.path.join(self.controller.homeDir,'data',self.log.log['selectedFile']) self.controller.rm_fcs_file(selectedFile) self.log.log['selectedFile'] = None self.set_state("Data Processing") def set_selected_transform(self): self.selectedTransform = self.dataProcessingLeft.get_selected_transform() def set_selected_compensation(self): self.selectedCompensation = self.dataProcessingLeft.get_selected_compensation() def message_board_display(self,msg,align='left',withButton=False): if self.lineNumber == 0: self.render_left_side() whiteSpace = self.h / 100.0 self.lineNumber+=1 if self.lineNumber == 1: self.lineNumber+=1 self.message_board_display("Project - %s"%self.controller.projectID,align='left') self.lineNumber+=1 lineHeight = (self.lineNumber * whiteSpace) #+ (0.1 * whiteSpace) self.messageBoard.create_line((0,lineHeight),(0.3*self.w,lineHeight), fill="black") self.lineNumber+=2 if align=='center': self.messageBoard.create_text(0.15*self.w,self.lineNumber*whiteSpace,text=msg,fill="black",tags="line"+str(self.lineNumber),anchor=tk.CENTER) self.lineNumber+=1 elif align == 'left': self.messageBoard.create_text(0.008*self.w,self.lineNumber*whiteSpace,text=msg,fill="black",tags="line"+str(self.lineNumber),anchor=tk.W) self.lineNumber+=1 else: print "ERROR: did not impliment other aligns besides center and left" def show_fcs_file_names(self): self.lineNumber = 0 for fileName in os.listdir(os.path.join(self.controller.homeDir,"data")): if re.search("\.fcs",fileName): self.message_board_display("file - %s"%fileName,align='left', withButton=True) def show_selected_image(self): if self.log.log['currentState'] == 'Quality Assurance': img = self.qualityAssuranceLeft.get_selected_image() if img == None: img = self.qualityAssuranceSettings.ibm.selectedImg img = os.path.split(img)[-1] img = re.sub('\_thumb','',img)[:-4] self.set_state('Quality Assurance',img=img) elif self.log.log['currentState'] == 'Results Navigation': print 'should be handling generatino of image' fileChannels = self.model.get_file_channel_list(self.log.log['selectedFile']) img = self.ibm.selectedImg img = os.path.split(img)[-1] img = re.sub('\_thumb','',img)[:-4] # get the channels from file name imageChannels = [] for chan in fileChannels: if re.search(chan,img): imageChannels.append(chan) self.log.log['componentStates'] = self.resultsNavigationLeft.get_component_states() if not self.statModel: self.statModel,self.statModelClasses = self.model.load_model_results_pickle(self.log.log['selectedModel']) if imageChannels != None: self.selectedResultsChannel1 = 
imageChannels[0] self.selectedResultsChannel2 = imageChannels[1] self.log.log['resultsChannels'] = [imageChannels[0],imageChannels[1]] print "setting results channels to", self.log.log['resultsChannels'] self.set_state('Results Navigation') def set_subsample(self): if self.dataProcessingLeft.get_subsample() != 'All Data': ss = re.sub("\s","",self.dataProcessingLeft.get_subsample()) else: ss = self.dataProcessingLeft.get_subsample() self.log.log['subsample'] = ss subsamples = ['All Data','1e3','1e4','5e4'] # make sure this matches in dataprocessingleft self.subsampleStates = [tk.NORMAL for st in range(len(subsamples))] if subsamples.__contains__(self.log.log['subsample']) == True: self.subsampleStates[subsamples.index(self.log.log['subsample'])] = tk.ACTIVE def create_new_project(self): tkMessageBox.showinfo(self.controller.appName,"Load a *.fcs file ") fcsFileName = askopenfilename() self.controller.create_new_project(fcsFileName) if self.controller.homeDir != None: self.set_state("Data Processing") ######################################################################### # # Reuseable handles # ######################################################################### def handle_view_all(self): if self.log.log['currentState'] == "Quality Assurance": self.set_state('Quality Assurance') if self.log.log['currentState'] == "Results Navigation": self.selectedResultsChannel1 = None self.selectedResultsChannel2 = None self.log.log['resultsChannels'] = None self.set_state('Results Navigation') def close_image_viewer(self): self.set_state(self.log.log['currentState']) def select_all_comparisons(self): if self.log.log['currentState'] == 'Quality Assurance': selectState = self.qualityAssuranceSettings.get_select_all_state() elif self.log.log['currentState'] == 'Results Navigation': selectState = self.resultsNavigationSettings.get_select_all_state() imgNames = get_img_file_names(self.controller.homeDir) imgNames = [image[:-4] for image in imgNames] if selectState[0] == 1: comparisonStates = [tk.ACTIVE for i in imgNames] else: comparisonStates = None self.set_state(self.log.log['currentState'],comparisonStates=comparisonStates) ############################################################################## # # model handles # ############################################################################## def run_selected_model(self): modelSelectedLong = self.modelSettings.get_selected_model() self.set_state('Progressbar',progressbarMsg="Running '%s'..."%modelSelectedLong) fileList = get_fcs_file_names(self.controller.homeDir) if modelSelectedLong == "Dirichlet Process Mixture Model - CPU Version": self.log.log['modelToRun'] = "dpmm-cpu" elif modelSelectedLong == "Dirichlet Process Mixture Model - GPU Version": self.log.log['modelToRun'] = "dpmm-gpu" elif modelSelectedLong == "Spectral Clustering - Ng Algorithm": self.log.log['modelToRun'] = "sc-ng" else: print "ERROR: run selected model returned a bad model name:%s"%modelSelectedLong self.controller.run_selected_model() self.status.set("Model run finished") ############################################################################## # # functions that control the state transitions # ############################################################################## def handle_state_transition(self): state = self.stateBar.get_current_state() #if self.stateList.__contains__(self.log.log['currentState']): if self.stateList.index(state) > self.log.log['highestState']: self.display_warning('User must follow the flow of the pipeline \n i.e. 
please do not skip steps') self.render_state() else: ## reset stage specific variables self.resultsMode = 'model select' self.selectedResultsChannel1 = None self.selectedResultsChannel2 = None self.selectedComponents = None self.set_state(state) def processing_to_qa(self): #self.set_selected_channel_states() #if np.array(selectedChannels).sum() < 2: # self.display_error("you must select at least 2 channels") #else: self.set_state('Progressbar',progressbarMsg="Creating all specified images...") self.status.set("addressing subsampling...") self.set_subsample() self.controller.handle_subsampling() self.set_selected_channel_states() self.status.set("Rendering images...") self.controller.process_images() self.status.set("Assess the quality and return to previous steps if necessary") self.set_state('Quality Assurance') def qa_to_model(self): self.set_state('Model') def model_to_results_navigation(self): self.run_selected_model() self.set_state('Progressbar',progressbarMsg="Rendering images'...") self.controller.process_results_images(self.log.log['modelToRun']) self.set_state('Results Navigation') def enable_cluster_selection(self): self.canvas.unbind("<ButtonPress-1>") self.canvas.unbind("<ButtonRelease-1>") self.canvas.bind("<ButtonPress-1>", self.model.press) self.canvas.bind("<ButtonRelease-1>", self.model.release) ############################################################################## # # basic message functions # ############################################################################## def display_info(self,message): tkMessageBox.showinfo(self.controller.appName, message) self.status.set("info: %s"%message) def display_warning(self,message): tkMessageBox.showwarning(self.controller.appName, message) self.status.set("warning: %s"%message) def display_error(self,message): tkMessageBox.showerror(self.controller.appName, message) self.status.set("error: %s"%message) def callback(self): tkMessageBox.showinfo(self.controller.appName, "this function does not exist yet") self.status.set("this function does not exist yet") ############################################################################## # # system specific functions # ############################################################################## def set_default_screen_resolution(self): self.set_state('configuration') def change_default_screen_resolution(self): newRes = self.resolutionSelector.get_selected_resolution() width,height = newRes.split('x') self.w = float(width) self.h = float(height) self.master.geometry("%dx%d+0+0"%(self.w,self.h)) ### create the top area self.topArea.destroy() self.filemenu.destroy() self.leftAreaFrame.destroy() self.rightAreaFrame.destroy() self.centerAreaFrame.destroy() self.topArea = tk.Frame(self.master) self.topArea.pack(fill=tk.X,side=tk.TOP) self.topArea.config(bg=self.bgcolor) ### create frame pieces self.render_menus() self.render_rt_side() self.render_main_canvas() self.render_left_side() self.status.destroy() self.status = StatusBar(self.master,self.controller) self.status.set('resolution has been set to %s'%newRes) ############################################################################## # # status bar class # ############################################################################## class StatusBar(tk.Frame): def __init__(self, master, controller): self.master = master self.controller = controller self.frame = tk.Frame.__init__(self, self.master) self.label = tk.Label(self, bd=1, relief=tk.SUNKEN, anchor=tk.W) self.label.config(text = " Load an existing project or a new 
FCS file to get started ") self.label.pack(fill=tk.X) self.pack(side = tk.BOTTOM) def set(self, msg): self.label.config(text = msg) self.label.update_idletasks() def clear(self): self.label.config(text="") self.label.update_idletasks()
gpl-3.0
1,384,668,016,793,105,700
45.543353
171
0.586538
false
ajrbyers/paywallwatch
src/blog/views.py
1
1718
from django.shortcuts import redirect, render, get_object_or_404 from django.core.urlresolvers import reverse from django.utils import timezone from django.contrib import messages from django.contrib.auth.decorators import login_required from blog import models from blog import forms # Create your views here. @login_required def new(request): form = forms.PostEdit() if request.POST: form = forms.PostEdit(request.POST) if form.is_valid(): updated_post = form.save(commit=False) if updated_post.status == 2: updated_post.publish = timezone.now() updated_post.author = request.user updated_post.save() if updated_post.status == 2: messages.add_message(request, messages.SUCCESS, 'Post saved and published.') return redirect(reverse('dashboard')) else: messages.add_message(request, messages.INFO, 'Draft post saved.') template = 'edit_post.html' context = { 'form': form, } return render(request, template, context) @login_required def edit(request, slug): post = get_object_or_404(models.Post, slug=slug) current_post_status = post.status form = forms.PostEdit(instance=post) if request.POST: form = forms.PostEdit(request.POST, instance=post) if form.is_valid(): updated_post = form.save(commit=False) if current_post_status == 1 and updated_post.status == 2: updated_post.publish = timezone.now() updated_post.save() return redirect('dashboard') template = 'edit_post.html' context = { 'form': form, } return render(request, template, context) def post(request, slug): post = get_object_or_404(models.Post, slug=slug) template = 'post.html' context = { 'post': post, } return render(request, template, context)
gpl-2.0
8,025,140,357,845,172,000
23.197183
80
0.715367
false
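A sketch of how the three views above might be routed in a urls.py of the same Django generation (the django.core.urlresolvers era). The URL patterns and names are assumptions, and the 'dashboard' route the views redirect to is defined elsewhere in the project:

from django.conf.urls import url

from blog import views

urlpatterns = [
    url(r'^new/$', views.new, name='new_post'),
    url(r'^(?P<slug>[-\w]+)/edit/$', views.edit, name='edit_post'),
    url(r'^(?P<slug>[-\w]+)/$', views.post, name='post'),
]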
google/checkers_classic
examples/quickstart/example8_test.py
1
2918
# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Example Checkers test run that checks 1 + 1 = 2 with a shutdown hook.""" from checkers.examples.quickstart.calculator import Calculator from checkers.python import checkers from checkers.python.integrations.hamcrest import AssertThat from checkers.python.integrations.hamcrest import EqualTo from checkers.python.integrations.hamcrest import Is from checkers.python.integrations.pyunit import pyunit from checkers.python.test_result import TestResult def ContextProcessorShutdownHook(context): def _ShutdownHook(): print 'it\'s checkers, foo!!', TestResult.String(context.test_result) return _ShutdownHook @checkers.Test def TestOnePlusOneEqualsTwo(context): print '1 + 1 = 2' AssertThat(1 + 1, Is(EqualTo(2))) checkers.RegisterShutdownHook(context, ContextProcessorShutdownHook(context)) @checkers.Test def TestTwoPlusTwoEqualsFour(_, calculator): AssertThat(2 + 2, Is(EqualTo(calculator.Add(2, 2)))) def CreateTestRun(): """Test run that will execute the defined test.""" test_run = checkers.TestRun() test_run.RegisterComponent('calculator', Calculator()) test_run.LoadTestCase(TestOnePlusOneEqualsTwo) test_run.RegisterSetUpFunction(SetUp1) test_run.RegisterSetUpFunction(SetUp2) test_run.RegisterTearDownFunction(TearDown1) test_run.RegisterTearDownFunction(TearDown2) return test_run def SetUp1(test_run): print 'running first setup method for test_run %s' % test_run.name def SetUp2(test_run): print 'running second setup method for test_run %s' % test_run.name def TearDown1(test_run): print 'running first teardown method for test_run %s' % test_run.name def TearDown2(test_run): print 'running second teardown method for test_run %s' % test_run.name if __name__ == '__main__': # To run a test method directly ctx = checkers.DefaultContext(TestOnePlusOneEqualsTwo) TestOnePlusOneEqualsTwo(ctx) checkers.RunTest(TestOnePlusOneEqualsTwo) # Extra arguments just get ignored by checkers, but the more # correct behavior would probably be to have this raise an error. checkers.RunTest(TestOnePlusOneEqualsTwo, Calculator()) # ctx = checkers.DefaultContext(TestTwoPlusTwoEqualsFour) # TestTwoPlusTwoEqualsFour(Calculator()) checkers.RunTest(TestTwoPlusTwoEqualsFour, Calculator()) pyunit.main(CreateTestRun())
apache-2.0
747,112,724,701,050,400
33.329412
79
0.767306
false
reillyeon/webpixels
server.py
1
5717
from datetime import timedelta from flask import Flask, redirect, render_template, request, url_for import json from tornado.httpserver import HTTPServer from tornado.ioloop import IOLoop from tornado.wsgi import WSGIContainer from webpixels import PixelSet, RgbPixel from webpixels.controller import ColorKinetics app = Flask(__name__) ioloop = IOLoop.instance() config_file = None channels = {} pixels = {} fixtures = {} presets = {} last_preset = None def load_config(config_file): with open(config_file) as f: config = json.loads(f.read()) for name, controllerConfig in config['controllers'].items(): controllerType = controllerConfig['type'] if controllerType == 'ColorKinetics': controller = ColorKinetics(name, controllerConfig['host']) for channel in controller.channels: channels[channel.get_name()] = channel for name, pixelConfig in config['pixels'].items(): chan_set = [channels[channel] for channel in pixelConfig['channels']] pixel = RgbPixel(name, *chan_set) pixels[pixel.get_name()] = pixel for name, fixtureConfig in config['fixtures'].items(): pixel_set = [pixels[pixel] for pixel in fixtureConfig['pixels']] fixture = PixelSet(name, pixel_set) fixtures[fixture.get_name()] = fixture global all_pixel all_pixel = PixelSet('all', pixels.values()) if 'presets' in config: presets.update(config['presets']) def save_config(config_file): controller_set = set() saved_controllers = {} saved_pixels = {} saved_fixtures = {} for pixel in pixels.values(): controller_set.update(pixel.get_controllers()) saved_pixels[pixel.get_name()] = { 'channels': [ pixel.red.get_name(), pixel.green.get_name(), pixel.blue.get_name() ] } for fixture in fixtures.values(): saved_fixtures[fixture.get_name()] = { 'pixels': [subpixel.get_name() for subpixel in fixture.get_pixels()] } for controller in controller_set: if isinstance(controller, ColorKinetics): controller_type = "ColorKinetics" saved_controllers[controller.get_name()] = { 'host': controller.host, 'type': controller_type } save_data = json.dumps({ 'controllers': saved_controllers, 'pixels': saved_pixels, 'fixtures': saved_fixtures, 'presets': presets }, sort_keys=True, indent=2, separators=(',', ': ')) with open(config_file, 'w') as f: f.write(save_data) def redirect_url(): return redirect(request.args.get('next') or \ request.referrer or \ url_for('index')) fade_in_progress = False def fade_step(): global fade_in_progress need_more = False controller_set = set() for pixel in pixels.values(): if pixel.step(): need_more = True controller_set.update(pixel.get_controllers()) for controller in controller_set: controller.sync() if need_more: ioloop.add_timeout(timedelta(milliseconds=25), fade_step) else: fade_in_progress = False def start_fade(): global fade_in_progress if fade_in_progress: return fade_in_progress = True fade_step() @app.route('/', methods=['GET']) def index(): fixture_list = [] for name, fixture in fixtures.items(): subpixels = [(pixel.get_name(), pixel.get_html_color()) for pixel in fixture.get_pixels()] fixture_list.append((name, fixture.get_html_color(), subpixels)) fixture_list.sort(key=lambda fixture: fixture[0]) return render_template('index.html', all=all_pixel.get_html_color(), fixtures=fixture_list) @app.route('/pixel/<name>', methods=['GET', 'POST']) def pixel(name): if name == 'all': pixel = all_pixel else: pixel = fixtures.get(name) if pixel is None: pixel = pixels[name] if request.method == 'POST': return pixel_post(pixel) else: return pixel_get(pixel) def pixel_post(pixel): r = int(request.form['r']) g = int(request.form['g']) b = 
int(request.form['b'])

    pixel.set_target(r, g, b)
    start_fade()

    return ""


def pixel_get(pixel):
    r, g, b = pixel.get()
    return render_template('pixel.html',
                           pixel=pixel.get_name(),
                           r=r, g=g, b=b)


@app.route('/presets', methods=['GET'])
def preset_list():
    preset_list = list(presets.keys())
    preset_list.sort()
    return render_template('presets.html',
                           presets=preset_list,
                           last_preset=last_preset)


@app.route('/preset/save', methods=['POST'])
def preset_save():
    preset = {}
    for name, pixel in pixels.items():
        preset[name] = pixel.get()

    presets[request.form['name']] = preset
    save_config(config_file)

    global last_preset
    last_preset = request.form['name']

    return ""


@app.route('/preset/apply', methods=['POST'])
def preset_apply():
    name = request.form['preset']
    preset = presets[name]

    for name, value in preset.items():
        pixel = pixels[name]
        pixel.set_target(*value)
    start_fade()

    global last_preset
    last_preset = name

    return ""


@app.route('/preset/delete', methods=['POST'])
def preset_delete():
    del presets[request.form['name']]
    save_config(config_file)
    return ""


if __name__ == '__main__':
    import sys

    if len(sys.argv) != 2:
        print("Usage: python server.py config.json")
        sys.exit(1)

    config_file = sys.argv[1]
    load_config(config_file)

    app.debug = True
    http_server = HTTPServer(WSGIContainer(app))
    http_server.listen(80)
    ioloop.start()
mit
-5,249,589,999,947,229,000
24.29646
77
0.617107
false
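load_config() above expects a JSON file with controllers, pixels, fixtures and optionally presets. A minimal sketch of a config that satisfies that shape, written out from Python. The host address and every name here are invented, and the channel names in particular depend on how the ColorKinetics controller names its channels, which is not visible in this record:

import json

example_config = {
    "controllers": {
        "ck1": {"type": "ColorKinetics", "host": "192.168.1.50"},
    },
    "pixels": {
        # each RGB pixel references three channel names registered by its controller
        "desk": {"channels": ["ck1-0", "ck1-1", "ck1-2"]},
    },
    "fixtures": {
        # fixtures group pixels so they can be set together from the web UI
        "office": {"pixels": ["desk"]},
    },
    "presets": {},
}

with open("config.json", "w") as f:
    json.dump(example_config, f, indent=2)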
zestrada/nova-cs498cc
nova/network/api.py
1
22070
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import inspect from nova.compute import instance_types from nova.db import base from nova import exception from nova.network import floating_ips from nova.network import model as network_model from nova.network import rpcapi as network_rpcapi from nova.openstack.common import log as logging from nova import policy from nova import utils LOG = logging.getLogger(__name__) def refresh_cache(f): """ Decorator to update the instance_info_cache Requires context and instance as function args """ argspec = inspect.getargspec(f) @functools.wraps(f) def wrapper(self, context, *args, **kwargs): res = f(self, context, *args, **kwargs) try: # get the instance from arguments (or raise ValueError) instance = kwargs.get('instance') if not instance: instance = args[argspec.args.index('instance') - 2] except ValueError: msg = _('instance is a required argument to use @refresh_cache') raise Exception(msg) update_instance_cache_with_nw_info(self, context, instance, nw_info=res, conductor_api=kwargs.get('conductor_api')) # return the original function's return value return res return wrapper def update_instance_cache_with_nw_info(api, context, instance, nw_info=None, conductor_api=None): try: if not isinstance(nw_info, network_model.NetworkInfo): nw_info = None if not nw_info: nw_info = api._get_instance_nw_info(context, instance) # update cache cache = {'network_info': nw_info.json()} if conductor_api: conductor_api.instance_info_cache_update(context, instance, cache) else: api.db.instance_info_cache_update(context, instance['uuid'], cache) except Exception: LOG.exception(_('Failed storing info cache'), instance=instance) def wrap_check_policy(func): """Check policy corresponding to the wrapped methods prior to execution.""" @functools.wraps(func) def wrapped(self, context, *args, **kwargs): action = func.__name__ check_policy(context, action) return func(self, context, *args, **kwargs) return wrapped def check_policy(context, action): target = { 'project_id': context.project_id, 'user_id': context.user_id, } _action = 'network:%s' % action policy.enforce(context, _action, target) class API(base.Base): """API for doing networking via the nova-network network manager. This is a pluggable module - other implementations do networking via other services (such as Quantum). """ _sentinel = object() def __init__(self, **kwargs): self.network_rpcapi = network_rpcapi.NetworkAPI() helper = utils.ExceptionHelper # NOTE(vish): this local version of floating_manager has to convert # ClientExceptions back since they aren't going over rpc. 
self.floating_manager = helper(floating_ips.LocalManager()) super(API, self).__init__(**kwargs) @wrap_check_policy def get_all(self, context): try: return self.db.network_get_all(context) except exception.NoNetworksFound: return [] @wrap_check_policy def get(self, context, network_uuid): return self.db.network_get_by_uuid(context.elevated(), network_uuid) @wrap_check_policy def create(self, context, **kwargs): return self.network_rpcapi.create_networks(context, **kwargs) @wrap_check_policy def delete(self, context, network_uuid): return self.network_rpcapi.delete_network(context, network_uuid, None) @wrap_check_policy def disassociate(self, context, network_uuid): network = self.get(context, network_uuid) self.db.network_disassociate(context, network['id']) @wrap_check_policy def get_fixed_ip(self, context, id): return self.db.fixed_ip_get(context, id) @wrap_check_policy def get_fixed_ip_by_address(self, context, address): return self.db.fixed_ip_get_by_address(context, address) @wrap_check_policy def get_floating_ip(self, context, id): return self.db.floating_ip_get(context, id) @wrap_check_policy def get_floating_ip_pools(self, context): return self.db.floating_ip_get_pools(context) @wrap_check_policy def get_floating_ip_by_address(self, context, address): return self.db.floating_ip_get_by_address(context, address) @wrap_check_policy def get_floating_ips_by_project(self, context): return self.db.floating_ip_get_all_by_project(context, context.project_id) @wrap_check_policy def get_floating_ips_by_fixed_address(self, context, fixed_address): floating_ips = self.db.floating_ip_get_by_fixed_address(context, fixed_address) return [floating_ip['address'] for floating_ip in floating_ips] @wrap_check_policy def get_backdoor_port(self, context, host): return self.network_rpcapi.get_backdoor_port(context, host) @wrap_check_policy def get_instance_id_by_floating_address(self, context, address): fixed_ip = self.db.fixed_ip_get_by_floating_address(context, address) if fixed_ip is None: return None else: return fixed_ip['instance_uuid'] @wrap_check_policy def get_vifs_by_instance(self, context, instance): vifs = self.db.virtual_interface_get_by_instance(context, instance['uuid']) for vif in vifs: if vif.get('network_id') is not None: network = self.db.network_get(context, vif['network_id'], project_only="allow_none") vif['net_uuid'] = network['uuid'] return vifs @wrap_check_policy def get_vif_by_mac_address(self, context, mac_address): vif = self.db.virtual_interface_get_by_address(context, mac_address) if vif.get('network_id') is not None: network = self.db.network_get(context, vif['network_id'], project_only="allow_none") vif['net_uuid'] = network['uuid'] return vif @wrap_check_policy def allocate_floating_ip(self, context, pool=None): """Adds (allocates) a floating ip to a project from a pool.""" return self.floating_manager.allocate_floating_ip(context, context.project_id, False, pool) @wrap_check_policy def release_floating_ip(self, context, address, affect_auto_assigned=False): """Removes (deallocates) a floating ip with address from a project.""" return self.floating_manager.deallocate_floating_ip(context, address, affect_auto_assigned) @wrap_check_policy @refresh_cache def associate_floating_ip(self, context, instance, floating_address, fixed_address, affect_auto_assigned=False): """Associates a floating ip with a fixed ip. Ensures floating ip is allocated to the project in context. Does not verify ownership of the fixed ip. 
Caller is assumed to have checked that the instance is properly owned. """ orig_instance_uuid = self.floating_manager.associate_floating_ip( context, floating_address, fixed_address, affect_auto_assigned) if orig_instance_uuid: msg_dict = dict(address=floating_address, instance_id=orig_instance_uuid) LOG.info(_('re-assign floating IP %(address)s from ' 'instance %(instance_id)s') % msg_dict) orig_instance = self.db.instance_get_by_uuid(context, orig_instance_uuid) # purge cached nw info for the original instance update_instance_cache_with_nw_info(self, context, orig_instance) @wrap_check_policy @refresh_cache def disassociate_floating_ip(self, context, instance, address, affect_auto_assigned=False): """Disassociates a floating ip from fixed ip it is associated with.""" return self.floating_manager.disassociate_floating_ip(context, address, affect_auto_assigned) @wrap_check_policy @refresh_cache def allocate_for_instance(self, context, instance, vpn, requested_networks, macs=None, conductor_api=None, security_groups=None): """Allocates all network structures for an instance. TODO(someone): document the rest of these parameters. :param macs: None or a set of MAC addresses that the instance should use. macs is supplied by the hypervisor driver (contrast with requested_networks which is user supplied). :returns: network info as from get_instance_nw_info() below """ # NOTE(vish): We can't do the floating ip allocation here because # this is called from compute.manager which shouldn't # have db access so we do it on the other side of the # rpc. instance_type = instance_types.extract_instance_type(instance) args = {} args['vpn'] = vpn args['requested_networks'] = requested_networks args['instance_id'] = instance['uuid'] args['project_id'] = instance['project_id'] args['host'] = instance['host'] args['rxtx_factor'] = instance_type['rxtx_factor'] args['macs'] = macs nw_info = self.network_rpcapi.allocate_for_instance(context, **args) return network_model.NetworkInfo.hydrate(nw_info) @wrap_check_policy def deallocate_for_instance(self, context, instance): """Deallocates all network structures related to instance.""" # NOTE(vish): We can't do the floating ip deallocation here because # this is called from compute.manager which shouldn't # have db access so we do it on the other side of the # rpc. 
args = {} args['instance_id'] = instance['uuid'] args['project_id'] = instance['project_id'] args['host'] = instance['host'] self.network_rpcapi.deallocate_for_instance(context, **args) # NOTE(danms): Here for quantum compatibility def allocate_port_for_instance(self, context, instance, port_id, network_id=None, requested_ip=None, conductor_api=None): raise NotImplementedError() # NOTE(danms): Here for quantum compatibility def deallocate_port_for_instance(self, context, instance, port_id, conductor_api=None): raise NotImplementedError() # NOTE(danms): Here for quantum compatibility def list_ports(self, *args, **kwargs): raise NotImplementedError() # NOTE(danms): Here for quantum compatibility def show_port(self, *args, **kwargs): raise NotImplementedError() @wrap_check_policy @refresh_cache def add_fixed_ip_to_instance(self, context, instance, network_id, conductor_api=None): """Adds a fixed ip to instance from specified network.""" instance_type = instance_types.extract_instance_type(instance) args = {'instance_id': instance['uuid'], 'rxtx_factor': instance_type['rxtx_factor'], 'host': instance['host'], 'network_id': network_id} self.network_rpcapi.add_fixed_ip_to_instance(context, **args) @wrap_check_policy @refresh_cache def remove_fixed_ip_from_instance(self, context, instance, address, conductor_api=None): """Removes a fixed ip from instance from specified network.""" instance_type = instance_types.extract_instance_type(instance) args = {'instance_id': instance['uuid'], 'rxtx_factor': instance_type['rxtx_factor'], 'host': instance['host'], 'address': address} self.network_rpcapi.remove_fixed_ip_from_instance(context, **args) @wrap_check_policy def add_network_to_project(self, context, project_id, network_uuid=None): """Force adds another network to a project.""" self.network_rpcapi.add_network_to_project(context, project_id, network_uuid) @wrap_check_policy def associate(self, context, network_uuid, host=_sentinel, project=_sentinel): """Associate or disassociate host or project to network.""" associations = {} network_id = self.get(context, network_uuid)['id'] if host is not API._sentinel: if host is None: self.db.network_disassociate(context, network_id, disassociate_host=True, disassociate_project=False) else: self.db.network_set_host(context, network_id, host) if project is not API._sentinel: project = associations['project'] if project is None: self.db.network_disassociate(context, network_id, disassociate_host=False, disassociate_project=True) else: self.db.network_associate(context, project, network_id, True) @wrap_check_policy def get_instance_nw_info(self, context, instance, conductor_api=None): """Returns all network info related to an instance.""" result = self._get_instance_nw_info(context, instance) update_instance_cache_with_nw_info(self, context, instance, result, conductor_api) return result def _get_instance_nw_info(self, context, instance): """Returns all network info related to an instance.""" instance_type = instance_types.extract_instance_type(instance) args = {'instance_id': instance['uuid'], 'rxtx_factor': instance_type['rxtx_factor'], 'host': instance['host'], 'project_id': instance['project_id']} nw_info = self.network_rpcapi.get_instance_nw_info(context, **args) return network_model.NetworkInfo.hydrate(nw_info) @wrap_check_policy def validate_networks(self, context, requested_networks): """validate the networks passed at the time of creating the server """ return self.network_rpcapi.validate_networks(context, requested_networks) @wrap_check_policy def 
get_instance_uuids_by_ip_filter(self, context, filters): """Returns a list of dicts in the form of {'instance_uuid': uuid, 'ip': ip} that matched the ip_filter """ return self.network_rpcapi.get_instance_uuids_by_ip_filter(context, filters) @wrap_check_policy def get_dns_domains(self, context): """Returns a list of available dns domains. These can be used to create DNS entries for floating ips. """ return self.network_rpcapi.get_dns_domains(context) @wrap_check_policy def add_dns_entry(self, context, address, name, dns_type, domain): """Create specified DNS entry for address.""" args = {'address': address, 'name': name, 'dns_type': dns_type, 'domain': domain} return self.network_rpcapi.add_dns_entry(context, **args) @wrap_check_policy def modify_dns_entry(self, context, name, address, domain): """Create specified DNS entry for address.""" args = {'address': address, 'name': name, 'domain': domain} return self.network_rpcapi.modify_dns_entry(context, **args) @wrap_check_policy def delete_dns_entry(self, context, name, domain): """Delete the specified dns entry.""" args = {'name': name, 'domain': domain} return self.network_rpcapi.delete_dns_entry(context, **args) @wrap_check_policy def delete_dns_domain(self, context, domain): """Delete the specified dns domain.""" return self.network_rpcapi.delete_dns_domain(context, domain=domain) @wrap_check_policy def get_dns_entries_by_address(self, context, address, domain): """Get entries for address and domain.""" args = {'address': address, 'domain': domain} return self.network_rpcapi.get_dns_entries_by_address(context, **args) @wrap_check_policy def get_dns_entries_by_name(self, context, name, domain): """Get entries for name and domain.""" args = {'name': name, 'domain': domain} return self.network_rpcapi.get_dns_entries_by_name(context, **args) @wrap_check_policy def create_private_dns_domain(self, context, domain, availability_zone): """Create a private DNS domain with nova availability zone.""" args = {'domain': domain, 'av_zone': availability_zone} return self.network_rpcapi.create_private_dns_domain(context, **args) @wrap_check_policy def create_public_dns_domain(self, context, domain, project=None): """Create a public DNS domain with optional nova project.""" args = {'domain': domain, 'project': project} return self.network_rpcapi.create_public_dns_domain(context, **args) @wrap_check_policy def setup_networks_on_host(self, context, instance, host=None, teardown=False): """Setup or teardown the network structures on hosts related to instance""" host = host or instance['host'] # NOTE(tr3buchet): host is passed in cases where we need to setup # or teardown the networks on a host which has been migrated to/from # and instance['host'] is not yet or is no longer equal to args = {'instance_id': instance['id'], 'host': host, 'teardown': teardown} self.network_rpcapi.setup_networks_on_host(context, **args) def _is_multi_host(self, context, instance): try: fixed_ips = self.db.fixed_ip_get_by_instance(context, instance['uuid']) except exception.FixedIpNotFoundForInstance: return False network = self.db.network_get(context, fixed_ips[0]['network_id'], project_only='allow_none') return network['multi_host'] def _get_floating_ip_addresses(self, context, instance): floating_ips = self.db.instance_floating_address_get_all(context, instance['uuid']) return [floating_ip['address'] for floating_ip in floating_ips] @wrap_check_policy def migrate_instance_start(self, context, instance, migration): """Start to migrate the network of an instance.""" instance_type 
= instance_types.extract_instance_type(instance) args = dict( instance_uuid=instance['uuid'], rxtx_factor=instance_type['rxtx_factor'], project_id=instance['project_id'], source_compute=migration['source_compute'], dest_compute=migration['dest_compute'], floating_addresses=None, ) if self._is_multi_host(context, instance): args['floating_addresses'] = \ self._get_floating_ip_addresses(context, instance) args['host'] = migration['source_compute'] self.network_rpcapi.migrate_instance_start(context, **args) @wrap_check_policy def migrate_instance_finish(self, context, instance, migration): """Finish migrating the network of an instance.""" instance_type = instance_types.extract_instance_type(instance) args = dict( instance_uuid=instance['uuid'], rxtx_factor=instance_type['rxtx_factor'], project_id=instance['project_id'], source_compute=migration['source_compute'], dest_compute=migration['dest_compute'], floating_addresses=None, ) if self._is_multi_host(context, instance): args['floating_addresses'] = \ self._get_floating_ip_addresses(context, instance) args['host'] = migration['dest_compute'] self.network_rpcapi.migrate_instance_finish(context, **args)
apache-2.0
-876,188,785,188,179,500
40.407129
79
0.603761
false
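The refresh_cache decorator above has one subtle trick: it finds the instance argument either in kwargs or positionally through the wrapped function's argspec, offset by two because self and context are consumed explicitly by the wrapper. A standalone sketch of that lookup, with a toy class in place of a real nova API method; it mirrors the original's Python-2-era inspect.getargspec usage and is not nova code:

import functools
import inspect


def finds_instance(f):
    argspec = inspect.getargspec(f)

    @functools.wraps(f)
    def wrapper(self, context, *args, **kwargs):
        instance = kwargs.get('instance')
        if not instance:
            # 'self' and 'context' never appear in *args, hence the -2 offset
            instance = args[argspec.args.index('instance') - 2]
        print('would refresh cache for %s' % instance['uuid'])
        return f(self, context, *args, **kwargs)
    return wrapper


class ToyAPI(object):
    @finds_instance
    def add_fixed_ip(self, context, instance, network_id):
        return network_id


api = ToyAPI()
api.add_fixed_ip('ctx', {'uuid': 'abc'}, network_id=1)           # positional lookup
api.add_fixed_ip('ctx', instance={'uuid': 'abc'}, network_id=1)  # keyword lookup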
philterphactory/prosthetic-runner
gaeauth/backends.py
1
1594
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth.backends import ModelBackend

from google.appengine.api import users


class GoogleAccountBackend(ModelBackend):
    """Backend for authentication via Google Accounts on Google App Engine.

    A Django auth.contrib.models.User object is linked to a Google Account
    via the password field, which stores the unique Google Account ID.
    The Django User object is created the first time a user logs in with
    their Google Account.
    """

    def authenticate(self, **credentials):
        g_user = users.get_current_user()
        if g_user is None:
            return None

        username = g_user.email().split('@')[0]
        if hasattr(settings, 'ALLOWED_USERS'):
            if username not in settings.ALLOWED_USERS:
                return None

        try:
            user = User.objects.get(password=g_user.user_id())
            if user.email != g_user.email():
                user.email = g_user.email()
                user.username = username
                user.save()
            return user
        except User.DoesNotExist:
            user = User.objects.create_user(username, g_user.email())
            user.password = g_user.user_id()
            if users.is_current_user_admin():
                user.is_staff = True
                user.is_superuser = True
            user.save()
            return user
mit
494,470,673,728,323,100
30.88
64
0.564617
false
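A sketch of the settings needed to activate this backend in a Django project running on App Engine. The dotted path assumes the module is importable as gaeauth.backends, matching the record's path, and ALLOWED_USERS is the optional whitelist the backend checks:

# settings.py (fragment)
AUTHENTICATION_BACKENDS = (
    'gaeauth.backends.GoogleAccountBackend',
    'django.contrib.auth.backends.ModelBackend',
)

# Optional: restrict sign-in to specific Google account user names.
ALLOWED_USERS = ['alice', 'bob']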
Frenesius/CrawlerProject56
crawler/spiders/SpecsCrawler.py
1
7573
__author__ = 'j'

import Config as config
import SpecsSpider


class GraphicsCardSpider(SpecsSpider.SpecsSpider):
    name = "GPUcrawl"            # Name to crawl, gets used to get the start_urls[]
    label = "GRAPHICSCARD"       # Name of the Label that needs to be added to the Crawled Node
    pathName = "GPUpath"         # Used to get ConfigFile
    relation = "GRAPHICSCARD"    # Name of the relation between the BaseNode and Crawled Node
    JSONfilename = "GPU"

    start_urls = []
    if name in config.componentList:
        start_urls = config.componentList[name]
        path = config.componentList[pathName]
    else:
        print("ERROR: key does not exist in dictionary")

    def parse(self, response):
        SpecsSpider.SpecsSpider.parseSource(self, response, self.JSONfilename, self.label)


class ProcessorSpider(SpecsSpider.SpecsSpider):
    name = "PROCESSORcrawl"      # Name to crawl, gets used to get the start_urls[]
    label = "PROCESSOR"          # Name of the Label that needs to be added to the Crawled Node
    pathName = "PROCESSORpath"   # Used to get ConfigFile
    relation = "PROCESSOR"       # Name of the relation between the BaseNode and Crawled Node
    JSONfilename = "CPU"

    start_urls = []
    if name in config.componentList:
        start_urls = config.componentList[name]
        path = config.componentList[pathName]
    else:
        print("ERROR: key does not exist in dictionary")

    def parse(self, response):
        SpecsSpider.SpecsSpider.parseSource(self, response, self.JSONfilename, self.label)


class MemorySpider(SpecsSpider.SpecsSpider):
    name = "MEMORYcrawl"         # Name to crawl, gets used to get the start_urls[]
    label = "MEMORY"             # Name of the Label that needs to be added to the Crawled Node
    pathName = "MEMORYpath"      # Used to get ConfigFile
    relation = "MEMORY"          # Name of the relation between the BaseNode and Crawled Node
    JSONfilename = "MEMORY"

    start_urls = []
    if name in config.componentList:
        start_urls = config.componentList[name]
        path = config.componentList[pathName]
    else:
        print("ERROR: key does not exist in dictionary")

    def parse(self, response):
        SpecsSpider.SpecsSpider.parseSource(self, response, self.JSONfilename, self.label)


class PSUSpider(SpecsSpider.SpecsSpider):
    name = "PSUcrawl"            # Name to crawl, gets used to get the start_urls[]
    label = "PSU"                # Name of the Label that needs to be added to the Crawled Node
    pathName = "PSUpath"         # Used to get ConfigFile
    relation = "PSU"             # Name of the relation between the BaseNode and Crawled Node
    JSONfilename = "PSU"

    start_urls = []
    if name in config.componentList:
        start_urls = config.componentList[name]
        path = config.componentList[pathName]
    else:
        print("ERROR: key does not exist in dictionary")

    def parse(self, response):
        SpecsSpider.SpecsSpider.parseSource(self, response, self.JSONfilename, self.label)


class MotherboardSpider(SpecsSpider.SpecsSpider):
    name = "MOTHERBOARDcrawl"    # Name to crawl, gets used to get the start_urls[]
    label = "MOTHERBOARD"        # Name of the Label that needs to be added to the Crawled Node
    pathName = "MOTHERBOARDpath" # Used to get ConfigFile
    relation = "MOTHERBOARD"     # Name of the relation between the BaseNode and Crawled Node
    JSONfilename = "MOTHERBOARD"

    start_urls = []
    if name in config.componentList:
        start_urls = config.componentList[name]
        path = config.componentList[pathName]
    else:
        print("ERROR: key does not exist in dictionary")

    def parse(self, response):
        SpecsSpider.SpecsSpider.parseSource(self, response, self.JSONfilename, self.label)


class CaseSpider(SpecsSpider.SpecsSpider):
    name = "CASEcrawl"           # Name to crawl, gets used to get the start_urls[]
    label = "CASE"               # Name of the Label that needs to be added to the Crawled Node
    pathName = "CASEpath"        # Used to get ConfigFile
    relation = "CASE"            # Name of the relation between the BaseNode and Crawled Node
    JSONfilename = "CASE"

    start_urls = []
    if name in config.componentList:
        start_urls = config.componentList[name]
        path = config.componentList[pathName]
    else:
        print("ERROR: key does not exist in dictionary")

    def parse(self, response):
        SpecsSpider.SpecsSpider.parseSource(self, response, self.JSONfilename, self.label)


class SoundcardSpider(SpecsSpider.SpecsSpider):
    name = "SOUNDCARDcrawl"      # Name to crawl, gets used to get the start_urls[]
    label = "SOUNDCARD"          # Name of the Label that needs to be added to the Crawled Node
    pathName = "SOUNDCARDpath"   # Used to get ConfigFile
    relation = "SOUNDCARD"       # Name of the relation between the BaseNode and Crawled Node
    JSONfilename = "SOUNDCARD"

    start_urls = []
    if name in config.componentList:
        start_urls = config.componentList[name]
        path = config.componentList[pathName]
    else:
        print("ERROR: key does not exist in dictionary")

    def parse(self, response):
        SpecsSpider.SpecsSpider.parseSource(self, response, self.JSONfilename, self.label)


class SSDSpider(SpecsSpider.SpecsSpider):
    name = "SSDcrawl"            # Name to crawl, gets used to get the start_urls[]
    label = "SSD"                # Name of the Label that needs to be added to the Crawled Node
    pathName = "SSDpath"         # Used to get ConfigFile
    relation = "SSD"             # Name of the relation between the BaseNode and Crawled Node
    JSONfilename = "SSD"

    start_urls = []
    if name in config.componentList:
        start_urls = config.componentList[name]
        path = config.componentList[pathName]
    else:
        print("ERROR: key does not exist in dictionary")

    def parse(self, response):
        SpecsSpider.SpecsSpider.parseSource(self, response, self.JSONfilename, self.label)


class HDDSpider(SpecsSpider.SpecsSpider):
    name = "HDDcrawl"            # Name to crawl, gets used to get the start_urls[]
    label = "HDD"                # Name of the Label that needs to be added to the Crawled Node
    pathName = "HDDpath"         # Used to get ConfigFile
    relation = "HDD"             # Name of the relation between the BaseNode and Crawled Node
    JSONfilename = "HDD"

    start_urls = []
    if name in config.componentList:
        start_urls = config.componentList[name]
        path = config.componentList[pathName]
    else:
        print("ERROR: key does not exist in dictionary")

    def parse(self, response):
        SpecsSpider.SpecsSpider.parseSource(self, response, self.JSONfilename, self.label)


class OpticalDriveSpider(SpecsSpider.SpecsSpider):
    name = "OPTICALDRIVEcrawl"   # Name to crawl, gets used to get the start_urls[]
    label = "OPTICALDRIVE"       # Name of the Label that needs to be added to the Crawled Node
    pathName = "OPTICALDRIVEpath" # Used to get ConfigFile
    relation = "OPTICALDRIVE"    # Name of the relation between the BaseNode and Crawled Node
    JSONfilename = "OPTICALDRIVE"

    start_urls = []
    if name in config.componentList:
        start_urls = config.componentList[name]
        path = config.componentList[pathName]
    else:
        print("ERROR: key does not exist in dictionary")

    def parse(self, response):
        SpecsSpider.SpecsSpider.parseSource(self, response, self.JSONfilename, self.label)
gpl-3.0
6,510,522,405,530,483,000
48.181818
102
0.665918
false
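Each spider above pulls its start URLs from Config.componentList under its name key and an extraction path under its pathName key. A hypothetical fragment of Config.py showing that shape; the URLs and the XPath-style path are placeholders, since the real configuration is not part of this record:

# Config.py (hypothetical fragment)
componentList = {
    # start URLs, keyed by each spider's `name`
    "GPUcrawl": ["http://example.com/specs/graphics-cards"],
    "PROCESSORcrawl": ["http://example.com/specs/processors"],

    # extraction paths, keyed by each spider's `pathName`
    "GPUpath": "//table[@class='specs']//tr",
    "PROCESSORpath": "//table[@class='specs']//tr",
}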
yetone/script-manager
script_manager/compat/__init__.py
1
2603
__author__ = 'yetone' import sys import inspect from collections import OrderedDict PY2 = sys.version_info[0] == 2 if not PY2: text_type = str izip = zip def iteritems(dict): return iter(dict.items()) else: text_type = unicode from itertools import izip def iteritems(dict): return dict.iteritems() def to_str(cls): if '__str__' in cls.__dict__: return cls # pragma: no cover def __str__(self): return '{}({})'.format( # pragma: no cover self.__class__.__name__, ', '.join( '{}={}'.format(k, repr(v)) for k, v in iteritems(self.__dict__) ) ) cls.__str__ = __str__ if '__repr__' not in cls.__dict__: cls.__repr__ = cls.__str__ return cls @to_str class ArgSpec(object): def __init__(self, argspec): self.varargs = argspec.varargs if hasattr(argspec, 'varkw'): self.varkw = argspec.varkw # pragma: no cover self.kwonlyargs = OrderedDict( # pragma: no cover (k, argspec.kwonlydefaults.get(k)) for k in argspec.kwonlyargs ) self.annotations = argspec.annotations else: self.varkw = argspec.keywords # pragma: no cover self.kwonlyargs = OrderedDict() # pragma: no cover self.annotations = {} args = argspec.args defaults = argspec.defaults or [] dl = len(defaults) if dl != 0: args = args[: -dl] # pragma: no cover defaults = zip(argspec.args[-dl:], defaults) # pragma: no cover self.args = args self.defaults = OrderedDict(defaults) def getargspec(func): if hasattr(inspect, 'getfullargspec'): argspec = inspect.getfullargspec(func) # pragma: no cover else: argspec = inspect.getargspec(func) # pragma: no cover return ArgSpec(argspec) def add_metaclass(metaclass): """Class decorator for creating a class with a metaclass.""" def wrapper(cls): orig_vars = cls.__dict__.copy() slots = orig_vars.get('__slots__') if slots is not None: if isinstance(slots, str): slots = [slots] for slots_var in slots: orig_vars.pop(slots_var) orig_vars.pop('__dict__', None) orig_vars.pop('__weakref__', None) if hasattr(cls, '__qualname__'): orig_vars['__qualname__'] = cls.__qualname__ return metaclass(cls.__name__, cls.__bases__, orig_vars) return wrapper
mit
-3,799,375,042,173,222,000
27.922222
76
0.542451
false
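A short usage sketch for the helpers above, intended to run on either Python 2 or 3; the Meta class and the sample function are made up for illustration:

from script_manager.compat import add_metaclass, getargspec


class Meta(type):
    def __new__(mcs, name, bases, attrs):
        attrs['created_by_meta'] = True
        return super(Meta, mcs).__new__(mcs, name, bases, attrs)


@add_metaclass(Meta)
class Command(object):
    pass


def run(host, port=8080, debug=False):
    pass


spec = getargspec(run)
print(spec.args)      # ['host'] -- arguments without defaults
print(spec.defaults)  # OrderedDict mapping 'port' and 'debug' to their defaults
assert Command.created_by_meta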
all-umass/graphs
graphs/construction/incremental.py
1
1809
from __future__ import absolute_import import numpy as np from sklearn.metrics import pairwise_distances from graphs import Graph __all__ = ['incremental_neighbor_graph'] def incremental_neighbor_graph(X, precomputed=False, k=None, epsilon=None, weighting='none'): '''See neighbor_graph.''' assert ((k is not None) or (epsilon is not None) ), "Must provide `k` or `epsilon`" assert (_issequence(k) ^ _issequence(epsilon) ), "Exactly one of `k` or `epsilon` must be a sequence." assert weighting in ('binary','none'), "Invalid weighting param: " + weighting is_weighted = weighting == 'none' if precomputed: D = X else: D = pairwise_distances(X, metric='euclidean') # pre-sort for efficiency order = np.argsort(D)[:,1:] if k is None: k = D.shape[0] # generate the sequence of graphs # TODO: convert the core of these loops to Cython for speed W = np.zeros_like(D) I = np.arange(D.shape[0]) if _issequence(k): # varied k, fixed epsilon if epsilon is not None: D[D > epsilon] = 0 old_k = 0 for new_k in k: idx = order[:, old_k:new_k] dist = D[I, idx.T] W[I, idx.T] = dist if is_weighted else 1 yield Graph.from_adj_matrix(W) old_k = new_k else: # varied epsilon, fixed k idx = order[:,:k] dist = D[I, idx.T].T old_i = np.zeros(D.shape[0], dtype=int) for eps in epsilon: for i, row in enumerate(dist): oi = old_i[i] ni = oi + np.searchsorted(row[oi:], eps) rr = row[oi:ni] W[i, idx[i,oi:ni]] = rr if is_weighted else 1 old_i[i] = ni yield Graph.from_adj_matrix(W) def _issequence(x): # Note: isinstance(x, collections.Sequence) fails for numpy arrays return hasattr(x, '__len__')
mit
130,368,169,209,905,540
27.714286
80
0.60199
false
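A sketch of driving the generator above with a growing k sequence on random points; one Graph is yielded per k, and edges are only ever added as k grows. The import path follows this record's location in the graphs package:

import numpy as np

from graphs.construction.incremental import incremental_neighbor_graph

X = np.random.RandomState(0).rand(50, 3)

# One graph per k in the sequence, reusing the distance matrix internally.
for k, g in zip([2, 4, 8], incremental_neighbor_graph(X, k=[2, 4, 8])):
    print('k=%d -> %r' % (k, g))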
rosenvladimirov/addons
product_margin_classification_bg/models/purchase.py
1
3601
# -*- coding: utf-8 -*- # # # Copyright 2017 Rosen Vladimirov # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # from openerp import models, fields, api, _ import logging _logger = logging.getLogger(__name__) class purchase_order_line(models.Model): _inherit = 'purchase.order.line' margin_classification_id = fields.Many2one( comodel_name='product.margin.classification', string='Margin Classification', readonly=True) def onchange_product_id(self, cr, uid, ids, pricelist_id, product_id, qty, uom_id, partner_id, date_order=False, fiscal_position_id=False, date_planned=False, name=False, price_unit=False, state='draft', context=None ): vals = super(purchase_order_line, self).onchange_product_id( cr, uid, ids, pricelist_id, product_id, qty, uom_id, partner_id, date_order=date_order, fiscal_position_id=fiscal_position_id, date_planned=date_planned,name=name, price_unit=price_unit, state=state, context=context ) if not product_id: return vals product = self.pool.get('product.product').browse(cr, uid, product_id, context=context) #call name_get() with partner in the context to eventually match name and description in the seller_ids field if product: vals['value'].update({'margin_classification_id': product.product_tmpl_id.margin_classification_id}) return vals # @api.model # def _check_product_template(self): # lines = [] # for line in self.order_line: # template = False # tmpl = line.product_id.product_tmpl_id # if tmpl.margin_state in ('cheap', 'expensive'): # if not template: # lines.append((0, 0, { # 'product_tmpl_id': tmpl.id, # })) # return lines # # @api.multi # def purchase_confirm(self): # self.ensure_one() # super(PurchaseOrder, self).purchase_confirm() # lines_for_update = self._check_product_template() # if lines_for_update: # ctx = {'default_wizard_line_ids': lines_for_update} # pmc_checker_form = self.env.ref( # 'product_margin_classification_bg.' # 'view_product_template_mc_check_form', False) # return { # 'name': _("There is probably a changed cost price. Please check for possible consequences for final customer prices."), # 'type': 'ir.actions.act_window', # 'view_mode': 'form', # 'res_model': 'product.margin.classification.check', # 'views': [(pmc_checker_form.id, 'form')], # 'view_id': pmc_checker_form.id, # 'target': 'new', # 'context': ctx, # } # else: # self.signal_workflow('purchase_confirm')
agpl-3.0
5,857,277,351,039,150,000
40.872093
136
0.600944
false
psiinon/addons-server
src/olympia/constants/applications.py
1
5984
import re from django.utils.translation import ugettext_lazy as _ from .base import ( ADDON_DICT, ADDON_EXTENSION, ADDON_LPAPP, ADDON_PLUGIN, ADDON_SEARCH, ADDON_STATICTHEME, ADDON_THEME) from olympia.versions.compare import version_int as vint class App(object): @classmethod def matches_user_agent(cls, user_agent): return cls.user_agent_string in user_agent # Applications class FIREFOX(App): id = 1 shortername = 'fx' short = 'firefox' pretty = _(u'Firefox') browser = True types = [ADDON_EXTENSION, ADDON_THEME, ADDON_DICT, ADDON_SEARCH, ADDON_LPAPP, ADDON_PLUGIN, ADDON_STATICTHEME] guid = '{ec8030f7-c20a-464f-9b0e-13a3a9e97384}' min_display_version = 3.0 # These versions were relabeled and should not be displayed. exclude_versions = (3.1, 3.7, 4.2) user_agent_string = 'Firefox' platforms = 'desktop' # DESKTOP_PLATFORMS (set in constants.platforms) @classmethod def matches_user_agent(cls, user_agent): matches = cls.user_agent_string in user_agent if ('Android' in user_agent or 'Mobile' in user_agent or 'Tablet' in user_agent): matches = False return matches class THUNDERBIRD(App): id = 18 short = 'thunderbird' shortername = 'tb' pretty = _(u'Thunderbird') browser = False types = [ADDON_EXTENSION, ADDON_THEME, ADDON_DICT, ADDON_LPAPP] guid = '{3550f703-e582-4d05-9a08-453d09bdfdc6}' min_display_version = 1.0 user_agent_string = 'Thunderbird' platforms = 'desktop' # DESKTOP_PLATFORMS (set in constants.platforms) class SEAMONKEY(App): id = 59 short = 'seamonkey' shortername = 'sm' pretty = _(u'SeaMonkey') browser = True types = [ADDON_EXTENSION, ADDON_THEME, ADDON_DICT, ADDON_SEARCH, ADDON_LPAPP, ADDON_PLUGIN] guid = '{92650c4d-4b8e-4d2a-b7eb-24ecf4f6b63a}' min_display_version = 1.0 exclude_versions = (1.5,) latest_version = None user_agent_string = 'SeaMonkey' platforms = 'desktop' # DESKTOP_PLATFORMS (set in constants.platforms) class SUNBIRD(App): """This application is retired and should not be used on the site. It remains as there are still some sunbird add-ons in the db.""" id = 52 short = 'sunbird' shortername = 'sb' pretty = _(u'Sunbird') browser = False types = [ADDON_EXTENSION, ADDON_THEME, ADDON_DICT, ADDON_LPAPP] guid = '{718e30fb-e89b-41dd-9da7-e25a45638b28}' min_display_version = 0.2 latest_version = None user_agent_string = 'Sunbird' platforms = 'desktop' # DESKTOP_PLATFORMS (set in constants.platforms) class MOBILE(App): """Old Firefox for Mobile. Not supported anymore, should not be added to APPS.""" id = 60 short = 'mobile' shortername = 'fn' pretty = _(u'Mobile') browser = True types = [ADDON_EXTENSION, ADDON_DICT, ADDON_SEARCH, ADDON_LPAPP] guid = '{a23983c0-fd0e-11dc-95ff-0800200c9a66}' min_display_version = 0.1 user_agent_string = 'Fennec' platforms = 'mobile' # DESKTOP_PLATFORMS (set in constants.platforms) class ANDROID(App): # This is for the Android native Firefox. id = 61 short = 'android' shortername = 'an' pretty = _(u'Firefox for Android') browser = True types = [ADDON_EXTENSION, ADDON_DICT, ADDON_SEARCH, ADDON_LPAPP] guid = '{aa3c5121-dab2-40e2-81ca-7ea25febc110}' min_display_version = 11.0 user_agent_string = 'Fennec' # Mobile and Android have the same user agent. The only way to distinguish # is by the version number. 
user_agent_re = [re.compile(r'Fennec/([\d.]+)'), re.compile(r'Android; Mobile; rv:([\d.]+)'), re.compile(r'Android; Tablet; rv:([\d.]+)'), re.compile(r'Mobile; rv:([\d.]+)'), re.compile(r'Tablet; rv:([\d.]+)')] platforms = 'mobile' latest_version = None @classmethod def matches_user_agent(cls, user_agent): for user_agent_re in cls.user_agent_re: match = user_agent_re.search(user_agent) if match: v = match.groups()[0] return vint(cls.min_display_version) <= vint(v) class MOZILLA(App): """Mozilla exists for completeness and historical purposes. Stats and other modules may reference this for history. This should NOT be added to APPS. """ id = 2 short = 'mz' shortername = 'mz' pretty = _(u'Mozilla') browser = True types = [ADDON_EXTENSION, ADDON_THEME, ADDON_DICT, ADDON_SEARCH, ADDON_LPAPP, ADDON_PLUGIN] guid = '{86c18b42-e466-45a9-ae7a-9b95ba6f5640}' platforms = 'desktop' # DESKTOP_PLATFORMS (set in constants.platforms) class UNKNOWN_APP(App): """Placeholder for unknown applications.""" pretty = _(u'Unknown') # UAs will attempt to match in this order. APP_DETECT = (ANDROID, FIREFOX) APP_USAGE = (FIREFOX, ANDROID) APPS = {app.short: app for app in APP_USAGE} APP_OBSOLETE = (MOZILLA, SUNBIRD, MOBILE, THUNDERBIRD, SEAMONKEY) APPS_ALL = {app.id: app for app in APP_USAGE + APP_OBSOLETE} APP_IDS = {app.id: app for app in APP_USAGE} APP_GUIDS = {app.guid: app for app in APP_USAGE} APPS_CHOICES = tuple((app.id, app.pretty) for app in APP_USAGE) APP_TYPE_SUPPORT = {} for _app in APP_USAGE: for _type in _app.types: APP_TYPE_SUPPORT.setdefault(_type, []).append(_app) # Fake max version for when we want max compatibility FAKE_MAX_VERSION = '9999' # The lowest maxVersion an app has to support to allow default-to-compatible. D2C_MIN_VERSIONS = { FIREFOX.id: '4.0', ANDROID.id: '11.0', } for _app in APPS_ALL.values(): _versions = list(getattr(_app, 'exclude_versions', [])) # 99 comes from the hacks we do to make search tools compatible with # versions (bug 692360). _versions.append(99) _app.exclude_versions = tuple(_versions) del _app, _type, _versions
bsd-3-clause
8,852,225,759,784,380,000
31
78
0.643382
false
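A sketch of how the APP_DETECT tuple above is usually walked to classify a user agent; the detect_app helper is made up for illustration and the UA string is just a representative Firefox-for-Android example:

from olympia.constants.applications import APP_DETECT


def detect_app(user_agent):
    # APP_DETECT already orders ANDROID before FIREFOX, so the first match wins.
    for app in APP_DETECT:
        if app.matches_user_agent(user_agent):
            return app
    return None


ua = 'Mozilla/5.0 (Android; Mobile; rv:40.0) Gecko/40.0 Firefox/40.0'
app = detect_app(ua)
print(app.short if app else 'unknown')  # 'android'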
erdc/proteus
proteus/tests/CLSVOF/with_RANS2P/vof_n.py
1
2315
from __future__ import absolute_import from proteus.default_n import * from proteus import (StepControl, TimeIntegration, NonlinearSolvers, LinearSolvers, LinearAlgebraTools) from . import vof_p as physics from proteus.mprans import VOF from .multiphase import * if timeDiscretization=='vbdf': timeIntegration = TimeIntegration.VBDF timeOrder=2 stepController = StepControl.Min_dt_cfl_controller elif timeDiscretization=='flcbdf': timeIntegration = TimeIntegration.FLCBDF #stepController = FLCBDF_controller stepController = StepControl.Min_dt_cfl_controller time_tol = 10.0*vof_nl_atol_res atol_u = {0:time_tol} rtol_u = {0:time_tol} else: timeIntegration = TimeIntegration.BackwardEuler_cfl stepController = StepControl.Min_dt_cfl_controller femSpaces = {0: basis} massLumping = False numericalFluxType = VOF.NumericalFlux conservativeFlux = None subgridError = VOF.SubgridError(coefficients=physics.coefficients, nd=domain.nd) shockCapturing = VOF.ShockCapturing(coefficients=physics.coefficients, nd=domain.nd, shockCapturingFactor=vof_shockCapturingFactor, lag=vof_lag_shockCapturing) fullNewtonFlag = True multilevelNonlinearSolver = NonlinearSolvers.Newton levelNonlinearSolver = NonlinearSolvers.Newton nonlinearSmoother = None linearSmoother = None matrix = LinearAlgebraTools.SparseMatrix if useOldPETSc: multilevelLinearSolver = LinearSolvers.PETSc levelLinearSolver = LinearSolvers.PETSc else: multilevelLinearSolver = LinearSolvers.KSP_petsc4py levelLinearSolver = LinearSolvers.KSP_petsc4py if useSuperlu: multilevelLinearSolver = LinearSolvers.LU levelLinearSolver = LinearSolvers.LU linear_solver_options_prefix = 'vof_' levelNonlinearSolverConvergenceTest = 'rits' linearSolverConvergenceTest = 'r-true' tolFac = 0.0 nl_atol_res = vof_nl_atol_res linTolFac = 0.001 l_atol_res = 0.001*vof_nl_atol_res useEisenstatWalker = False maxNonlinearIts = 50 maxLineSearches = 0 auxiliaryVariables = domain.auxiliaryVariables['vof']
mit
-678,544,537,654,634,200
30.283784
85
0.691577
false
ardi69/pyload-0.4.10
pyload/plugin/hoster/DebridItaliaCom.py
1
1486
# -*- coding: utf-8 -*- import re from pyload.plugin.internal.MultiHoster import MultiHoster class DebridItaliaCom(MultiHoster): __name = "DebridItaliaCom" __type = "hoster" __version = "0.17" __pattern = r'https?://(?:www\.|s\d+\.)?debriditalia\.com/dl/\d+' __config = [("use_premium", "bool", "Use premium account if available", True)] __description = """Debriditalia.com multi-hoster plugin""" __license = "GPLv3" __authors = [("stickell", "[email protected]"), ("Walter Purcaro", "[email protected]")] URL_REPLACEMENTS = [("https://", "http://")] def handle_premium(self, pyfile): self.html = self.load("http://www.debriditalia.com/api.php", get={'generate': "on", 'link': pyfile.url, 'p': self.getPassword()}) if "ERROR:" not in self.html: self.link = self.html.strip() else: self.info['error'] = re.search(r'ERROR:(.*)', self.html).group(1).strip() self.html = self.load("http://debriditalia.com/linkgen2.php", post={'xjxfun' : "convertiLink", 'xjxargs[]': "S<![CDATA[%s]]>" % pyfile.url, 'xjxargs[]': "S%s" % self.getPassword()}) try: self.link = re.search(r'<a href="(.+?)"', self.html).group(1) except AttributeError: pass
gpl-3.0
-7,252,663,072,103,607,000
35.243902
98
0.502692
false
timopulkkinen/BubbleFish
tools/telemetry/telemetry/core/chrome/browser_backend.py
1
7157
# Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import urllib2 import httplib import socket import json import re import sys from telemetry.core import util from telemetry.core import exceptions from telemetry.core import user_agent from telemetry.core import wpr_modes from telemetry.core import wpr_server from telemetry.core.chrome import extension_dict_backend from telemetry.core.chrome import tab_list_backend from telemetry.core.chrome import tracing_backend from telemetry.test import options_for_unittests class ExtensionsNotSupportedException(Exception): pass class BrowserBackend(object): """A base class for browser backends. Provides basic functionality once a remote-debugger port has been established.""" WEBPAGEREPLAY_HOST = '127.0.0.1' def __init__(self, is_content_shell, supports_extensions, options): self.browser_type = options.browser_type self.is_content_shell = is_content_shell self._supports_extensions = supports_extensions self.options = options self._browser = None self._port = None self._inspector_protocol_version = 0 self._chrome_branch_number = 0 self._webkit_base_revision = 0 self._tracing_backend = None self.webpagereplay_local_http_port = util.GetAvailableLocalPort() self.webpagereplay_local_https_port = util.GetAvailableLocalPort() self.webpagereplay_remote_http_port = self.webpagereplay_local_http_port self.webpagereplay_remote_https_port = self.webpagereplay_local_https_port if options.dont_override_profile and not options_for_unittests.AreSet(): sys.stderr.write('Warning: Not overriding profile. This can cause ' 'unexpected effects due to profile-specific settings, ' 'such as about:flags settings, cookies, and ' 'extensions.\n') self._tab_list_backend = tab_list_backend.TabListBackend(self) self._extension_dict_backend = None if supports_extensions: self._extension_dict_backend = \ extension_dict_backend.ExtensionDictBackend(self) def SetBrowser(self, browser): self._browser = browser self._tab_list_backend.Init() @property def browser(self): return self._browser @property def supports_extensions(self): """True if this browser backend supports extensions.""" return self._supports_extensions @property def tab_list_backend(self): return self._tab_list_backend @property def extension_dict_backend(self): return self._extension_dict_backend def GetBrowserStartupArgs(self): args = [] args.extend(self.options.extra_browser_args) args.append('--disable-background-networking') args.append('--metrics-recording-only') args.append('--no-first-run') if self.options.wpr_mode != wpr_modes.WPR_OFF: args.extend(wpr_server.GetChromeFlags( self.WEBPAGEREPLAY_HOST, self.webpagereplay_remote_http_port, self.webpagereplay_remote_https_port)) args.extend(user_agent.GetChromeUserAgentArgumentFromType( self.options.browser_user_agent_type)) extensions = [extension.local_path for extension in self.options.extensions_to_load if not extension.is_component] extension_str = ','.join(extensions) if len(extensions) > 0: args.append('--load-extension=%s' % extension_str) component_extensions = [extension.local_path for extension in self.options.extensions_to_load if extension.is_component] component_extension_str = ','.join(component_extensions) if len(component_extensions) > 0: args.append('--load-component-extension=%s' % component_extension_str) return args @property def wpr_mode(self): return self.options.wpr_mode def _WaitForBrowserToComeUp(self, timeout=None): def 
IsBrowserUp(): try: self.Request('', timeout=timeout) except (socket.error, httplib.BadStatusLine, urllib2.URLError): return False else: return True try: util.WaitFor(IsBrowserUp, timeout=30) except util.TimeoutException: raise exceptions.BrowserGoneException() def AllExtensionsLoaded(): for e in self.options.extensions_to_load: if not e.extension_id in self._extension_dict_backend: return False extension_object = self._extension_dict_backend[e.extension_id] extension_object.WaitForDocumentReadyStateToBeInteractiveOrBetter() return True if self._supports_extensions: util.WaitFor(AllExtensionsLoaded, timeout=30) def _PostBrowserStartupInitialization(self): # Detect version information. data = self.Request('version') resp = json.loads(data) if 'Protocol-Version' in resp: self._inspector_protocol_version = resp['Protocol-Version'] if 'Browser' in resp: branch_number_match = re.search('Chrome/\d+\.\d+\.(\d+)\.\d+', resp['Browser']) else: branch_number_match = re.search( 'Chrome/\d+\.\d+\.(\d+)\.\d+ (Mobile )?Safari', resp['User-Agent']) webkit_version_match = re.search('\((trunk)?\@(\d+)\)', resp['WebKit-Version']) if branch_number_match: self._chrome_branch_number = int(branch_number_match.group(1)) else: # Content Shell returns '' for Browser, for now we have to # fall-back and assume branch 1025. self._chrome_branch_number = 1025 if webkit_version_match: self._webkit_base_revision = int(webkit_version_match.group(2)) return # Detection has failed: assume 18.0.1025.168 ~= Chrome Android. self._inspector_protocol_version = 1.0 self._chrome_branch_number = 1025 self._webkit_base_revision = 106313 def Request(self, path, timeout=None): url = 'http://localhost:%i/json' % self._port if path: url += '/' + path req = urllib2.urlopen(url, timeout=timeout) return req.read() @property def chrome_branch_number(self): return self._chrome_branch_number @property def supports_tab_control(self): return self._chrome_branch_number >= 1303 @property def supports_tracing(self): return self.is_content_shell or self._chrome_branch_number >= 1385 def StartTracing(self): if self._tracing_backend is None: self._tracing_backend = tracing_backend.TracingBackend(self._port) self._tracing_backend.BeginTracing() def StopTracing(self): self._tracing_backend.EndTracing() def GetTraceResultAndReset(self): return self._tracing_backend.GetTraceResultAndReset() def GetRemotePort(self, _): return util.GetAvailableLocalPort() def Close(self): if self._tracing_backend: self._tracing_backend.Close() self._tracing_backend = None def CreateForwarder(self, *port_pairs): raise NotImplementedError() def IsBrowserRunning(self): raise NotImplementedError() def GetStandardOutput(self): raise NotImplementedError()
bsd-3-clause
5,739,832,004,754,966,000
32.759434
80
0.685203
false
OpenBfS/dokpool-plone
Plone/src/docpool.theme/setup.py
1
1026
from setuptools import find_packages from setuptools import setup import os version = '1.0' setup( name='docpool.theme', version=version, description="DocPool Theme", long_description=open("README.txt").read() + "\n" + open(os.path.join("docs", "HISTORY.txt")).read(), # Get more strings from # http://pypi.python.org/pypi?%3Aaction=list_classifiers classifiers=[ "Framework :: Plone", "Programming Language :: Python", "Topic :: Software Development :: Libraries :: Python Modules", ], keywords='', author='', author_email='', url='', license='GPL', packages=find_packages(exclude=['ez_setup']), namespace_packages=['docpool'], include_package_data=True, zip_safe=False, install_requires=[ 'setuptools', 'z3c.jbot', # -*- Extra requirements: -*- 'docpool.menu', ], entry_points=""" # -*- Entry points: -*- [z3c.autoinclude.plugin] target = plone """, )
gpl-3.0
2,016,604,569,222,115,300
22.860465
71
0.582846
false
justanothercoder/LSTM-Optimizer-TF
optimizees/booth.py
1
1578
import numpy as np import tensorflow as tf from . import optimizee class Booth(optimizee.Optimizee): name = 'booth' def __init__(self, low=2, high=10): super(Booth, self).__init__() self.low = low self.high = high def get_x_dim(self): return self.dim def build(self): with tf.variable_scope('booth'): self.dim = tf.placeholder(tf.int32, [], name='dim') self.a = tf.placeholder(tf.float32, [None, None], name='a') self.b = tf.placeholder(tf.float32, [None, None], name='b') def loss(self, x, i): t0 = x[..., ::2] t1 = x[..., 1::2] s = tf.reduce_sum( (t0 + 2 * t1 - self.a - 2 * self.b)**2 + (2 * t0 + t1 - 2 * self.a - self.b)**2, axis=-1) g = self.grad(x, s) return s, g def get_initial_x(self, batch_size=1): self.D = np.random.randint(low=self.low, high=self.high) x = np.random.normal(1, 0.1, size=(batch_size, self.D, 1)) y = np.random.normal(3, 0.1, size=(batch_size, self.D, 1)) self.t = np.concatenate([x, y], axis=-1).reshape(batch_size, -1) return self.t def get_new_params(self, batch_size=1): return { self.a: np.random.normal(self.t[..., ::2], 0.1, size=(batch_size, self.D)), self.b: np.random.normal(self.t[..., 1::2], 0.1, size=(batch_size, self.D)), self.dim: self.D * 2 } def get_next_dict(self, n_bptt_steps, batch_size=1): return { }
mit
8,666,483,127,483,528,000
26.206897
88
0.507605
false
caulagi/py-ras
test/test_slogan_manager.py
1
2519
""" Tests for interaction with db for slogans """ import asyncio import asyncpg from asynctest import TestCase, ignore_loop from server.const import connection_url from server.slogan_manager import SloganManager from server.util import random_string class SloganManagerTest(TestCase): @classmethod def setUpClass(cls): cls.sm = SloganManager() cls.loop = asyncio.get_event_loop() def test_init(self): async def _test_init(self): conn = await asyncpg.connect(connection_url()) row = await conn.fetchrow( 'select table_name from information_schema.tables where table_name = \'slogan\'' ) assert row['table_name'] == 'slogan' self.loop.run_until_complete(_test_init(self)) # pylint: disable=R0201 @ignore_loop def test_md5(self): assert SloganManager.get_md5('test') == '098f6bcd4621d373cade4e832627b4f6' def test_create(self): async def _test_create(self): title = random_string() ok, res = await self.sm.create(title) assert ok is True assert res == title self.loop.run_until_complete(_test_create(self)) def test_create_unique_constraint(self): async def _test_create_unique_constraint(self): title = random_string() await self.sm.create(title) ok, _ = await self.sm.create(title) assert ok is False self.loop.run_until_complete(_test_create_unique_constraint(self)) def test_rent_when_available(self): async def _test_rent_when_available(self): title = random_string() await self.sm.create(title) status, _ = await self.sm.rent(rented_by=title) assert status is True self.loop.run_until_complete(_test_rent_when_available(self)) # def test_rent_none_available(self): # with NamedTemporaryFile() as test_db: # slogan_manager = SloganManager(test_db.name) # slogan_manager.create('test') # slogan_manager.rent() # status, _ = slogan_manager.rent() # assert status is False def test_list(self): async def _test_list(self): title = random_string() await self.sm.create(title) return await self.sm.list() status, res = self.loop.run_until_complete(_test_list(self)) assert status is True assert res[0] > 0 assert len(res) == 2
mit
8,327,923,591,910,263,000
32.586667
96
0.608972
false
bobisjan/django-shanghai
shanghai/mixins/dispatcher.py
1
2174
import sys

from django.conf import settings
from django.views.decorators.csrf import csrf_exempt

from shanghai.http import HttpResponseNotImplemented
from shanghai.utils import setattrs


class DispatcherMixin(object):

    def action_not_implemented(self):
        return HttpResponseNotImplemented()

    def resolve_pk(self, pk):
        return self.primary_key().transform.deserialize(pk)

    def resolve_parameters(self):
        pk = self.kwargs.get('pk', None)
        link = self.kwargs.get('link', None)
        related = self.kwargs.get('related', None)

        pk = self.resolve_pk(pk)

        return pk, link, related

    def resolve_action(self):
        method = self.request.method.lower()

        if self.pk is None:
            return method, 'collection'
        elif self.link:
            return method, 'linked'
        elif self.related:
            return method, 'related'
        else:
            return method, 'object'

    def resolve_input(self):
        import json

        method = self.request.method.lower()
        body = self.request.body

        if method in ('post', 'patch', 'delete') and body is not None:
            body = body.decode(settings.DEFAULT_CHARSET)

        if body is None or not len(body):
            return None

        return json.loads(body)

    def is_post_collection(self):
        return self.action[0] == 'post' and self.action[1] == 'collection'

    @csrf_exempt
    def dispatch(self, request, *args, **kwargs):
        setattrs(self, request=request, args=args, kwargs=kwargs)

        pk, link, related = self.resolve_parameters()
        setattrs(self, pk=pk, link=link, related=related)

        action = self.resolve_action()
        setattr(self, 'action', action)

        input = self.resolve_input()
        setattr(self, 'input', input)

        callback = getattr(self, '_'.join(action), self.action_not_implemented)

        try:
            response = callback()
        except:
            exc_info = sys.exc_info()
            if settings.DEBUG:
                raise
            return self.response_with_error(exc_info[1])
        else:
            return response
mit
7,950,718,102,252,483,000
25.839506
79
0.599356
false
peterwilletts24/Python-Scripts
plot_scripts/EMBRACE/plot_from_pp_interp_p_levs_temp_geop_sp_hum_the_rest.py
1
13214
""" Load pp, plot and save 8km difference """ import os, sys #%matplotlib inline #%pylab inline import matplotlib matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab! from matplotlib import rc from matplotlib.font_manager import FontProperties from matplotlib import rcParams from mpl_toolkits.basemap import Basemap rc('font', family = 'serif', serif = 'cmr10') rc('text', usetex=True) rcParams['text.usetex']=True rcParams['text.latex.unicode']=True rcParams['font.family']='serif' rcParams['font.serif']='cmr10' import matplotlib.pyplot as plt #from matplotlib import figure import matplotlib as mpl import matplotlib.cm as mpl_cm import numpy as np import iris import iris.coords as coords import iris.quickplot as qplt import iris.plot as iplt import iris.coord_categorisation import iris.unit as unit import cartopy.crs as ccrs import cartopy.io.img_tiles as cimgt import matplotlib.ticker as mticker from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER import datetime from mpl_toolkits.basemap import cm import imp from textwrap import wrap import re import iris.analysis.cartography import math from dateutil import tz #import multiprocessing as mp import gc import types import pdb save_path='/nfs/a90/eepdw/Figures/EMBRACE/' model_name_convert_title = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/model_name_convert_title.py') unrotate = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/unrotate_pole.py') #pp_file = '' #plot_diags=['temp', 'sp_hum'] plot_diags=['sp_hum'] plot_levels = [925, 850, 700, 500] #experiment_ids = ['dkmbq', 'dklyu'] experiment_ids = ['djznw', 'djzny', 'djznq', 'djzns', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'dkjxq', 'dkbhu'] # All minus large 2 #experiment_ids = ['djzny', 'djznq', 'djzns', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'dkbhu', 'djznu', 'dkhgu' ] # All 12 #experiment_ids = ['djzny', 'djznq', 'djzns', 'dkjxq', 'dklwu', 'dklzq', 'dkbhu',] # All 12 #experiment_ids = ['dkbhu', 'dkjxq'] pp_file_path = '/nfs/a90/eepdw/Data/EMBRACE/' degs_crop_top = 1.7 degs_crop_bottom = 2.5 from iris.coord_categorisation import add_categorised_coord def add_hour_of_day(cube, coord, name='hour'): add_categorised_coord(cube, name, coord, lambda coord, x: coord.units.num2date(x).hour) figprops = dict(figsize=(8,8), dpi=100) #cmap=cm.s3pcpn_l u = unit.Unit('hours since 1970-01-01 00:00:00',calendar='gregorian') dx, dy = 10, 10 divisor=10 # for lat/lon rounding lon_high = 101.866 lon_low = 64.115 lat_high = 33. lat_low =-6.79 lon_low_tick=lon_low -(lon_low%divisor) lon_high_tick=math.ceil(lon_high/divisor)*divisor lat_low_tick=lat_low - (lat_low%divisor) lat_high_tick=math.ceil(lat_high/divisor)*divisor def main(): for p_level in plot_levels: # Set pressure height contour min/max if p_level == 925: clev_min = 660. clev_max = 810. elif p_level == 850: clev_min = 1435. clev_max = 1530. elif p_level == 700: clev_min = 3090. clev_max = 3155. elif p_level == 500: clev_min = 5800. clev_max = 5890. else: print 'Contour min/max not set for this pressure level' # Set potential temperature min/max if p_level == 925: clevpt_min = 300. clevpt_max = 312. elif p_level == 850: clevpt_min = 302. clevpt_max = 310. elif p_level == 700: clevpt_min = 312. clevpt_max = 320. elif p_level == 500: clevpt_min = 325. clevpt_max = 332. 
else: print 'Potential temperature min/max not set for this pressure level' # Set specific humidity min/max if p_level == 925: clevsh_min = 0.012 clevsh_max = 0.020 elif p_level == 850: clevsh_min = 0.007 clevsh_max = 0.017 elif p_level == 700: clevsh_min = 0.002 clevsh_max = 0.010 elif p_level == 500: clevsh_min = 0.001 clevsh_max = 0.005 else: print 'Specific humidity min/max not set for this pressure level' #clevs_col = np.arange(clev_min, clev_max) clevs_lin = np.arange(clev_min, clev_max, 5) p_level_constraint = iris.Constraint(pressure=p_level) for plot_diag in plot_diags: for experiment_id in experiment_ids: expmin1 = experiment_id[:-1] pp_file = '%s_%s_on_p_levs_mean_by_hour.pp' % (experiment_id, plot_diag) pfile = '%s%s/%s/%s' % (pp_file_path, expmin1, experiment_id, pp_file) pcube = iris.load_cube(pfile, p_level_constraint) # For each hour in cube height_pp_file = '%s_408_on_p_levs_mean_by_hour.pp' % (experiment_id) height_pfile = '%s%s/%s/%s' % (pp_file_path, expmin1, experiment_id, height_pp_file) height_cube = iris.load_cube(height_pfile, p_level_constraint) print pcube print height_cube #time_coords = cube_f.coord('time') add_hour_of_day(pcube, pcube.coord('time')) add_hour_of_day(height_cube, height_cube.coord('time')) #pcube.remove_coord('time') #cube_diff.remove_coord('time') #height_cube.remove_coord('time') #height_cube_diff.remove_coord('time') #p_cube_difference = iris.analysis.maths.subtract(pcube, cube_diff, dim='hour') #height_cube_difference = iris.analysis.maths.subtract(height_cube, height_cube_diff, dim='hour') #pdb.set_trace() #del height_cube, pcube, height_cube_diff, cube_diff for t, time_cube in enumerate(pcube.slices(['grid_latitude', 'grid_longitude'])): #pdb.set_trace() print time_cube height_cube_slice = height_cube.extract(iris.Constraint(hour=time_cube.coord('hour').points)) # Get time of averagesfor plot title h = u.num2date(np.array(time_cube.coord('hour').points, dtype=float)[0]).strftime('%H%M') #Convert to India time from_zone = tz.gettz('UTC') to_zone = tz.gettz('Asia/Kolkata') h_utc = u.num2date(np.array(time_cube.coord('hour').points, dtype=float)[0]).replace(tzinfo=from_zone) h_local = h_utc.astimezone(to_zone).strftime('%H%M') fig = plt.figure(**figprops) cmap=plt.cm.RdBu_r ax = plt.axes(projection=ccrs.PlateCarree(), extent=(lon_low,lon_high,lat_low+degs_crop_bottom,lat_high-degs_crop_top)) m =\ Basemap(llcrnrlon=lon_low,llcrnrlat=lat_low,urcrnrlon=lon_high,urcrnrlat=lat_high, rsphere = 6371229) #pdb.set_trace() lat = time_cube.coord('grid_latitude').points lon = time_cube.coord('grid_longitude').points cs = time_cube.coord_system('CoordSystem') lons, lats = np.meshgrid(lon, lat) lons, lats = iris.analysis.cartography.unrotate_pole\ (lons,lats, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude) x,y = m(lons,lats) if plot_diag=='temp': min_contour = clevpt_min max_contour = clevpt_max cb_label='K' main_title='8km Explicit model (dklyu) minus 8km parametrised model geopotential height (grey contours), potential temperature (colours),\ and wind (vectors) %s UTC %s IST' % (h, h_local) tick_interval=2 clev_number=max_contour-min_contour+1 elif plot_diag=='sp_hum': min_contour = clevsh_min max_contour = clevsh_max cb_label='kg/kg' main_title='8km Explicit model (dklyu) minus 8km parametrised model geopotential height (grey contours), specific humidity (colours),\ and wind (vectors) %s UTC %s IST' % (h, h_local) tick_interval=0.002 clev_number=max_contour-min_contour+0.001 clevs = np.linspace(min_contour, max_contour, clev_number) #clevs = 
np.linspace(-3, 3, 32) cont = plt.contourf(x,y,time_cube.data, clevs, cmap=cmap, extend='both') #cont = iplt.contourf(time_cube, clevs, cmap=cmap, extend='both') cs_lin = iplt.contour(height_cube_slice, clevs_lin,colors='#262626',linewidths=1.) plt.clabel(cs_lin, fontsize=14, fmt='%d', color='black') #del time_cube #plt.clabel(cont, fmt='%d') #ax.stock_img() ax.coastlines(resolution='110m', color='#262626') gl = ax.gridlines(draw_labels=True,linewidth=0.5, color='#262626', alpha=0.5, linestyle='--') gl.xlabels_top = False gl.ylabels_right = False #gl.xlines = False dx, dy = 10, 10 gl.xlocator = mticker.FixedLocator(range(int(lon_low_tick),int(lon_high_tick)+dx,dx)) gl.ylocator = mticker.FixedLocator(range(int(lat_low_tick),int(lat_high_tick)+dy,dy)) gl.xformatter = LONGITUDE_FORMATTER gl.yformatter = LATITUDE_FORMATTER gl.xlabel_style = {'size': 12, 'color':'#262626'} #gl.xlabel_style = {'color': '#262626', 'weight': 'bold'} gl.ylabel_style = {'size': 12, 'color':'#262626'} cbar = fig.colorbar(cont, orientation='horizontal', pad=0.05, extend='both') cbar.set_label('%s' % cb_label, fontsize=10, color='#262626') #cbar.set_label(time_cube.units, fontsize=10, color='#262626') cbar.set_ticks(np.arange(min_contour, max_contour+tick_interval,tick_interval)) ticks = (np.arange(min_contour, max_contour+tick_interval,tick_interval)) cbar.set_ticklabels(['${%.1f}$' % i for i in ticks]) cbar.ax.tick_params(labelsize=10, color='#262626') #main_title='Mean Rainfall for EMBRACE Period -%s UTC (%s IST)' % (h, h_local) #main_title=time_cube.standard_name.title().replace('_',' ') #model_info = re.sub(r'[(\']', ' ', model_info) #model_info = re.sub(r'[\',)]', ' ', model_info) #print model_info file_save_name = '%s_%s_%s_hPa_and_geop_height_%s' % (experiment_id, plot_diag, p_level, h) save_dir = '%s%s/%s' % (save_path, experiment_id, plot_diag) if not os.path.exists('%s' % save_dir): os.makedirs('%s' % (save_dir)) #plt.show() fig.savefig('%s/%s_notitle.png' % (save_dir, file_save_name), format='png', bbox_inches='tight') plt.title('%s UTC %s IST' % (h, h_local)) fig.savefig('%s/%s_short_title.png' % (save_dir, file_save_name) , format='png', bbox_inches='tight') model_info=re.sub('(.{68} )', '\\1\n', str(model_name_convert_title.main(experiment_id)), 0, re.DOTALL) plt.title('\n'.join(wrap('%s\n%s' % (main_title, model_info), 1000,replace_whitespace=False)), fontsize=16) fig.savefig('%s/%s.png' % (save_dir, file_save_name), format='png', bbox_inches='tight') fig.clf() plt.close() #del time_cube gc.collect() if __name__ == '__main__': main() #proc=mp.Process(target=worker) #proc.daemon=True #proc.start() #proc.join()
mit
-485,869,482,436,297,540
35.910615
163
0.517179
false
helixyte/TheLMA
thelma/repositories/rdb/mappers/labisorequest.py
1
2209
""" This file is part of the TheLMA (THe Laboratory Management Application) project. See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information. Lab ISO request mapper. """ from sqlalchemy.orm import relationship from everest.repositories.rdb.utils import mapper from thelma.entities.experiment import ExperimentMetadata from thelma.entities.iso import ISO_TYPES from thelma.entities.iso import LabIsoRequest from thelma.entities.library import MoleculeDesignLibrary from thelma.entities.liquidtransfer import ReservoirSpecs from thelma.entities.racklayout import RackLayout from thelma.entities.user import User __docformat__ = "reStructuredText en" __all__ = ['create_mapper'] def create_mapper(iso_request_mapper, lab_iso_request_tbl, experiment_metadata_iso_request_tbl, reservoir_specs_tbl, molecule_design_library_lab_iso_request_tbl): "Mapper factory." lir = lab_iso_request_tbl emir = experiment_metadata_iso_request_tbl rs = reservoir_specs_tbl m = mapper(LabIsoRequest, lab_iso_request_tbl, inherits=iso_request_mapper, properties=dict( requester=relationship(User, uselist=False), rack_layout=relationship(RackLayout, uselist=False, cascade='all,delete,delete-orphan', single_parent=True), experiment_metadata=relationship(ExperimentMetadata, secondary=emir, uselist=False, back_populates='lab_iso_request'), molecule_design_library=relationship( MoleculeDesignLibrary, uselist=False, secondary=molecule_design_library_lab_iso_request_tbl), iso_plate_reservoir_specs=relationship(ReservoirSpecs, uselist=False, primaryjoin=lir.c.iso_plate_reservoir_specs_id == \ rs.c.reservoir_specs_id) ), polymorphic_identity=ISO_TYPES.LAB, ) return m
mit
4,018,588,075,209,770,500
43.18
80
0.615663
false
carquois/blobon
blobon/punns/management/commands/sample_utils.py
1
4481
#!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Utilities for Analytics API code samples.

Handles various tasks to do with logging, authentication and initialization.
Mostly taken from Sergio :)

Before You Begin:

You must update the client_secrets.json file with a client id, client secret,
and the redirect uri. You get these values by creating a new project in the
Google APIs console and registering for OAuth2.0 for installed applications:
https://code.google.com/apis/console

Also all OAuth2.0 tokens are stored for resue in the file specified as
TOKEN_FILE_NAME. You can modify this file name if you wish.
"""

__author__ = ('[email protected] (Sergio Gomes)'
              '[email protected] (Nick Mihailovski)')

import logging
import os
import sys
from apiclient.discovery import build
import gflags
import httplib2
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import OOB_CALLBACK_URN
from oauth2client.file import Storage
from oauth2client.tools import run

FLAGS = gflags.FLAGS

# CLIENT_SECRETS, name of a file containing the OAuth 2.0 information for this
# application, including client_id and client_secret. You get these values by
# creating a new project in the Google APIs console and registering for
# OAuth2.0 for installed applications: <https://code.google.com/apis/console>
CLIENT_SECRETS = 'client_secrets.json'

# Helpful message to display in the browser if the CLIENT_SECRETS file
# is missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0

To make this sample run you will need to populate the client_secrets.json file
found at:

   %s

with information from the APIs Console <https://code.google.com/apis/console>.
""" % os.path.join(os.path.dirname(__file__), CLIENT_SECRETS)

# Set up a Flow object to be used if we need to authenticate.
FLOW = flow_from_clientsecrets(
    CLIENT_SECRETS,
    scope='https://www.googleapis.com/auth/analytics.readonly',
    redirect_uri=OOB_CALLBACK_URN,
    message=MISSING_CLIENT_SECRETS_MESSAGE)

# The gflags module makes defining command-line options easy for applications.
# Run this program with the '--help' argument to see all the flags that it
# understands.
gflags.DEFINE_enum('logging_level', 'ERROR',
                   ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
                   'Set the level of logging detail.')

# Name of file that will store the access and refresh tokens to access
# the API without having to login each time. Make sure this file is in
# a secure place.
TOKEN_FILE_NAME = 'analytics.dat'


def process_flags(argv):
  """Uses the command-line flags to set the logging level.

  Args:
    argv: List of command line arguments passed to the python script.
  """

  # Let the gflags module process the command-line arguments.
  try:
    argv = FLAGS(argv)
  except gflags.FlagsError, e:
    print '%s\nUsage: %s ARGS\n%s' % (e, argv[0], FLAGS)
    sys.exit(1)

  # Set the logging according to the command-line flag.
  logging.getLogger().setLevel(getattr(logging, FLAGS.logging_level))


def initialize_service():
  """Returns an instance of service from discovery data and does auth.

  This method tries to read any existing OAuth 2.0 credentials from the
  Storage object. If the credentials do not exist, new credentials are
  obtained. The crdentials are used to authorize an http object. The
  http object is used to build the analytics service object.

  Returns:
    An analytics v3 service object.
  """

  # Create an httplib2.Http object to handle our HTTP requests.
  http = httplib2.Http()

  # Prepare credentials, and authorize HTTP object with them.
  storage = Storage(TOKEN_FILE_NAME)
  credentials = storage.get()
  if credentials is None or credentials.invalid:
    credentials = run(FLOW, storage)
  http = credentials.authorize(http)

  # Retrieve service.
  return build('analytics', 'v3', http=http)
mit
4,576,329,098,788,045,000
32.691729
78
0.743807
false
nagisa/Feeds
trifle/utils/sqlite.py
1
2461
from gi.repository import GLib, GObject
import os
import sqlite3
import logging
import threading
import queue

logger = logging.getLogger('trifle')

from trifle.utils import const, async, get_data_path


class SQLite(threading.Thread):
    def __init__(self, *args, **kwargs):
        super(SQLite, self).__init__()
        self.daemon = True
        # Contains jobs in form of (SQLiteJob, callable, args, kwargs)
        self._jobs = queue.Queue()
        self._args, self._kwargs = args, kwargs

    def run(self):
        with sqlite3.Connection(*self._args, **self._kwargs) as cnn:
            del self._args, self._kwargs
            while True:
                job, method, args, kwargs = self._jobs.get()
                if method is None:
                    break

                try:
                    result = getattr(cnn, method)(*args, **kwargs)
                    if hasattr(result, 'fetchall'):
                        job.result = result.fetchall()
                    else:
                        job.result = result
                    GLib.idle_add(job.emit, 'finished', True)
                except:  # Yes, catch 'em all!
                    logger.exception('SQLite error')
                    GLib.idle_add(job.emit, 'finished', False)
                self._jobs.task_done()
        GLib.idle_add(self.join)

    def commit(self, *args, **kwargs):
        job = async.Job()
        self._jobs.put((job, 'commit', args, kwargs))
        return job

    def execute(self, *args, **kwargs):
        job = async.Job()
        self._jobs.put((job, 'execute', args, kwargs))
        return job

    def executemany(self, *args, **kwargs):
        job = async.Job()
        self._jobs.put((job, 'executemany', args, kwargs))
        return job

    def executescript(self, *args, **kwargs):
        job = async.Job()
        self._jobs.put((job, 'executescript', args, kwargs))
        return job

    def stop(self, *args, **kwargs):
        self._jobs.put((None, None, None, None,))


_sqlite_path = os.path.join(const.CACHE_PATH, 'metadata')
_init_sqlite = not os.path.exists(_sqlite_path)
if _init_sqlite and not os.path.exists(os.path.dirname(_sqlite_path)):
    os.makedirs(os.path.dirname(_sqlite_path))

# Started in views.application.Application.on_startup
sqlite = SQLite(_sqlite_path)

if _init_sqlite:
    with open(get_data_path('db_init.sql'), 'r') as script:
        sqlite.executescript(script.read())
        sqlite.commit()
gpl-2.0
-3,084,343,001,645,565,000
31.381579
70
0.567249
false
sbuss/TigerShark
tigershark/facade/enums/common.py
1
146273
quantity_qualifier = { "01": "Discrete Quantity", "02": "Cumulative Quantity", "03": "Discreet Quantity - Rejected Material", "04": "Discrete Quantity - Rejected Material: Disposition Replacement", "05": "Discrete Quantity - Rejected Material: Disposition Credit", "06": "Discrete Quantity - Rejected Material: Disposition Pending", "07": "Cumulative Quantity - Rejected Material", "08": "Cumulative Quantity - Rejected Material: Disposition Replacement", "09": "Cumulative Quantity - Rejected Material: Disposition Credit", "10": "Cumulative Quantity - Rejected Material: Disposition Pending", "11": "Split Quantity", "12": "Ship Notice Quantity", "13": "Collateral Requirements", "14": "Quantity in Float", "15": "Quantity in Hold Out", "16": "Line Thread Quantity", "17": "Quantity on Hand", "18": "Previous Week Quantity", "19": "Unverified Receipts", "1A": "Original Duration (in calendar units)", "1B": "Current Duration (in calendar units)", "1C": "Remaining Duration (in calendar units)", "1D": "Total Float (in calendar units)", "1E": "Free Float (in calendar units)", "1F": "Lag (as in Lag Time - in calendar units)", "1G": "Lead Time (in calendar units)", "1H": "Started", "1I": "Completed", "1J": "Due", "1K": "Time Units", "1L": "Shifts", "1M": "Time units per shift", "1N": "Scrap allowed", "1O": "Calendar Units", "1P": "Resource (Quantity) available", "1Q": "Total Resource (Quantity)", "1R": "Level Resource (Quantity)", "1S": "Late", "1T": "Number of Delinquent Installments", "1U": "Number of Loans", "1V": "Total Number of Mortgagees", "1W": "Total Number of Loan Detail Records", "1X": "Prescription Effective Period", "1Y": "Rate Per Day (RPD)", "1Z": "End Of Month Inventory Prior To Ship", "20": "Unusable Quantity", "21": "Cumulative Quantity Shipped Short - Disposition Pending", "22": "Cumulative Quantity Shipped Short - Disposition Challenged", "23": "Cumulative Quantity Shipped Long - Disposition Pending", "24": "Cumulative Quantity Shipped Long - Disposition Challenged", "25": "OEM Inventory", "26": "Total Inventory", "27": "Committed Quantity", "28": "Quantity Available for Return", "29": "Projected Available Inventory", "2A": "Commitment Period", "2B": "Number of Borrowers", "2C": "Number of Adjustment Periods", "2D": "Age Nearest", "2E": "Total Other Properties Owned and Financed", "2F": "Age Next", "2G": "Reconsideration Period", "2H": "Flat Extra Premium", "2I": "CO2 Injection Volume", "2J": "Accounts Placed for Collection", "2K": "Changes", "2L": "Companies in Same Activity for a Period", "2M": "Comparison Period", "2N": "Departments", "2O": "Employees Shared", "2P": "Estimated Accounts", "2Q": "Installed Capacity", "2R": "Levels Occupied", "2S": "Registered Brands Distributed", "2T": "Electronic Signatures", "2U": "Bytes", "2V": "Employed at this Location", "2W": "Segments", "2X": "Registered Brands Manufactured", "2Y": "Functional Groups", "2Z": "Transaction Sets", "30": "Quote Quantity on Inventory", "31": "Additional Demand Quantity", "32": "Quantity Sold", "33": "Quantity Available for Sale (stock quantity)", "34": "Noncommitted Inventory on Shelf", "35": "Inventory on Shelf + Work in Progress", "36": "Distributor Inventory", "37": "Work In Process", "38": "Original Quantity", "39": "Shipped Quantity", "3A": "Total Credits Accepted", "3B": "Total Credits Rejected", "3C": "Total Debits Accepted", "3D": "Total Debits Rejected", "3E": "Total Payments Rejected", "3F": "Total Pre-advices Accepted", "3G": "Total Pre-advices Rejected", "3H": "Total Prenotes Accepted", "3I": "Total 
Prenotes Rejected", "3J": "Total Post-advices Accepted", "3K": "Total Post-advices Rejected", "3L": "Total Unidentified Transactions Rejected", "3M": "Total Credits Received", "3N": "Total Debits Received", "3P": "Total Pre-advices Received", "3Q": "Total Prenotes Received", "3R": "Total Post-advices Received", "3S": "Total Debits", "3T": "Total Credits", "3U": "Total Transactions", "3V": "Minimum Transfer", "3W": "Maximum Transfer", "3X": "Speed Capacity", "3Y": "Subcontractors", "40": "Remaining Quantity", "41": "Number of Batches", "42": "Number of Checks", "43": "Talk Paths", "45": "Cumulative quantity on order", "46": "Total transactions", "47": "Primary Net Quantity", "48": "Secondary Net Quantity", "49": "Number of Signed Bills of Lading", "4A": "Accounts", "4B": "Agents", "4C": "Authorized Shares", "4D": "Clerks", "4E": "Design Employees", "4F": "Foreign Related Entities", "4G": "Group Employees", "4H": "Issued Shares", "4I": "Laborers", "4J": "Other Employee Type", "4K": "Part Time Employees", "4L": "Related Entities", "4M": "Relatives Employed", "4N": "Salespersons", "4O": "Space Occupied", "4P": "Special Partners", "4Q": "Suppliers' Credit", "4R": "Technicians", "4S": "Trainees", "4T": "Warehouse Employees", "4U": "Shareholders", "50": "Number of Copies of Bill of Lading", "51": "Number of Unsigned Bills of Lading", "52": "Number of Originals", "53": "Original payment item count.", "54": "Bank reject item count.", "55": "Net to pay item count.", "56": "Minimum Contract Quantity", "57": "Minimum Order Quantity", "58": "Payment Cancellation Item Count", "5A": "Aggregate Benefit Period", "5B": "Anticipated Length of Service", "5C": "Approval/Offer Duration", "5D": "Benefit Amount", "5E": "Benefit Period", "5F": "Brothers Deceased", "5G": "Brothers Living", "5H": "Children", "5I": "Citations", "5J": "Claim Period", "5K": "Coverage", "5L": "Elimination Period", "5M": "Elimination Period - Accident", "5N": "Elimination Period - Sickness", "5O": "Employees - Nonowner", "5P": "Employees - Owner", "5Q": "Employees - Part Time", "5R": "Employees - Same Duties", "5S": "Employees - Same Occupation", "5T": "Expense", "5U": "Frequency", "5V": "General Elimination Period", "5W": "Guarantee Period", "5X": "Height", "5Y": "Hours Flown - Aircraft Type/Life", "5Z": "Hours Flown - Aircraft Type/Period", "60": "Total Authorized Quantity", "61": "Remaining Authorized Quantity", "62": "Number of Days Covered by Inventory", "63": "On Order Quantity", "64": "Past Due Quantity", "65": "Previous Month's Usage", "66": "Minimum Fabrication Quantity", "67": "Minimum Ship Quantity", "68": "Maximum Number of Shipments Allowed", "69": "Incremental Order Quantity", "6A": "Hours Flown - Aircraft/Type Flying", "6B": "Hours Flown - Lifetime", "6C": "Hours Flown - Type Flying", "6D": "Impairment Duration", "6E": "Impairment Frequency", "6F": "Installment Frequency", "6G": "Installments", "6H": "Intended Change Time Period", "6I": "Interim Term Period", "6J": "Involvement Period", "6K": "Loan Rate", "6L": "Maximum Age", "6M": "Maximum Benefit Period - Accident", "6N": "Maximum Benefit Period - Sickness", "6O": "Maximum Benefit Period", "6P": "Medication Duration", "6Q": "Minimum Age", "6R": "Own Occupation Qualification Period", "6S": "Owner's Equity", "6T": "Ownership Change Age", "6U": "Ownership Duration", "6V": "Ownership Percentage", "6W": "Payment Frequency", "6X": "Payments Number", "6Z": "Placement Period Expiration", "70": "Maximum Order Quantity", "72": "Minimum Stock Level", "73": "Maximum Stock Level", 
"74": "Damaged Goods", "75": "Receipts", "76": "Returns", "77": "Stock Transfers In", "78": "Stock Transfers Out", "79": "Billing Unit(s) Per Pricing Unit", "7A": "Previous Benefits", "7B": "Qualification Period", "7C": "Range Average", "7D": "Range Maximum", "7E": "Range Minimum", "7F": "Relationship Duration", "7G": "Replaced Amount", "7H": "Residence Duration", "7I": "Sisters Deceased", "7J": "Sisters Living", "7K": "Time Frame", "7L": "Time in Country", "7M": "Time Since Hospitalization", "7N": "Time Since Last Application", "7O": "Time Since Last Civilian Flight", "7P": "Time Since Last Insurance Medical", "7Q": "Time Since Last Military Flight", "7R": "Time Since Medical Consult", "7S": "Time Since Medication End", "7T": "Time Since Medication Start", "7U": "Time Since Onset", "7V": "Time Since Surgery", "7W": "Time Since Trip", "7X": "Travel Frequency", "7Y": "Travel Period", "7Z": "Trip Duration", "80": "Pricing Unit(s) Per Billing Unit", "81": "Prepaid Quantity Shipped", "82": "Prepaid Quantity Not Shipped", "83": "Submitted Quantity Sold", "84": "Submitted Quantity Returned", "85": "Lot Size", "86": "Nonconformance Quantity", "87": "Quantity Received", "88": "Beds", "89": "Operating Beds", "8A": "Visitation Frequency", "8B": "Weight", "8C": "Weight Change Period", "8D": "Work Period", "90": "Acknowledged Quantity", "91": "Additional Usage Quantity", "92": "Allotted Usage Quantity", "93": "Attendant-Handled Quantity", "94": "Billable Quantity", "95": "Data Storage Quantity", "96": "Non-Billable Quantity", "97": "Non-Urgent Delivery Quantity", "98": "Overflow Quantity", "99": "Quantity Used", "9A": "Time Expended", "9C": "Primary Meter Reading Value", "9D": "Engineered Standard", "9E": "Active Maintenance Time", "9F": "Actual Duration", "9H": "Estimated Duration", "9J": "Gross Estimate", "9K": "Finish Offset", "9L": "Start Offset", "9M": "Picture Count", "9N": "Component Meter Reading Count", "A1": "Acceptable Unserviceable Quantity", "A2": "Optimistic Duration", "A3": "Most Likely Duration", "A4": "Pessimistic Duration", "A5": "Adjusted Quantity", "A6": "Accidents", "A7": "Years in School", "A8": "Number of Dependents", "A9": "Years on Job", "AA": "Unacknowledged Quantity", "AAA": "Quantity Earned", "AAB": "Quantity Carried Forward", "AB": "Urgent Delivery Quantity", "AC": "Voice Storage Quantity", "ACA": "Existence Limit Period", "ACB": "Shares", "ACC": "Directors", "ACD": "Minimum", "ACE": "Voting Shares Held", "ACF": "Outstanding Shares", "ACG": "Shares Held as Treasury Stock", "ACH": "Shares Subscribed but Not Issued", "ACI": "Total Shares of Stock", "ACJ": "Shares Owned by Out-of-State Residents", "ACK": "Shares Owned by In-State Residents", "ACL": "Land Holding", "ACM": "Shares Subscribed", "ACN": "Non-Domestic Stockholders", "ACO": "Partners", "AD": "Maintenance Units", "AE": "Minimum Average Time Requirement (MATR) Units", "AF": "Wide Area Telephone Service (WATS)/800 Service Units", "AG": "Number of End Users", "AH": "Number of Message Recipients", "AI": "Number of Operator Credits", "AJ": "Daily Adjustments", "AK": "Years in this Line of Work/Profession", "AL": "Area per Units", "AN": "Age at Death", "AO": "Verified Receipts", "AP": "Order Quantity Multiple", "AQ": "Contribution Total", "AR": "Loan Repayment Total", "AS": "Participant Total", "AT": "Actual", "AU": "Cumulative Actual", "AV": "Budget", "AW": "Cumulative Budget", "AX": "Number of Insured Lives", "AY": "Forecast", "AZ": "Forecast at Complete", "B1": "Number of Mortgagors", "B2": "Mortgage Pool Count", "B3": 
"Requested Amount", "B4": "Approved Amount", "B5": "Additional Amount", "B6": "Pre-op Days", "B7": "Post-op Days", "B8": "Average", "BA": "Due-In", "BB": "Contractor Cumulative to Date", "BC": "Budget At Complete", "BD": "Contractor at Complete", "BE": "Subcontractor Cumulative to Date", "BF": "Age Modifying Units", "BG": "Subcontractor at Complete", "BH": "Book Order Quantity", "BI": "Book Inventory", "BJ": "Bedroom Count", "BK": "Bathroom Count", "BQ": "Backorder Quantity", "BR": "Blood Record", "BW": "Birth Weight", "C0": "Creditors", "CA": "Covered - Actual", "CB": "Closing Statement Balance", "CC": "Current Days on Market", "CD": "Co-insured - Actual", "CE": "Covered - Estimated", "CF": "Co-insured - Estimated", "CG": "Cumulative Gas Volume", "CH": "Cumulative Effect of Prior Period Adjustment", "CI": "Cumulative Gas Injection Volume", "CL": "Cumulative Liquid Injection Volume", "CN": "Continuance Duration", "CO": "Cumulative Oil/Condensate Volume", "CP": "Current Period Imbalance", "CR": "Certified Registered Nurse Anesthetist (CRNA) Number of " "Concurrent Procedures", "CS": "Current Service Life", "CW": "Cumulative Water Volume", "CY": "Convictions Sent", "CZ": "Total Number of Convictions", "D1": "Billed", "D3": "Number of Co-insurance Days", "DA": "Dependent's Age", "DB": "Deductible Blood Units", "DC": "Dependent Count", "DD": "Distributed", "DE": "Debited", "DF": "Deleted", "DG": "Gas Used for Drilling", "DI": "Disposed", "DN": "Default Notification Response Period", "DO": "Days Operated", "DP": "Days Produced", "DR": "Direct Workers", "DS": "Dose", "DT": "Dependent Total", "DY": "Days", "E1": "Course Segments", "E2": "Degree Segments", "E3": "Employed on this job", "E4": "Employed in this Profession", "E5": "Employed by this Company", "EA": "Exclusive Uses", "EB": "Nonexclusive Uses", "EC": "Use of Extracorporeal Circulation", "ED": "Domestic Uses", "EE": "Small Business Uses", "EM": "Emergency Modifying Units", "EP": "Product Exchange Amount", "EQ": "Equity Security Holder", "ER": "Estimated Remaining Economic Life", "ES": "Ending Stock", "ET": "Employee Total", "EW": "Evaporated Water", "F1": "Off Lease Fuel", "FA": "Full Baths", "FB": "Furnished Blood Units", "FC": "Fuel Consumed or Burned Amount", "FD": "Vehicular Radios", "FE": "Portable Radios", "FF": "Flare or Flash", "FG": "Marine Radios", "FH": "Pagers", "FI": "Conventional Mobiles", "FJ": "Trunked Channels", "FK": "Mobile Loading Allocation", "FL": "Units", "FM": "Aircraft Radios", "FR": "Units For Sale", "FS": "Gas Used for Fuel System", "FT": "Forecast to Complete", "GA": "Gross Building Area", "GB": "Gross Annual Income Multiplier", "GC": "Gross Living Area", "GE": "Original Term In Years", "GF": "Years Remaining", "GI": "Gas Injection Volume", "GL": "Gas Lift Volume", "GP": "Gross Production", "GQ": "Government Reporting Quantity", "GR": "Gas Receipt Volume", "GS": "Gas Sold", "GT": "Grade Transfer Amount", "GU": "Employee Total First Month of Quarter", "GV": "Gas Volume", "GW": "Employee Total Second Month of Quarter", "GX": "Employee Total Third Month of Quarter", "GZ": "Active Listings", "HA": "Market Price Change", "HB": "Unpaid", "HC": "Branches", "HD": "Subsidiaries", "HE": "Age of Financial Information", "HF": "Invoices", "HG": "Financial Coverage Period", "HH": "Maximum Number of Employees at Location", "HI": "Previous Number of Accounts", "HJ": "Collection Period", "HK": "Disbursement Period", "HL": "Seats", "HM": "Use of Hypothermia", "HN": "Previous Number of Employees", "HO": "Use of Hypotension", 
"HP": "Use of Hyperbaric Pressurization", "HR": "Use of Hypertension", "HS": "Hours", "II": "Number of Irregular Interest Payments", "IN": "Indirect Workers", "IP": "Number of Interest Payments", "IQ": "In-Transit Quantity", "IS": "Information Provider Standardized Motor Vehicle Penalty Points", "IT": "Intertank Transfer Amount", "JA": "Activity Codes", "JB": "Associates", "JC": "Average Employees", "JD": "Cooperative Shares", "JE": "Estimated Employees at Location", "JF": "Estimated Total Employees", "JG": "Financial Institutions", "JH": "Judgments", "JI": "Land Size", "JJ": "Liens", "JK": "Minimum Employees at Location", "JL": "Office Size", "JM": "Owner", "JN": "Plant Size", "JO": "Previous Number of Branches", "JP": "Protested Bills", "JQ": "Suits", "JR": "Uniform Commercial Code (UCC) Filings", "JS": "Judicial Stay Duration", "JT": "Warehouse Size", "K6": "Drafts", "KA": "Estimated", "KB": "Net Quantity Increase", "KC": "Net Quantity Decrease", "KD": "Expenditure Quantity", "KE": "Originals", "KF": "Duplicates", "KG": "Completed Line Items", "KH": "Completed Contracts", "KI": "Active Contracts Delinquent-Buying Party Caused", "KJ": "Active Contracts Delinquent", "KK": "Active Contracts Delinquent-Contractor Caused", "KL": "Active Contracts Delinquent-Unknown Causes", "KM": "Active Line Items Delinquent", "KN": "Active Line Items Delinquent-Buying Party Caused", "KO": "Active Line Items Delinquent-Contractor Caused", "KP": "Active Line Items Delinquent-Unknown Causes", "KQ": "Contracts Completed Delinquent-Buying Party Caused", "KR": "Contract Completed Delinquent-Contractor Caused", "KS": "Contracts Completed Delinquent-Unknown Causes", "KU": "Reported Deficiencies", "KV": "Line Items Completed Delinquent-Buying Party Caused", "KW": "Line Items Completed Delinquent-Contractor Caused", "KX": "Line Items Completed Delinquent-Unknown Causes", "KY": "Corrective Action Requests-Verbal", "KZ": "Corrective Action Requests-Written", "L2": "Guarantee Fee Buyup Maximum", "L3": "Contract Buyup", "L4": "Contract Buydown", "L5": "Guarantee Fee Rate after Alternate Payment Method", "L6": "Guarantee Fee Rate after Buyup or Buydown", "L7": "Buyup or Buydown Rate per Basis Point", "LA": "Life-time Reserve - Actual", "LB": "Loss Allowance", "LC": "Late Payment Period", "LE": "Life-time Reserve - Estimated", "LG": "Loss or Gain", "LH": "Lost Gas", "LI": "Liquid Injection Volume", "LK": "Corrective Action Requests-Method C", "LL": "Corrective Action Requests-Method D", "LM": "Corrective Action Requests-Method E", "LN": "Aged Active Line Items Delinquent-Contractor Caused", "LO": "Lost Oil", "LP": "Lease Periods", "LQ": "Aged Line Items Delinquent", "LR": "Aged Line Items Completed-Contractor Caused", "LS": "Oil Condensate Sold", "LT": "Tariff Loss Allowance", "LV": "Oil/Condensate Volume", "LW": "Lost Work Time Actual", "LX": "Lost Work Time Estimated", "LY": "Length of Residency", "M1": "Matching Equipment", "M2": "Maximum", "MA": "Miscellaneous Allowance", "MD": "Million Dollar Roundtable Credits", "ME": "Minimum Number of Employees", "MF": "Manufactured", "MI": "Miles", "MM": "Maximum Maturity Extension", "MN": "Month", "MO": "Minimum Order Package Level", "MQ": "Maximum Ship Quantity", "MX": "Maximum Number of Employees", "N1": "Number of Attacks or Occurences", "N2": "Number of Dead", "N3": "Number of Living", "N4": "Number of Times", "N5": "Minimum Forecast Quantity", "N6": "Maximum Forecast Quantity", "NA": "Number of Non-covered Days", "NB": "Number of Units (Housing)", "NC": "Number of 
Claimants", "ND": "Number of Late Charges", "NE": "Non-Covered - Estimated", "NF": "Number of Full-Time Employees", "NG": "Number of Nonsufficient Fund Items", "NL": "Number of Levels", "NN": "Number of Hospitals", "NO": "Number of Physicians", "NP": "Number of Members", "NQ": "Number of Franchisees", "NR": "Not Replaced Blood Units", "NS": "Number of Stations", "NT": "Reports", "NU": "Last Travel", "NV": "Net", "NW": "Next Travel", "OC": "Order Count", "OD": "Other Miscellaneous Disposition", "OF": "Off Premise Sales Quantity", "OG": "Other Gas Disposition", "OH": "Other Injection Volume", "OI": "Opening Statement Balance", "OL": "Original Loan Term", "ON": "On Premise Sales Quantity", "OO": "Other Oil Condensate Disposition", "OR": "Original", "OT": "Number of Operating Periods at Failure", "OU": "Outlier Days", "OV": "Overage", "OW": "Other Water Disposition", "P1": "Project Phases", "P3": "Physical Status III", "P4": "Physical Status IV", "P5": "Physical Status V", "P6": "Number of Services or Procedures", "P7": "Prescription Dosage", "P8": "Prescription Frequency", "P9": "Number of People Living at Residence", "PA": "Pipeline Adjustment or Allowance", "PB": "Pressure Base", "PC": "Prior Cumulative Imbalance", "PD": "Payment Duration Weeks", "PE": "Period of Employment", "PF": "Gas Used for Plant Fuel", "PG": "Persistency", "PK": "Parking Spaces", "PL": "Partial Baths", "PO": "Percentage of Ordered Quantity", "PP": "Purchase of Product", "PQ": "Cumulative Quantity Required Prior to the First Scheduled Period", "PR": "Requirement Quantity that was Previously Released", "PS": "Prescription", "PT": "Patients", "PW": "Pitted Water", "PX": "Prior Units Accepted", "Q1": "Minimum quantity to which tax rate applies", "Q2": "Maximum quantity to which tax rate applies", "QA": "Quantity Approved", "QB": "Quantity Dispensed", "QC": "Quantity Disapproved", "QD": "Quantity Delivered", "QE": "Quantity Deferred", "QF": "High Fabrication Authorization Quantity", "QH": "Quantity on Hold", "QI": "Community Service Duration", "QJ": "Number of Times Deported", "QL": "Jail Sentence Duration", "QM": "Probation Duration", "QN": "Restriction Duration", "QO": "Operating Quantity", "QP": "Quantity by Position", "QQ": "Suspended Duration", "QR": "High Raw Material Authorization Quantity", "QS": "Quantity Per Skid", "QU": "Quantity Serviced", "QV": "Quantity Cancelled", "QW": "Quantity Withdrawn", "QX": "Qualifying Weeks", "R3": "Estimated Remaining Physical Life", "R5": "Axles", "R6": "Platform Count", "R9": "Fuel", "RA": "Refills Authorized", "RB": "Replaced Blood Units", "RC": "Number of Items Authorized at Store", "RD": "Number of Items Authorized at Warehouse", "RE": "Gas Returned to Earth", "RF": "Number of Items in Stock", "RG": "Gas Used for Repressuring or Pressure Maintenance", "RH": "Number of Shelf Tags", "RJ": "Quantity Available on Shelf", "RL": "Gas Returned to Property for fuel", "RM": "Room Count", "RN": "Units Rented", "RQ": "Royalty", "RS": "Number of Shelf Facings", "RT": "Retail Sales Quantity", "RW": "Water Re-injected on Property", "RY": "Requirement Quantity", "S1": "Planned Unit Development (PUD) Units", "S2": "Rooms, Finished Area Above Grade", "S3": "Dwelling Area", "S4": "Garage or Carport Area", "S5": "Units for Sale", "S6": "Gross Rent Multiplier", "S7": "Age, High Value", "S8": "Age, Low Value", "S9": "Bedrooms, Finished Area Above Grade", "SA": "Shipments", "SB": "Solicited", "SC": "Bathrooms, Finished Area Above Grade", "SD": "Criminal Sentence Duration", "SE": "Gross Living, 
Finished Area Above Grade", "SF": "Site", "SG": "Swan-Ganz", "SH": "Shortage", "SI": "Rooms", "SJ": "Area of Level", "SK": "Gas Shrinkage", "SL": "Predominate Age", "SM": "Minimum Criminal Sentence Duration", "SN": "Age", "SO": "Oil Sedimentation", "SP": "Days Supply", "SQ": "Product Sales Amount", "SR": "Effective Age", "SS": "Shares of Preferred Stock", "ST": "Standard", "SU": "Forecasted Scanned Quantity", "SV": "Shares of Common Stock", "SW": "Sample Amount", "SX": "Maximum Criminal Sentence Duration", "SY": "State or Province Motor Vehicle Penalty Points", "T1": "Time Units Known", "T2": "Time Units Spent on Duty", "T3": "Total Days on Market", "T4": "Total Rooms", "T5": "Total Number of Units", "T6": "Total Number of Units for Sale", "T7": "Tires", "TA": "Tank Allowance", "TB": "Oil Theft", "TC": "Total at Complete", "TD": "Total to Date", "TE": "Number of Theatres", "TG": "Total Gas Injection Volume", "TH": "Theoretical Quantity", "TI": "Total Oil and/or Condensate Injection Volume", "TJ": "Duration in Current Job", "TK": "Total Oil and/or Condensate Disposition", "TM": "Total Water Disposition", "TN": "Total Beginning Inventory", "TO": "Total", "TP": "Time in Position", "TR": "Trips", "TS": "Total Number of Parking Spaces", "TT": "Total Production Volume", "TU": "Total Adjustments Volume", "TV": "Total Gas Disposition", "TW": "Total Water Injection Volume", "TX": "Total Ending Inventory", "TY": "Total Sales Volume", "UA": "Units Completed", "UG": "Gas Used on Property", "UL": "Approximate Number of Units for Sale Projected", "UO": "Oil Condensate Used on Property", "US": "In-Use", "UU": "Used", "V1": "Retention Quantity", "V2": "Available Quantity", "V3": "Transfer Quantity", "V4": "Surveys in Average Rating", "V5": "Vacancies", "VA": "Volume Shrinkage Adjustment or Allowance", "VB": "Blank Votes", "VC": "Cumulative Earned Value", "VD": "Scattered Votes", "VE": "Earned Value", "VF": "Federal Votes", "VG": "Gas Vented", "VH": "Schedule Variance", "VI": "Cumulative Schedule Variance", "VJ": "Cumulative Variance", "VK": "Estimate at Complete", "VL": "At Complete Variance", "VM": "Variance Adjustment", "VN": "No Votes", "VP": "Presidential Votes", "VR": "Variance", "VS": "Visits", "VT": "Votes", "VV": "Void Votes", "VY": "Yes Votes", "WA": "Total number of Workers' Compensation First Reports", "WB": "Total number of Workers' Compensation Subsequent Reports", "WC": "Total number of Workers' Compensation Combined Reports", "WD": "Units Worked per Day", "WE": "Limited Quantity", "WG": "Weight Gain", "WL": "Weight Loss", "WO": "Operator's Working Interest", "WP": "Number of Producing Wells Remaining on Property or Facility", "WR": "Number of Producing Wells Remaining on Royalty Account", "WT": "Total Working Interest", "WV": "Water Volume", "WW": "Weeks Worked", "WX": "License Withdrawal Duration", "WY": "License Withdrawals Sent", "X1": "Producing Wells", "XA": "Total of Issuable Assets", "XB": "Total System Backorder Quantity, High Priority", "XC": "Total Service Backorder Quantity, High Priority", "XD": "Total System Backorder Quantity, Low Priority", "XE": "Total Service Backorder Quantity, Low Priority", "XG": "On Hand and Due-In", "XI": "Installment Payments", "XJ": "Other War Reserve Material Requirements Protectable (OWRMRP) " "Quantity", "XL": "Approximate Number of Units Projected", "XN": "Approximate Number of Holders", "XO": "Circulating Oil", "XT": "Protected Quantity", "XU": "Reserved", "XV": "Requisitioning Objective", "XX": "Authorized Retention Level", "XY": "Safety 
Level", "XZ": "Backorder Lines", "YA": "Total Demand Quantity", "YB": "Total Demand Orders", "YC": "First Quarter Recurring Demand", "YD": "First Quarter Recurrring Orders", "YE": "First Quarter Non-recurring Demand", "YF": "First Quarter Non-recurring Orders", "YG": "Second Quarter Recurring Demand", "YH": "Second Quarter Recurring Orders", "YJ": "Second Quarter Non-recurring Demand", "YK": "Second Quarter Non-recurring Orders", "YL": "Third Quarter Recurring Demand", "YM": "Third Quarter Recurring Orders", "YN": "Third Quarter Non-recurring Demand", "YP": "Third Quarter Non-recurring Orders", "YQ": "Fourth Quarter Recurring Demand", "YR": "Fourth Quarter Recurring Orders", "YS": "Fourth Quarter Non-recurring Demand", "YT": "Fourth Quarter Non-recurring Orders", "YW": "Reorder Point Quantity", "YX": "Contract Line Item Quantity", "YY": "Years", "Z1": "Units Worked Last Day", "Z2": "Units Worked per Week", "Z3": "Units Worked per Quarter", "Z4": "Number Weeks Paid", "Z6": "Unused Accumulated Sick Days", "ZA": "Federal Medicare or Medicaid Claim Mandate - Category 1", "ZB": "Federal Medicare or Medicaid Claim Mandate - Category 2", "ZC": "Federal Medicare or Medicaid Claim Mandate - Category 3", "ZD": "Federal Medicare or Medicaid Claim Mandate - Category 4", "ZE": "Federal Medicare or Medicaid Claim Mandate - Category 5", "ZF": "Federal Pension Mandate - Category 1", "ZG": "Federal Pension Mandate - Category 2", "ZH": "Federal Pension Mandate - Category 3", "ZI": "Holding Period", "ZJ": "Federal Pension Mandate - Category 5", "ZK": "Federal Medicare or Medicaid Payment Mandate - Category 1", "ZL": "Federal Medicare or Medicaid Payment Mandate - Category 2", "ZM": "Federal Medicare or Medicaid Payment Mandate - Category 3", "ZN": "Federal Medicare or Medicaid Payment Mandate - Category 4", "ZO": "Federal Medicare or Medicaid Payment Mandate - Category 5", "ZP": "Federal Pension Mandate - Category 4", "ZQ": "Shares Added", "ZR": "Extended Term", "ZS": "Amortization Term", "ZT": "Beginning Shares", "ZU": "Shares Deleted", "ZW": "Current Share Balance", } time_period_qualifier = { "1": "Chargeable Periods", "2": "Periods Held", "3": "Free Periods", "4": "Saturdays, Sundays and Holidays", "5": "Other Allowance Periods", "6": "Hour", "7": "Day", "8": "Not Applicable", "10": "Six Hours", "11": "12 Hours", "12": "18 Hours", "13": "24 Hours", "14": "Debit Days", "15": "Credit Days", "16": "Excess Days", "17": "Hazardous Days", "18": "Holidays", "19": "Saturdays and Sundays", "20": "Sundays and Holidays", "21": "Years", "22": "Service Year", "23": "Calendar Year", "24": "Year to Date", "25": "Contract", "26": "Episode", "27": "Visit", "28": "Outlier", "29": "Remaining", "30": "Exceeded", "31": "Not Exceeded", "32": "Lifetime", "33": "Lifetime Remaining", "34": "Month", "35": "Week", "36": "Admisson", "Z": "Mutually Defined", } delivery_or_calendar_pattern_code = { "1": "1st Week of the Month", "2": "2nd Week of the Month", "3": "3rd Week of the Month", "4": "4th Week of the Month", "5": "5th Week of the Month", "6": "1st & 3rd Weeks of the Month", "7": "2nd & 4th Weeks of the Month", "8": "1st Working Day of Period", "9": "Last Working Day of Period", "A": "Monday through Friday", "B": "Monday through Saturday", "C": "Monday through Sunday", "D": "Monday", "E": "Tuesday", "F": "Wednesday", "G": "Thursday", "H": "Friday", "J": "Saturday", "K": "Sunday", "L": "Monday through Thursday", "M": "Immediately", "N": "As Directed", "O": "Daily Mon. through Fri.", "P": "1/2 Mon. & 1/2 Thurs.", "Q": "1/2 Tues. 
& 1/2 Thurs.", "R": "1/2 Wed. & 1/2 Fri.", "S": "Once Anytime Mon. through Fri.", "SA": "Sunday, Monday, Thursday, Friday, Saturday", "SB": "Tuesday through Saturday", "SC": "Sunday, Wednesday, Thursday, Friday, Saturday", "SD": "Monday, Wednesday, Thursday, Friday, Saturday", "SG": "Tuesday through Friday", "SL": "Monday, Tuesday and Thursday", "SP": "Monday, Tuesday and Friday", "SX": "Wednesday and Thursday", "SY": "Monday, Wednesday and Thursday", "SZ": "Tuesday, Thursday and Friday", "T": "1/2 Tue. & 1/2 Fri.", "U": "1/2 Mon. & 1/2 Wed.", "V": "1/3 Mon., 1/3 Wed., 1/3 Fri.", "W": "Whenever Necessary", "X": "1/2 By Wed., Bal. By Fri.", "Y": "None (Also Used to Cancel or Override a Previous Pattern)", "Z": "Mutually Defined", } delivery_time_pattern_code = { "A": "1st Shift (Normal Working Hours)", "B": "2nd Shift", "C": "3rd Shift", "D": "A.M.", "E": "P.M.", "F": "As Directed", "G": "Any Shift", "Y": "None (Also Used to Cancel or Override a Previous Pattern)", "Z": "Mutually Defined", } reference_id_qualifier = { "01": "American Bankers Assoc. (ABA) Transit/Routing Number (Including " "Check Digit, 9 Digits)", "02": "Society for Worldwide Interbank Financial Telecommunication " "(S.W.I.F.T.) Identification (8 or 11 Characters)", "03": "Clearing House Interbank Payment System (CHIPS) Participant " "Number (3 or 4 Digits)", "04": "Canadian Financial Institution Branch and Institution Number", "05": "Clearing House Interbank Payment System (CHIPS) User " "Identification (6 digits)", "06": "System Number", "07": "Add-On System Number", "08": "Carrier Assigned Package Identification Number", "09": "Customs Bar Code Number", "0A": "Supervisory Appraiser Certification Number", "0B": "State License Number", "0D": "Subject Property Verification Source", "0E": "Subject Property Reference Number", "0F": "Subscriber Number", "0G": "Reviewer File Number", "0H": "Comparable Property Pending Sale Reference Number", "0I": "Comparable Property Sale Reference Number", "0J": "Subject Property Non-Sale Reference Number", "0K": "Policy Form Identifying Number", "0L": "Referenced By", "0M": "Mortgage Identification Number", "0N": "Attached To", "0P": "Real Estate Owned Property Identifier", "10": "Account Managers Code", "11": "Account Number", "12": "Billing Account", "13": "Horizontal Coordinate", "14": "Master Account Number", "15": "Vertical Coordinate", "16": "Military Interdepartmental Purchase Request (MIPR) Number", "17": "Client Reporting Category", "18": "Plan Number", "19": "Division Identifier", "1A": "Blue Cross Provider Number", "1B": "Blue Shield Provider Number", "1C": "Medicare Provider Number", "1D": "Medicaid Provider Number", "1E": "Dentist License Number", "1F": "Anesthesia License Number", "1G": "Provider UPIN Number", "1H": "CHAMPUS Identification Number", "1I": "Department of Defense Identification Code (DoDIC)", "1J": "Facility ID Number", "1K": "Payor's Claim Number", "1L": "Group or Policy Number", "1M": "Preferred Provider Organization Site Number", "1N": "Diagnosis Related Group (DRG) Number", "1O": "Consolidation Shipment Number", "1P": "Accessorial Status Code", "1Q": "Error Identification Code", "1R": "Storage Information Code", "1S": "Ambulatory Patient Group (APG) Number", "1T": "Resource Utilization Group (RUG) Number", "1U": "Pay Grade", "1V": "Related Vendor Order Number", "1W": "Member Identification Number", "1X": "Credit or Debit Adjustment Number", "1Y": "Repair Action Number", "1Z": "Financial Detail Code", "20": "Repair Part Number", "21": "American Gas Association 
Equation Number", "22": "Special Charge or Allowance Code", "23": "Client Number", "24": "Short-term Disability Policy Number", "25": "Reason Not Lowest Cost Code", "26": "Union Number", "27": "Insuror Pool Identification Number", "28": "Employee Identification Number", "29": "Foreclosure Account Number", "2A": "Import License Number", "2B": "Terminal Release Order Number", "2C": "Long-term Disability Policy Number", "2D": "Aeronautical Equipment Reference Number (AERNO)", "2E": "Foreign Military Sales Case Number", "2F": "Consolidated Invoice Number", "2G": "Amendment", "2H": "Assigned by transaction set sender", "2I": "Tracking Number", "2J": "Floor Number", "2K": "Food and Drug Administration (FDA) Product Type", "2L": "Association of American Railroads (AAR) Railway Accounting Rules", "2M": "Federal Communications Commission (FCC) Identifier", "2N": "Federal Communications Commission (FCC) Trade/Brand Identifier", "2O": "Occupational Safety and Health Administration (OSHA) Claim Number", "2P": "Subdivision Identifier", "2Q": "Food and Drug Administration (FDA) Accession Number", "2R": "Coupon Redemption Number", "2S": "Catalog", "2T": "Sub-subhouse Bill of Lading", "2U": "Payer Identification Number", "2V": "Special Government Accounting Classification Reference Number " "(ACRN)", "2W": "Change Order Authority", "2X": "Supplemental Agreement Authority", "2Y": "Wage Determination", "2Z": "U.S. Customs Service (USCS) Anti-dumping Duty Case Number", "30": "United States Government Visa Number", "31": "Docket Number", "32": "Credit Repository Code", "33": "Lender Case Number", "34": "Loan Request Number", "35": "Multifamily Project Number", "36": "Underwriter Identification Number", "37": "Condominium Identification Number", "38": "Master Policy Number", "39": "Proposal Number", "3A": "Section of the National Housing Act Code", "3B": "Supplemental Claim Number", "3C": "Payee Loan Number", "3D": "Servicer Loan Number", "3E": "Investor Loan Number", "3F": "Show Identification", "3G": "Catastrophe Number", "3H": "Case Number", "3I": "Precinct Number", "3J": "Office Number", "3K": "Petroleum Pool Code", "3L": "Branch Identifier", "3M": "Federal Communications Commission (FCC) Condition Code", "3N": "Gas Custodian Identification", "3O": "U.S. Customs Service (USCS) Pre-approval Ruling Number", "3P": "Third Party Originator Number", "3Q": "Food and Drug Administration (FDA) Product Code", "3R": "U.S. Customs Service (USCS) Binding Ruling Number", "3S": "Provincial (Canadian) Sales Tax Exemption Number", "3T": "U.S. Customs Service (USCS) Pre-classification Ruling Number", "3U": "Protraction Number", "3V": "Formation Identifier", "3W": "U.S. Customs Service (USCS) Commercial Description", "3X": "Subcontract Number", "3Y": "Receiver Assigned Drop Zone", "3Z": "Customs Broker Reference Number", "40": "Lease Schedule Number - Replacement", "41": "Lease Schedule Number - Prior", "42": "Phone Calls", "43": "Supporting Document Number", "44": "End Use Number", "45": "Old Account Number", "46": "Old Meter Number", "47": "Plate Number", "48": "Agency's Student Number. 
This is the number assigned by an agency " "other than the institution sending the record.", "49": "Family Unit Number", "4A": "Personal Identification Number (PIN)", "4B": "Shipment Origin Code", "4C": "Shipment Destination Code", "4D": "Shipping Zone", "4E": "Carrier-assigned Consignee Number", "4F": "Carrier-assigned Shipper Number", "4G": "Provincial Tax Identification", "4H": "Commercial Invoice Number", "4I": "Balance-due Reference Number", "4J": "Vehicle-related Services Reference Number", "4K": "Accessorial Rail Diversion Reference Number", "4L": "Location-specific Services Reference Number", "4M": "Special Move Reference Number", "4N": "Special Payment Reference Number", "4O": "Canadian Goods & Services or Quebec Sales Tax Reference Number", "4P": "Affiliation Number", "4Q": "Call Sign", "4R": "Rule Section", "4S": "Preferred Call Sign", "4T": "North American Datum Standard (NADS)", "4U": "Market Area", "4V": "Emission Designator", "4W": "Study", "4X": "Log", "4Y": "Subhouse Bill of Lading", "4Z": "U.S. Customs Service (USCS) Countervailing Duty Case Number", "50": "State Student Identification Number", "51": "Picture Number", "52": "SWIFT (MT 100)", "53": "SWIFT (MT 202)", "54": "FEDWIRE (Federal Wire Transfer)", "55": "Sequence Number", "56": "Corrected Social Security Number", "57": "Prior Incorrect Social Security Number", "58": "Corrected Batch Number", "59": "Prior Incorrect Batch Number", "5A": "Offense Tracking", "5B": "Supplemental Account Number", "5C": "Congressional District", "5D": "Line of Credit Category", "5E": "Consumer Identifier", "5F": "Warrant", "5G": "Complaint", "5H": "Incident", "5I": "Offender Tracking", "5J": "Driver's License", "5K": "Commercial Driver's License", "5L": "Jurisdictional Community Number", "5M": "Previous Sequence", "5N": "Citation of Statute", "5O": "Citation of Opinion", "5P": "National Criminal Information Center Originating Agency " "Identification", "5Q": "State Criminal History Repository Individual Identification", "5R": "Federal Bureau of Investigation Individual Identification", "5S": "Processing Area", "5T": "Payment Location", "5U": "Flood Data Identifier", "5V": "Coupon Distribution Method", "5W": "Original Uniform Commercial Code Filing Number", "5X": "Amended Uniform Commercial Code Filing Number", "5Y": "Continuation Uniform Commercial Code Filing Number", "5Z": "Uniform Commercial Code Filing Collateral Number", "60": "Account Suffix Code", "61": "Taxing Authority Identification Number", "63": "Prior Loan Number", "64": "Jurisdictional Community Name Identifier", "65": "Total Order Cycle Number", "66": "Previous Policy Number", "67": "Previous Claim History Identifier", "68": "Dental Insurance Account Number", "69": "Dental Insurance Policy Number", "6A": "Consignee Reference Number", "6B": "U.S. Customs Service (USCS) Entry Number", "6C": "U.S. Customs Service (USCS) Entry Type Code", "6D": "U.S. 
Customs Service (USCS) Statement Number", "6E": "Map Reference", "6F": "Appraiser License", "6G": "Map Number", "6H": "Comparable Property Verification Source", "6I": "Comparable Property", "6J": "Census Tract", "6K": "Zone", "6L": "Agent Contract Number", "6M": "Application Number", "6N": "Claimant Number", "6O": "Cross Reference Number", "6P": "Group Number", "6Q": "Insurance License Number", "6R": "Provider Control Number", "6S": "Provider Order Ticket Number", "6T": "Pilot License Number", "6U": "Question Number", "6V": "Reissue Cession Number", "6W": "Sequence Number", "6X": "Specimen Identifier", "6Y": "Equipment Initial", "6Z": "Secretaria de Comercia y Famenta Industrial (SECOFI) Number", "70": "Calendar Number", "71": "(Working) Shift Number", "72": "Schedule Reference Number", "73": "Statement of Work (SOW)", "74": "Work Breakdown Structure (WBS)", "75": "Organization Breakdown Structure", "76": "Milestone", "77": "Work Package", "78": "Planning Package", "79": "Cost Account", "7A": "Purchase Order Number Included in On-Order Position", "7B": "Purchase Order Number of Shipment Received since Last Reporting " "Date", "7C": "Purchase Order Number of Order Received since Last Reporting Date", "7D": "Tester Identification", "7E": "Collector Identification", "7F": "Repeat Location", "7G": "Data Quality Reject Reason", "7H": "Environmental Protection Agency (EPA) Test Type Purpose Code", "7I": "Subscriber Authorization Number", "7J": "Toll Billing Telephone Reference Number", "7K": "List of Materials", "7L": "Qualified Materials List", "7M": "Frame", "7N": "Piggyback", "7O": "Tripleback", "7P": "Sheet", "7Q": "Engineering Change Order", "7R": "Representative Identification Number", "7S": "Drawing Type", "7T": "Master Contract", "7U": "Related Transaction Reference Number", "7W": "Interchange Train Identification", "7X": "Home Mortgage Disclosure Act (HMDA) State Code", "7Y": "Home Mortgage Disclosure Act (HMDA) County Code", "7Z": "Home Mortgage Disclosure Act (HMDA) Metropolitan Statistical Area " "(MSA)", "80": "Charge Number", "81": "Symbol Number (for Milestone or LOB reports)", "82": "Data Item Description (DID) Reference", "83": "Extended (or Exhibit) Line Item Number (ELIN)", "84": "Contractor Data Requirements List (CDRL)", "85": "Subcontractor Data Requirements (SDRL)", "86": "Operation Number", "87": "Functional Category", "88": "Work Center", "89": "Assembly Number", "8A": "Health Maintenance Organization (HMO) Authorization Number", "8B": "Preferred Provider Organization (PPO) Authorization Number", "8C": "Third-party Organization (TPO) Authorization Number", "8D": "Chemical Abstract Service Registry Number", "8E": "Guarantor Loan Number", "8F": "School Loan Number", "8G": "Automated Clearinghouse (ACH) Trace Number", "8H": "Check List Number", "8I": "FEDWIRE Confirmation Number", "8J": "Society for Worldwide Interbank Financial Telecommunications " "(SWIFT) Confirmation Number", "8K": "Dominion of Canada Code", "8L": "International Standard Industry Classification Code (ISIC)", "8M": "Originating Company Identifier", "8N": "Receiving Company Identifier", "8O": "Automated Clearing House (ACH) Entry Description", "8P": "Originating Depository Financial Institution Identifier", "8Q": "Receiving Depository Financial Institution Identifier", "8R": "Security Type", "8S": "Broker Identification", "8T": "Committee for Uniform Security Identification Procedure (CUSIP)", "8U": "Bank Assigned Security Identifier", "8V": "Credit Reference", "8W": "Bank to Bank Information", "8X": 
"Transaction Category or Type", "8Y": "Safekeeping Account Number", "8Z": "Alternate Clause Number", "90": "Subassembly Number", "91": "Cost Element", "92": "Change Document Number", "93": "Funds Authorization", "94": "File Identification Number", "95": "Committee on Uniform Securities Identification Procedures (CUSIP) " "Number", "96": "Stock Certificate Number", "97": "Package Number", "98": "Container/Packaging Specification Number", "99": "Rate Conference ID Code", "9A": "Repriced Claim Reference Number", "9B": "Repriced Line Item Reference Number", "9C": "Adjusted Repriced Claim Reference Number", "9D": "Adjusted Repriced Line Item Reference Number", "9E": "Replacement Claim Number", "9F": "Referral Number", "9G": "Department of Defense Form 250 Requirement Code", "9H": "Packaging Group Number", "9I": "Automated Clearing House (ACH) Standard Entry Class", "9J": "Pension Contract", "9K": "Servicer", "9L": "Service Bureau", "9M": "Clearing House Interbank Payments System (CHIPS) Sequence Number", "9N": "Investor", "9P": "Loan Type", "9Q": "Pool Suffix", "9R": "Job Order Number", "9S": "Delivery Region", "9T": "Tenor", "9U": "Loan Feature Code", "9V": "Payment Category", "9W": "Payer Category", "9X": "Account Category", "9Y": "Bank Assigned Bankers Reference Number", "9Z": "Chamber of Commerce Number", "A0": "Advertiser Number", "A1": "Analysis number/Test number", "A2": "Disability Insurance Account Number", "A3": "Assigment Number", "A4": "Disability Insurance Policy Number", "A5": "Educational Institution Identification Number", "A6": "Employee Identification Number", "A7": "Flexible Spending Account (FSA) Insurance Account Number", "A8": "Flexible Spending Account (FSA) Insurance Policy Number", "A9": "Health Insurance Account Number", "AA": "Accounts Receivable Statement Number", "AAA": "Distributor's Split Agent Number", "AAB": "Fund Manager's Reference Number", "AAC": "Agency Hierarchical Level", "AAD": "Officer License Number", "AAE": "Previous Distributor Number", "AAF": "Interviewer ID", "AAG": "Military ID", "AAH": "Option Policy Number", "AAI": "Payroll Account Number", "AAJ": "Prior Contract Number", "AAK": "Worksite Number", "AAL": "Agent Number", "AAM": "Treaty Identifier", "AAN": "Associated Case Control Number", "AAO": "Carrier Assigned Code", "AAP": "Dealer Number", "AAQ": "Directory Number", "AAR": "Distributor Assigned Transaction Number", "AAS": "Distributor Assigned Order Number", "AAT": "Distributor's Account Number", "AAU": "General Agency Number", "AAV": "Laboratory Number", "AAW": "Agency Assigned Number", "AAX": "List Bill Number", "AAY": "Accounting Period Reference", "AAZ": "Paramedical ID Number", "AB": "Acceptable Source Purchaser ID", "ABA": "Payroll Number", "ABB": "Personal ID Number", "ABC": "Policy Link Number", "ABD": "Secondary Policy Number", "ABE": "Special Quote Number", "ABF": "National Property Registry System Level 1", "ABG": "National Property Registry System Level 2", "ABH": "Investor Assigned Identification Number", "ABJ": "Ginnie Mae (Government National Mortgage Association) Pool " "Package Number", "ABK": "Mortgage Electronic Registration System Organization Identifier", "ABL": "Seller Loan Number", "ABM": "Sub-Servicer Loan Number", "ABN": "National Property Registry System Level 3", "ABO": "State Hazardous Waste Entity Identifier", "ABP": "Bankruptcy Procedure Number", "ABQ": "National Business Identification Number", "ABR": "Prior Data Universal Number System (D-U-N-S) Number, " "Dun & Bradstreet", "ABS": "Vessel Name", "ABT": 
"Security Instrument Number", "ABU": "Assignment Recording Number", "ABV": "Book Number", "ABY": "Health Care Financing Administration National Payer " "Identification Number", "AC": "Air Cargo Transfer Manifest", "ACA": "Growth Factor Reference", "ACB": "Region", "ACC": "Status", "ACD": "Class Code", "ACE": "Service Request Number", "ACF": "Supplement Number", "ACG": "Previous Ticket Number", "ACH": "One Call Agency Ticket Number", "ACI": "Ticket Number", "ACJ": "Bill of Material Revision Number", "ACK": "Drawing Revision Number", "ACL": "Application Transaction Reference Number", "ACM": "Related Object Identification Number", "ACN": "Common Access Reference Number", "ACO": "First Transfer Number", "ACP": "Continuous Transfer Number", "ACQ": "Last Transfer Number", "ACR": "Automated Clearinghouse (ACH) Return/Notification of Change " "(NOC) Code", "ACS": "Society of Property Information Compilers and Analysts", "ACT": "Accounting Code", "AD": "Acceptable Source DUNS Number", "ADA": "Agency for International Development Acquisition Regulation " "(AIDAR)", "ADB": "Master Property Number", "ADC": "Project Property Number", "ADD": "Unit Property Number", "ADE": "Associated Property Number", "ADF": "Associated Number For Limited Common Element Parking", "ADG": "Associated Number For Unit Parking", "ADH": "Associated Number For Joined Unit not re-subdivided", "ADI": "Processor Identification Number", "ADM": "Air Dimension Code", "AE": "Authorization for Expense (AFE) Number", "AEA": "Numero de Cedula de Identidad (CIN) Number", "AEB": "Company's Registry Office (CRO) Number", "AEC": "Government Registration Number", "AED": "Judicial Number", "AEE": "Numero de Identificacion Tributaria (NIT)", "AEF": "Passport Number", "AEG": "Patron Number", "AEH": "Registro Informacion Fiscal (RIF)", "AEI": "Registro Unico de Contribuyente (RUC)", "AEJ": "Superintendencia de Inversiones Extranjeras (SIEX) Number", "AEK": "Tokyo Shoko Research Business Identifier", "AEL": "Registro Nacional de Contribuyente (RNC)", "AEM": "Distribution Center Number", "AF": "Airlines Flight Identification Number", "AG": "Agent's Shipment Number", "AH": "Agreement Number", "AHC": "Air Handling Code", "AI": "Associated Invoices", "AJ": "Accounts Receivable Customer Account", "AK": "Sending Company Audit Number (Automated Clearinghouse Transfers)", "AL": "Accounting (Equipment) Location Number", "ALC": "Agency Location Code", "ALG": "Title Company Code Book Reference", "ALH": "Title Document Schedule", "ALI": "Recording Number", "ALJ": "Title Policy Number", "ALT": "Alteration Number", "AM": "Adjustment Memo (Charge Back)", "AN": "Associated Purchase Orders", "AO": "Appointment Number", "AP": "Accounts Receivable Number", "API": "American Petroleum Institute (API) Deduction Code", "AQ": "Access Code", "AR": "Arrival Code", "AS": "Acceptable Source Supplier ID", "ASL": "Atomic Safety and Licensing Board Panel (ASLBP) Number", "ASP": "Animal Species", "AST": "Animal Strain", "AT": "Appropriation Number", "ATC": "Maintenance Availability Type", "AU": "Authorization to Meet Competition Number", "AV": "Health Insurance Rating Account Number", "AW": "Air Waybill Number", "AX": "Government Accounting Class Reference Number (ACRN)", "AY": "Floor Plan Approval Number", "AZ": "Health Insurance Policy Number", "B1": "Lessee Bill Code Number", "B2": "Axle Ratio", "B3": "Preferred Provider Organization Number", "B4": "Bilateral Car Service Agreements", "B5": "Health Insurance Rating Suffix Code", "B6": "Life Insurance Billing Account Number", 
"B7": "Life Insurance Policy Number", "B8": "Life Insurance Billing Suffix Code", "B9": "Retirement Plan Account Number", "BA": "Retirement Plan Policy Number", "BAA": "Franchise Tax Account Number", "BAB": "Certificate of Incorporation Number", "BAC": "Beam Assembly Code", "BAD": "State Tax Identification Number", "BAE": "Charter Number", "BAF": "Receipt Number", "BAG": "Withdrawal Account Number", "BAH": "Deposit Account Number", "BAI": "Business Identification Number", "BB": "Authorization Number", "BC": "Buyer's Contract Number", "BCI": "Basic Contract Line Item Number", "BD": "Bid Number", "BE": "Business Activity", "BF": "Billing Center Identification", "BG": "Beginning Serial Number", "BH": "Lease Schedule Number - Blanket", "BI": "Bonded Carrier Internal Revenue Service Identification Number", "BJ": "Carrier's Customs Bond Number", "BK": "Broker's Order Number", "BKT": "Bank Telegraphic Number", "BL": "Government Bill of Lading", "BLT": "Billing Type", "BM": "Bill of Lading Number", "BMM": "Begin Mile Marker", "BN": "Booking Number", "BO": "Bin Location Number", "BOI": "Binary Object Identifier", "BP": "Adjustment Control Number", "BQ": "Health Maintenance Organization Code Number", "BR": "Broker or Sales Office Number", "BS": "Split Booking Number", "BT": "Batch Number", "BU": "Buyer's Approval Mark", "BV": "Purchase Order Line Item Identifier (Buyer)", "BW": "Blended With Batch Number", "BX": "Buyer's Shipment Mark Number", "BY": "Repair Category Number", "BZ": "Complaint Code", "C0": "Canadian Social Insurance Number", "C1": "Customer material specification number", "C2": "Customer process specification number", "C3": "Customer specification number", "C4": "Change Number", "C5": "Customer Tracking Number For Loaned Materials", "C6": "Carnet Number", "C7": "Contract Line Item Number", "C8": "Corrected Contract Number", "C9": "Previous Credit/Debit Adjustment Number", "CA": "Cost Allocation Reference", "CB": "Combined Shipment", "CBG": "Census Block Group", "CC": "Contract Co-op Number", "CD": "Credit Note Number", "CDN": "Citizenship Document Number", "CE": "Class of Contract Code", "CF": "Fleet Reference Number", "CG": "Consignee's Order Number", "CH": "Customer catalog number", "CI": "Unique Consignment Identifier", "CIR": "Circuit Number", "CIT": "Citation", "CJ": "Clause Number", "CK": "Check Number", "CL": "Seller's Credit Memo", "CM": "Buyer's Credit Memo", "CMN": "Continuous Move Number", "CMP": "Customer Maintenance Period Sequence Number", "CMT": "Component", "CN": "Carrier's Reference Number (PRO/Invoice)", "CNO": "Commitment Number", "CO": "Customer Order Number", "COL": "Collocation Indicator", "COT": "Certificate of Transportation", "CP": "Condition of Purchase Document Number", "CPA": "Canadian Province Operating Authority Number", "CPT": "Current Procedural Terminology Code", "CQ": "Customshouse Broker License Number", "CR": "Customer Reference Number", "CRN": "Casualty Report Number", "CRS": "Casualty Report Serial Number", "CS": "Condition of Sale Document Number", "CSC": "CS54 Key Train Indicator Code", "CSG": "CS54 Key Train Indicator Group Name", "CST": "Census State Code", "CT": "Contract Number", "CTS": "Census Tract Suffix", "CU": "Clear Text Clause", "CV": "Coil Number", "CW": "Canadian Wheat Board Permit Number", "CX": "Consignment Classification ID", "CY": "Commercial Registration Number", "CYC": "Periodicity Code", "CZ": "Contract Rider Number (Used in conjunction with contract number)", "D0": "Data Reliability Code", "D1": "Drug Enforcement 
Administration Order Blank Number", "D2": "Supplier Document Identification Number", "D3": "National Association of Boards of Pharmacy Number", "D4": "Cut Number", "D5": "Dye Lot Number", "D6": "Duplicate Bill Number", "D7": "Coverage Code", "D8": "Loss Report Number", "D9": "Claim Number", "DA": "Domicile Branch Number", "DB": "Buyer's Debit Memo", "DC": "Dealer purchase order number", "DD": "Document Identification Code", "DE": "Depositor Number", "DF": "Defense Federal Acquisition Regulations (DFAR)", "DG": "Drawing Number", "DH": "Drug Enforcement Administration Number", "DHH": "Department of Health and Human Services Acquisition Regulation " "(HHSAR)", "DI": "Distributor Invoice Number", "DIS": "District Number", "DJ": "Delivery Ticket Number", "DK": "Dock Number", "DL": "Seller's Debit Memo", "DM": "Associated Product Number", "DN": "Draft Number", "DNR": "Deposit Number", "DNS": "D-U-N-S+4, D-U-N-S Number with Four Character Suffix", "DO": "Delivery Order Number", "DOA": "Department of Agriculture Acquisition Regulation (AGAR)", "DOC": "Department of Commerce Acquisition Regulation (CAR)", "DOE": "Department of Energy Acquisition Regulation (DEAR)", "DOI": "Department of Interior Acquisition Regulation (DIAR)", "DOJ": "Department of Justice Acquisition Regulation (JAR)", "DOL": "Department of Labor Acquisition Regulation (DOLAR)", "DON": "Density Order Number", "DOS": "Department of State Acquisition Regulation (DOSAR)", "DOT": "Department of Transportation Acquisition Regulation (TAR)", "DP": "Department Number", "DQ": "Delivery Quote Number", "DR": "Dock Receipt Number", "DRN": "Drainhole Number", "DS": "Defense Priorities Allocation System (DPAS) Priority Rating", "DSC": "Departure from Specification Class Code", "DSI": "Departure from Specification Number", "DST": "Departure from Specification Type Code", "DT": "Downstream Shipper Contract Number", "DTS": "Department of the Treasury Acquisition/Procurement Regulation " "(TAPR)", "DU": "Dependents Information", "DUN": "D-U-N-S Number Dun & Bradstreet", "DV": "Diversion Authority Number", "DW": "Deposit Sequence Number", "DX": "Department/Agency Number", "DY": "Department of Defense Transportation Service Code Number " "(Household Goods)", "DZ": "Certified Registered Nurse Anesthetist (CRNA) Provider " "Identification Number", "E1": "Emergency Order Number", "E2": "Part Causing Repair Number", "E3": "Expansion on Effect of Change Number", "E4": "Charge Card Number", "E5": "Claimant's Claim Number", "E6": "Backout Procedure Code", "E7": "Service Bulletin Number", "E8": "Service Contract (Coverage) Number", "E9": "Attachment Code", "EA": "Medical Record Identification Number", "EB": "Embargo Permit Number", "EC": "Circular", "ED": "Export Declaration", "EDA": "Department of Education Acquisition Regulation (EDAR)", "EE": "Election District", "EF": "Electronic Funds Transfer ID Number", "EG": "Ending Serial Number", "EH": "Financial Classification Code", "EI": "Employer's Identification Number", "EJ": "Patient Account Number", "EK": "Healthcare Manpower Shortage Area (HMSA) Facility Identification " "Number", "EL": "Electronic device pin number", "EM": "Electronic Payment Reference Number", "EMM": "End Mile Marker", "EN": "Embargo Number", "END": "Endorsement Number", "EO": "Submitter Identification Number", "EP": "Export Permit Number", "EPA": "Environmental Protection Agency Acquisition Regulation (EPAAR)", "EPB": "Environmental Protection Agency Transporter Identification Number", "EQ": "Equipment Number", "ER": "Container or 
Equipment Receipt Number", "ES": "Employer's Social Security Number", "ESN": "Estimate Sequence Number", "ET": "Excess Transportation", "EU": "End User's Purchase Order Number", "EV": "Receiver Identification Number", "EW": "Mammography Certification Number", "EX": "Estimate Number", "EY": "Receiver Sub-identification Number", "EZ": "Electronic Data Interchange Agreement Number", "F1": "Version Code - National", "F2": "Version Code - Local", "F3": "Submission Number", "F4": "Facility Certification Number", "F5": "Medicare Version Code", "F6": "Health Insurance Claim (HIC) Number", "F7": "New Health Insurance Claim (HIC) Number", "F8": "Original Reference Number", "F9": "Freight Payor Reference Number", "FA": "Federal Acquisition Regulations (FAR)", "FB": "File Transfer Form Number", "FC": "Filer Code Issued by Customs", "FCN": "Assigned Contract Number", "FD": "Filer Code Issued by Bureau of Census", "FE": "Failure mechanism number", "FF": "Film Number", "FG": "Fund Identification Number", "FH": "Clinic Number", "FI": "File Identifier", "FJ": "Line Item Control Number", "FK": "Finish Lot Number", "FL": "Fine Line Classification", "FLZ": "Flood Zone", "FM": "Federal Maritime Commisssion (FMC) Forwarders Number", "FMP": "Facility Measurement Point Number", "FN": "Forwarder's/Agent's Reference Number", "FND": "Finder Number", "FO": "Drug Formulary Number", "FP": "Forestry Permit Number", "FQ": "Form Number", "FR": "Freight Bill Number", "FS": "Final Sequence Number", "FSN": "Assigned Sequence Number", "FT": "Foreign Trade Zone", "FTN": "Premarket Notification Number", "FU": "Fund Code", "FV": "Health Maintenance Organization (HMO) Reference Number", "FW": "State License Identification Number", "FWC": "Final Work Candidate Number", "FX": "Failure Analysis Report Number", "FY": "Claim Office Number", "FZ": "Processor's Invoice Number", "G1": "Prior Authorization Number", "G2": "Provider Commercial Number", "G3": "Predetermination of Benefits Identification Number", "G4": "Peer Review Organization (PRO) Approval Number", "G5": "Provider Site Number", "G6": "Payer Assigned Resubmission Reference Number", "G7": "Resubmission Reason Code", "G8": "Resubmission Number", "G9": "Secondary Employee Identification Number", "GA": "Government Advance Progress", "GB": "Grain Block Number", "GC": "Government Contract Number", "GD": "Return Goods Bill of Lading Number", "GE": "Geographic Number", "GF": "Specialty License Number", "GG": "Gauge Ticket Number", "GH": "Identification Card Serial Number", "GI": "Secondary Provider Number", "GJ": "Cornbore Certification Number", "GK": "Third Party Reference Number", "GL": "Geographic Destination Zone Number", "GM": "Loan Acquisition Number", "GN": "Folder Number", "GO": "Exhibit Identifier", "GP": "Government Priority Number", "GQ": "Internal Purchase Order Release Number", "GR": "Grain Order Reference Number", "GS": "General Services Administration Regulations (GSAR)", "GT": "Goods and Service Tax Registration Number", "GU": "Internal Purchase Order Item Number", "GV": "Third Party Purchase Order Number", "GW": "Third Party Purchase Order Release Number", "GWS": "Group Work Candidate Sequence Number", "GX": "Third Party Purchase Order Item Number", "GY": "Empty Repositioning Number", "GZ": "General Ledger Account", "H1": "High Fabrication Authorization Number", "H2": "High Raw Material Authorization Number", "H3": "Gravity Source Meter Number", "H4": "Federal Information Resources Management Regulation", "H5": "Special Clause", "H6": "Quality Clause", "H7": 
"Standard Clause", "H8": "Home Mortgage Disclosure Act (HMDA) Census Tract", "H9": "Payment History Reference Number", "HA": "Competent Authority", "HB": "Bill & Hold Invoice Number", "HC": "Heat Code", "HD": "Department of Transportation Hazardous Number", "HE": "Hazardous Exemption Number", "HF": "Engineering Data List", "HG": "Civil Action Number", "HH": "Fiscal Code", "HHT": "Type of Household Goods Code", "HI": "Health Industry Number (HIN)", "HJ": "Identity Card Number", "HK": "Judgment Number", "HL": "SIREN Number", "HM": "SIRET Number", "HMB": "Home Mortgage Disclosure Act Block Number Area", "HN": "Hazardous Certification Number", "HO": "Shipper's Hazardous Number", "HP": "Pack & Hold Invoice Number", "HPI": "Health Care Financing Administration National Provider Identifier", "HQ": "Reinsurance Reference", "HR": "Horsepower", "HS": "Harmonized Code System (Canada)", "HT": "Code of Federal Regulations", "HU": "Type of Escrow Number", "HUD": "Department of Housing and Urban Development Acquisition " "Regulation (HUDAR)", "HV": "Escrow File Number", "HW": "High/Wide File Number", "HX": "Auto Loss Item Number", "HY": "Property Loss Item Number", "HZ": "Tax Agency Number (MERS [Mortgage Electronic Registration System] " "Federal Information Processing Standards [FIPS] Based Number)", "I1": "Owning Bureau Identification Number", "I2": "Interstate Commerce Commission (ICC) Account Number", "I3": "Non-American Identification Number", "I4": "Credit Counseling Identification Number", "I5": "Invoice Identification", "I7": "Credit Report Number", "I8": "Social Insurance Number", "I9": "Pollutant", "IA": "Internal Vendor Number", "IB": "In Bond Number", "IC": "Inbound-to Party", "ICD": "ICD-9-CM (International Classification of Diseases)", "ID": "Insurance Certificate Number", "IE": "Interchange Agreement Number", "IF": "Issue Number", "IFT": "International Fuel Tax Agreement Account Number", "IG": "Insurance Policy Number", "IH": "Initial Dealer Claim Number", "II": "Initial Sample Inspection Report Number", "IID": "Image Identifier", "IJ": "Standard Industry Classification (SIC) Code", "IK": "Invoice Number", "IL": "Internal Order Number", "IM": "Intergovernmental Maritime Organization (IMO) Number", "IMP": "Integrated Master Plan (IMP)", "IMS": "Integrated Master Schedule (IMS)", "IN": "Consignee's Invoice Number", "IND": "Investigatorial New Drug Number", "IO": "Inbound-to or Outbound-from Party", "IP": "Inspection Report Number", "IQ": "End Item", "IR": "Intra Plant Routing", "IRN": "Importer's Reference Number to Letter of Credit", "IRP": "International Registration Plan Account Number", "IS": "Invoice Number Suffix", "ISC": "International Standard Industrial Classification (ISIC) " "Dominion of Canada Code (DCC)", "ISN": "International Registration Plan Sticker Number", "ISS": "Inspection and Survey Sequence Number", "IT": "Internal Customer Number", "IU": "Barge Permit Number", "IV": "Seller's Invoice Number", "IW": "Part Interchangeability", "IX": "Item Number", "IZ": "Insured Parcel Post Number", "J0": "Proceeding", "J1": "Creditor", "J2": "Attorney", "J3": "Judge", "J4": "Trustee", "J5": "Originating Case", "J6": "Adversary Case", "J7": "Lead Case", "J8": "Jointly Administered Case", "J9": "Substantively Consolidated Case", "JA": "Beginning Job Sequence Number", "JB": "Job (Project) Number", "JC": "Review", "JD": "User Identification", "JE": "Ending Job Sequence Number", "JF": "Automated Underwriting Reference Number", "JH": "Tag", "JI": "Multiple Listing Service Area", "JK": 
"Multiple Listing Service Sub-area", "JL": "Packet", "JM": "Multiple Listing Service Map X Coordinate", "JN": "Multiple Listing Service Map Y Coordinate", "JO": "Multiple Listing Number", "JP": "Multiple Listing Service Book Type", "JQ": "Elevation", "JR": "Property Component Location", "JS": "Job Sequence Number", "JT": "Prior Tax Identification Number (TIN)", "JU": "Prior Phone Number", "JV": "Prior Health Industry Number", "JW": "Prior Universal Provider Identification Number (UPIN)", "JX": "Prior Postal Zip Code", "JY": "Origin of Shipment Harmonized-Based Code", "JZ": "Governing Class Code", "K0": "Approval Code", "K1": "Foreign Military Sales Notice Number", "K2": "Certified Mail Number", "K3": "Registered Mail Number", "K4": "Criticality Designator", "K5": "Task Order", "K6": "Purchase Description", "K7": "Paragraph Number", "K8": "Project Paragraph Number", "K9": "Inquiry Request Number", "KA": "Distribution List", "KB": "Beginning Kanban Serial Number", "KC": "Exhibit Distribution List", "KD": "Special Instructions Number", "KE": "Ending Kanban Serial Number", "KF": "Pre-award Survey", "KG": "Foreclosing Status", "KH": "Type of Law Suit", "KI": "Type of Outstanding Judgment", "KJ": "Tax Lien Jurisdiction", "KK": "Delivery Reference", "KL": "Contract Reference", "KM": "Rental Account Number", "KN": "Census Automated Files ID", "KO": "Customs Drawback Entry Number", "KP": "Health Certificate Number", "KQ": "Procuring Agency", "KR": "Response to a Request for Quotation Reference", "KS": "Solicitation", "KT": "Request for Quotation Reference", "KU": "Office Symbol", "KV": "Distribution Statement Code", "KW": "Certification", "KX": "Representation", "KY": "Site Specific Procedures, Terms, and Conditions", "KZ": "Master Solicitation Procedures, Terms, and Conditions", "L1": "Letters or Notes", "L2": "Location on Product Code", "L3": "Labor Operation Number", "L4": "Proposal Paragraph Number", "L5": "Subexhibit Line Item Number", "L6": "Subcontract Line Item Number", "L7": "Customer's Release Number", "L8": "Consignee's Release Number", "L9": "Customer's Part Number", "LA": "Shipping Label Serial Number", "LB": "Lockbox", "LC": "Lease Number", "LD": "Loan Number", "LE": "Lender Entity Number", "LEN": "Location Exception Order Number", "LF": "Assembly Line Feed Location", "LG": "Lease Schedule Number", "LH": "Longitude Expressed in Seconds", "LI": "Line Item Identifier (Seller's)", "LIC": "Health Industry Business Communications Council (HIBCC) Labeler " "Identification Code (LIC)", "LJ": "Local Jurisdiction", "LK": "Longitude expressed in Degrees, Minutes and Seconds", "LL": "Latitude Expressed in Seconds", "LM": "Product Period for which Labor Costs are Firm", "LN": "Non pickup Limited Tariff Number", "LO": "Load Planning Number", "LOI": "Logical Observation Identifier Names and Codes (LOINC)", "LP": "For Pickup Limited Freight Tariff Number", "LQ": "Latitude Expressed in Degrees, Minutes and Seconds", "LR": "Local Student Identification Number", "LS": "Bar-Coded Serial Number", "LSD": "Logistics Support Documentation Type Code", "LT": "Lot Number", "LU": "Location Number", "LV": "License Plate Number", "LVO": "Levying Officer Identification", "LW": "Location Within Equipment", "LX": "Qualified Products List", "LY": "Destination of Shipment Harmonized-Based Code", "LZ": "Lender Account Number", "M1": "Material Storage Location", "M2": "Major Force Program", "M3": "Crop Year", "M5": "Lease Agreement Amendment Number - Master", "M6": "Military Ordnance Security Risk Number", "M7": 
"Medical Assistance Category", "M8": "Limited Partnership Identification Number", "M9": "Tax Shelter Number", "MA": "Ship Notice/Manifest Number", "MB": "Master Bill of Lading", "MBX": "Mailbox", "MC": "Microfilm Number", "MCI": "Motor Carrier Identification Number", "MD": "Magazine Code", "MDN": "Hazardous Waste Manifest Document Number", "ME": "Message Address or ID", "MF": "Manufacturers Part Number", "MG": "Meter Number", "MH": "Manufacturing Order Number", "MI": "Mill Order Number", "MJ": "Model Number", "MK": "Manifest Key Number", "ML": "Military Rank/Civilian Pay Grade Number", "MM": "Master Lease Agreement Number", "MN": "MICR Number", "MO": "Manufacturing Operation Number", "MP": "Multiple P.O.s of an Invoice", "MQ": "Meter Proving Report Number", "MR": "Merchandise Type Code", "MS": "Manufacturer's Material Safety Data Sheet Number", "MSL": "Mail Slot", "MT": "Meter Ticket Number", "MU": "Military Specification (MILSPEC) Number", "MV": "Migrant Number, This number is assigned by the national Migrant " "Records Transfer System", "MW": "Military Call Number", "MX": "Material Change Notice Number", "MY": "Model year number", "MZ": "Maintenance Request Number", "MZO": "Multiple Zone Order Number", "N0": "Nomination Number", "N1": "Local School Course Number", "N2": "Local School District Course Number", "N3": "Statewide Course Number", "N4": "United States Department of Education, National Center for " "Education Statistics (NCES) Course Number", "N5": "Provider Plan Network Identification Number", "N6": "Plan Network Identification Number", "N7": "Facility Network Identification Number", "N8": "Secondary Health Insurance Identification Number", "N9": "Data Authentication Number", "NA": "North American Hazardous Classification Number", "NAS": "National Aeronautics and Space Administration FAR Supplement " "(NFS)", "NB": "Letter of Credit Number", "NC": "Secondary Coverage Company Number", "ND": "Letter of Credit Draft Number", "NDA": "Abbreviated New Drug Application Number", "NDB": "New Drug Application Number", "NE": "Lease Rider Number", "NF": "National Association of Insurance Commissioners (NAIC) Code", "NFC": "National Flood Insurance Program Community Name", "NFD": "National Flood Insurance Program County", "NFM": "National Flood Insurance Program Map Number", "NFN": "National Flood Insurance Program Community Number", "NFS": "National Flood Insurance Program State", "NG": "Natural Gas Policy Act Category Code", "NH": "Rate Card Number", "NI": "Military Standard (MIL-STD) Number", "NJ": "Technical Document Number", "NK": "Prior Case", "NL": "Technical Order Number", "NM": "Discounter Registration Number", "NN": "Nonconformance Report Number", "NO": "No OT5 Authority-zero Mileage Rate", "NP": "Partial Payment Number", "NQ": "Medicaid Recipient Identification Number", "NR": "Progress Payment Number", "NS": "National Stock Number", "NT": "Administrator's Reference Number", "NU": "Pending Case", "NW": "Associated Policy Number", "NX": "Related Nonconformance Number", "NY": "Agent Claim Number", "NZ": "Critical Application", "O1": "Outer Continental Shelf Area Code", "O2": "Outer Continental Shelf Block Number", "O5": "OT5 Authority-Condition or Restriction on Car Hire Rate", "O7": "On-line Procurement and Accounting Control (OPAC) Transaction", "O8": "Original Filing", "O9": "Continuation Filing", "OA": "Outlet Number", "OB": "Ocean Bill of Lading", "OC": "Ocean Container Number", "OD": "Original Return Request Reference Number", "OE": "Open and Prepaid Station List Number", 
"OF": "Operator Identification Number", "OG": "Termination Filing", "OH": "Origin House", "OI": "Original Invoice Number", "OIC": "Object Identifier", "OJ": "Amendment Filing", "OK": "Offer Group", "OL": "Original Shipper's Bill of Lading Number", "OM": "Ocean Manifest", "ON": "Dealer Order Number", "OP": "Original Purchase Order", "OQ": "Order Number", "OR": "Order/Paragraph Number", "OS": "Outbound-from Party", "OT": "Sales Allowance Number", "OU": "Tariff Supplement Number", "OV": "Tariff Suffix Number", "OW": "Service Order Number", "OX": "Statement Number", "OZ": "Product Number", "P1": "Previous Contract Number", "P2": "Previous Drug Enforcement Administration Number", "P3": "Previous customer reference number", "P4": "Project Code", "P5": "Position Code", "P6": "Pipeline Number", "P7": "Product Line Number", "P8": "Pickup Reference Number", "P9": "Page Number", "PA": "Price Area Number", "PAC": "Patent Cooperation Treaty Application Number", "PAN": "Nonprovisional Patent Application Number", "PAP": "Provisional Patent Application Number", "PB": "Payer's Financial Institution Account Number for Check, Draft, or " "Wire Payments; Originating Company Account Number for ACH " "Transfers", "PC": "Production Code", "PCC": "Pool Contract Code", "PCN": "Protocol Number", "PD": "Promotion/Deal Number", "PDL": "Previous Driver's License", "PE": "Plant Number", "PF": "Prime Contractor Contract Number", "PG": "Product Group", "PGC": "Packing Group Code", "PGN": "Plug Number", "PGS": "Proposed Group Work Candidate Sequence Number", "PH": "Priority Rating", "PHC": "Process Handling Code", "PI": "Price List Change or Issue Number", "PID": "Program Identification Number", "PIN": "Platform Indentification Number", "PJ": "Packer Number", "PK": "Packing List Number", "PL": "Price List Number", "PLA": "Product Licensing Agreement Number", "PLN": "Proposed Contract Number", "PM": "Part Number", "PMN": "Premarket Application Number", "PN": "Permit Number", "PNN": "Patent Number", "PO": "Purchase Order Number", "POL": "Policy Number", "PP": "Purchase Order Revision Number", "PQ": "Payee Identification", "PR": "Price Quote Number", "PRS": "Previously Reported Social Security Number", "PRT": "Product Type", "PS": "Purchase Order Number Suffix", "PSI": "Previous Shipment Identification Number - Continuous Move", "PSL": "Next Shipment Identification Number - Continuous Move", "PSM": "Credit Card", "PSN": "Proposed Sequence Number", "PT": "Purchase Option Agreement", "PTC": "Patent Type", "PU": "Previous Bill of Lading Number", "PV": "Product change information number", "PW": "Prior purchase order number", "PWC": "Preliminary Work Candidate Number", "PWS": "Proposed Work Candidate Sequence Number", "PX": "Previous Invoice Number", "PY": "Payee's Financial Institution Account Number for Check, Draft or " "Wire Payments; Receiving Company Account Number for ACH Transfer", "PZ": "Product Change Notice Number", "Q1": "Quote Number", "Q2": "Starting Package Number", "Q3": "Ending Package Number", "Q4": "Prior Identifier Number", "Q5": "Property Control Number", "Q6": "Recall Number", "Q7": "Receiver Claim Number", "Q8": "Registration Number", "Q9": "Repair Order Number", "QA": "Press Identifier", "QB": "Press Form Identifier", "QC": "Product Specification Document Number", "QD": "Replacement Drug Enforcement Administration Number", "QE": "Replacement Customer Reference Number", "QF": "Quality Disposition Area Identifier", "QG": "Replacement Assembly Model Number", "QH": "Replacement Assembly Serial Number", "QI": 
"Quality Inspection Area Identifier", "QJ": "Return Material Authorization Number", "QK": "Sales Program Number", "QL": "Service Authorization Number", "QM": "Quality Review Material Crib Identifier", "QN": "Stop Sequence Number", "QO": "Service Estimate Number", "QP": "Substitute Part Number", "QQ": "Unit Number", "QR": "Quality Report Number", "QS": "Warranty Coverage Code", "QT": "Warranty Registration Number", "QU": "Change Verification Procedure Code", "QV": "Major System Affected Code", "QW": "New Part Number", "QX": "Old Part Number", "QY": "Service Performed Code", "QZ": "Reference Drawing Number", "R0": "Regiristo Federal de Contribuyentes (Mexican Federal Tax ID " "Number)", "R1": "Current Revision Number", "R2": "Canceled Revision Number", "R3": "Correction Number", "R4": "Tariff Section Number", "R5": "Tariff Page Number", "R6": "Tariff Rule Number", "R7": "Accounts Receivable Open Item", "R8": "Rental Agreement Number", "R9": "Rejection Number", "RA": "Repetitive Cargo Shipment Number", "RAA": "Restricted Availability Authorization", "RAN": "Restricted Availability Number", "RB": "Rate code number", "RC": "Rail Routing Code", "RD": "Reel Number", "RE": "Release Number", "REC": "Related Case", "RF": "Export Reference Number", "RG": "Route Order Number-Domestic", "RGI": "Regulatory Guideline Identifier", "RH": "Route Order Number-Export", "RI": "Release invoice number for prior bill and hold", "RIG": "Rig Number", "RJ": "Route Order Number-Emergency", "RK": "Rack Type Number", "RL": "Reserve Assembly Line Feed Location", "RM": "Raw material supplier Dun & Bradstreet number", "RN": "Run Number", "RO": "Repetitive Booking Number", "RP": "Repetitive Pattern Code", "RPP": "Relative Priority", "RPT": "Report Number", "RQ": "Purchase Requisition Number", "RR": "Payer's Financial Institution Transit Routing Number for Check, " "Draft or Wire Payments. Originating Depository Financial " "Institution Routing Number for ACH Transfers", "RRS": "Reconciliation Report Section Identification Code", "RS": "Returnable Container Serial Number", "RSN": "Reservation Number", "RT": "Payee's Financial Institution Transit Routing Number for Check, " "Draft or Wire Payments. 
Receiving Depository Financial " "Institution Transit Routing Number for ACH Transfers", "RU": "Route Number", "RV": "Receiving Number", "RW": "Repetitive Waybill Code (Origin Carrier, Standard Point Location " "Code, Repetitive Waybill Code Number)", "RX": "Resubmit number", "RY": "Rebate Number", "RZ": "Returned Goods Authorization Number", "S0": "Special Approval", "S1": "Engineering Specification Number", "S2": "Data Source", "S3": "Specification Number", "S4": "Shippers Bond Number", "S5": "Routing Instruction Number", "S6": "Stock Number", "S7": "Stack Train Identification", "S8": "Seal Off Number", "S9": "Seal On Number", "SA": "Salesperson", "SB": "Sales Region Number", "SBN": "Surety Bond Number", "SC": "Shipper Car Order Number", "SCA": "Standard Carrier Alpha Code (SCAC)", "SD": "Subday Number", "SE": "Serial Number", "SEK": "Search Key", "SES": "Session", "SF": "Ship From", "SG": "Savings", "SH": "Sender Defined Clause", "SHL": "Shelf Life Indicator", "SI": "Shipper's Identifying Number for Shipment (SID)", "SJ": "Set Number", "SK": "Service Change Number", "SL": "Sales/Territory Code", "SM": "Sales Office Number", "SN": "Seal Number", "SNH": "Systematized Nomenclature of Human and Veterinary Medicine " "(SNOMED)", "SNV": "State Non-Resident Violator Compact", "SO": "Shipper's Order (Invoice Number)", "SP": "Scan Line", "SPL": "Standard Point Location Code (SPLC)", "SPN": "Theater Screen Number", "SQ": "Container Sequence Number", "SR": "Sales Responsibility", "SS": "Split Shipment Number", "ST": "Store Number", "STB": "Standard Transportation Commodity Code (STCC) Bridge Number", "STR": "Standard Transportation Commodity Code (STCC) Replacement Code", "SU": "Special Processing Code", "SUB": "Title Reference", "SUO": "Spacing Unit Order Number", "SV": "Service Charge Number", "SW": "Seller's Sale Number", "SX": "Service Interrupt Tracking Number", "SY": "Social Security Number", "SZ": "Specification Revision", "T0": "Dealer Type Identification", "T1": "Tax Exchange Code", "T2": "Tax Form Code", "T3": "Tax Schedule Code", "T4": "Signal Code", "T5": "Trailer Use Agreements", "T6": "Tax Filing", "T7": "Affected Subsystem Code", "T8": "Description of Change Code", "T9": "Documentation Affected Number", "TA": "Telecommunication Circuit Supplemental ID", "TB": "Trucker's Bill of Lading", "TC": "Vendor Terms", "TD": "Reason for Change", "TDT": "Technical Documentation Type", "TE": "Federal Maritime Commission (FMC) Tariff Number", "TF": "Transfer Number", "TG": "Transportation Control Number (TCN)", "TH": "Transportation Account Code (TAC)", "TI": "TIR Number", "TIP": "Technical Information Package", "TJ": "Federal Taxpayer's Identification Number", "TK": "Tank Number", "TL": "Tax License Exemption", "TM": "Travel Manifest (ACI or OTR)", "TN": "Transaction Reference Number", "TO": "Terminal Operator Number", "TOC": "Type of Comment", "TP": "Test Specification Number", "TPN": "Transponder Number", "TQ": "Tracer Action Request Number", "TR": "Government Transportation Request", "TS": "Tariff Number", "TSN": "Template Sequence Number", "TT": "Terminal Code", "TU": "Trial Location Code", "TV": "Line of Business", "TW": "Tax Worksheet", "TX": "Tax Exempt Number", "TY": "Policy Type", "TZ": "Total Cycle Number", "U0": "Consolidator's Receipt Number", "U1": "Regional Account Number", "U2": "Term", "U3": "Unique Supplier Identification Number (USIN)", "U4": "Unpaid Installment Reference Number", "U5": "Successor Account", "U6": "Predecessor Account", "U8": "Mortgage Backed Security (MBS) Loan 
Number", "U9": "Mortgage Backed Security (MBS) Pool Number", "UA": "Mortgage Number", "UB": "Unacceptable Source Purchaser ID", "UC": "Mortgage Insurance Indicator Number", "UD": "Unacceptable Source DUNS Number", "UE": "Secondary Coverage Certificate Number", "UF": "Mortgage Insurance Company Number", "UG": "U.S. Government Transportation Control Number", "UH": "Removal Number", "UI": "Previous Course Number", "UJ": "Current or Latest Course Number", "UK": "Equivalent Course Number at Requesting Institution", "UL": "Cross-listed Course Number", "UM": "Quarter Quarter Section Number", "UN": "United Nations Hazardous Classification Number", "UO": "Quarter Quarter Spot Number", "UP": "Upstream Shipper Contract Number", "UQ": "Section Number", "UR": "Unit Relief Number", "URL": "Uniform Resource Locator", "US": "Unacceptable Source Supplier ID", "UT": "Unit Train", "UU": "Township Number", "UV": "Range Number", "UW": "State Senate District", "UX": "State Assembly District", "UY": "Federal National Mortgage Association (Fannie Mae) Loan Number", "UZ": "State Legislative District", "V0": "Version", "V1": "Volume Purchase Agreement Number", "V2": "Visa Type", "V3": "Voyage Number", "V4": "State Department I-20 Form Number", "V5": "State Department IAP-66 Form Number", "V6": "North American Free Trade Agreement (NAFTA) Compliance Number", "V7": "Judicial District", "V8": "Institution Number", "V9": "Subservicer", "VA": "Vessel Agent Number", "VB": "Department of Veterans Affairs Acquisition Regulations (VAAR)", "VC": "Vendor Contract Number", "VD": "Volume Number", "VE": "Vendor Abbreviation Code", "VF": "Vendor Change Identification Code", "VG": "Vendor Change Procedure Code", "VH": "County Legislative District", "VI": "Pool Number", "VJ": "Investor Note Holder Identification", "VK": "Institution Note Holder Identification", "VL": "Third Party Note Holder Identification", "VM": "Ward", "VN": "Vendor Order Number", "VO": "Institution Loan Number", "VP": "Vendor Product Number", "VQ": "Related Contract Line Item Number", "VR": "Vendor ID Number", "VS": "Vendor Order Number Suffix", "VT": "Motor Vehicle ID Number", "VU": "Preparer's Verification Number", "VV": "Voucher", "VW": "Standard", "VX": "Value-Added Tax Registration Number (Europe)", "VY": "Link Sequence Number", "VZ": "Sponsor's Reference Number", "W1": "Disposal Turn-In Document Number", "W2": "Weapon System Number", "W3": "Manufacturing Directive Number", "W4": "Procurement Request Number", "W5": "Inspector Identification Number", "W6": "Federal Supply Schedule Number", "W7": "Commercial and Government Entity (CAGE) Code", "W8": "Suffix", "W9": "Special Packaging Instruction Number", "WA": "Labor or Affiliation Identification", "WB": "American Petroleum Institute (API) Well", "WC": "Contract Option Number", "WCS": "Work Candidate Sequence Number", "WD": "Review Period Number", "WDR": "Withdrawal Record", "WE": "Well Classification Code", "WF": "Locally Assigned Control Number", "WG": "Vendor's Previous Job Number", "WH": "Master Reference (Link) Number", "WI": "Waiver", "WJ": "Pre-Award Survey", "WK": "Type of Science Code", "WL": "Federal Supply Classification Code", "WM": "Weight Agreement Number", "WN": "Well Number", "WO": "Work Order Number", "WP": "Warehouse Pick Ticket Number", "WQ": "Interim Funding Organization Loan Number", "WR": "Warehouse Receipt Number", "WS": "Warehouse storage location number", "WT": "Broker's Reference Number", "WU": "Vessel", "WV": "Dealer Identification", "WW": "Depository Trust Company Identification", 
"WX": "Distributor's Account Identification", "WY": "Waybill Number", "WZ": "Distributor's Representative Identification", "X0": "Debtor's Account", "X1": "Provider Claim Number", "X2": "Specification Class Number", "X3": "Defect Code Number", "X4": "Clinical Laboratory Improvement Amendment Number", "X5": "State Industrial Accident Provider Number", "X6": "Original Voucher Number", "X7": "Batch Sequence Number", "X8": "Secondary Suffix Code Indicator", "X9": "Internal Control Number", "XA": "Substitute National Stock Number", "XB": "Substitute Manufacturer's Part Number", "XC": "Cargo Control Number", "XD": "Subsistence Identification Number", "XE": "Transportation Priority Number", "XF": "Government Bill of Lading Office Code", "XG": "Airline Ticket Number", "XH": "Contract Auditor ID Number", "XI": "Federal Home Loan Mortgage Corporation Loan Number", "XJ": "Federal Home Loan Mortgage Corporation Default/Foreclosure " "Specialist Number", "XK": "Mortgagee Loan Number", "XL": "Insured's Loan Number", "XM": "Issuer Number", "XN": "Title XIX Identifier Number", "XO": "Sample Number", "XP": "Previous Cargo Control Number", "XQ": "Pier Number", "XR": "Railroad Commission Record Number", "XS": "Gas Analysis Source Meter Number", "XT": "Toxicology ID", "XU": "Universal Transverse Mercator - North", "XV": "Universal Transverse Mercator - East", "XW": "Universal Transverse Mercator - Zone", "XX": "Rating Period", "XY": "Other Unlisted Type of Reference Number", "XZ": "Pharmacy Prescription Number", "Y0": "Debtor", "Y1": "Claim Administrator Claim Number", "Y2": "Third-Party Administrator Claim Number", "Y3": "Contract Holder Claim Number", "Y4": "Agency Claim Number", "Y5": "Delivery Trailer Manifest", "Y6": "Sort and Segregate", "Y7": "Processing Area", "Y8": "User ID", "Y9": "Current Certificate Number", "YA": "Prior Certificate Number", "YB": "Revision Number", "YC": "Tract", "YD": "Buyer Identification", "YE": "Railroad Commission Oil Number", "YF": "Lessee Identification", "YG": "Operator Identification", "YH": "Operator Assigned Unit Number", "YI": "Refiner Identification", "YJ": "Revenue Source", "YK": "Rent Payor Identification", "YL": "Allowance Recipient Identification", "YM": "Resource Screening Reference", "YN": "Receiver ID Qualifier", "YO": "Formation", "YP": "Selling Arrangement", "YQ": "Minimum Royalty Payor Identification", "YR": "Operator Lease Number", "YS": "Yard Position", "YT": "Reporter Identification", "YU": "Payor Identification", "YV": "Participating Area", "YW": "Engineering Change Proposal", "YX": "Geographic Score", "YY": "Geographic Key", "YZ": "Geographic Index", "Z1": "Safety of Ship Certificate", "Z2": "Safety of Radio Certificate", "Z3": "Safety Equipment Certificate", "Z4": "Civil Liabilities of Oil Certificate", "Z5": "Load Line Certificate", "Z6": "Derat Certificate", "Z7": "Maritime Declaration of Health", "Z8": "Federal Housing Administration Case Number", "Z9": "Veterans Affairs Case Number", "ZA": "Supplier", "ZB": "Ultimate Consignee", "ZC": "Connecting Carrier", "ZD": "Family Member Identification", "ZE": "Coal Authority Number", "ZF": "Contractor Establishment Code (CEC)", "ZG": "Sales Representative Order Number", "ZH": "Carrier Assigned Reference Number", "ZI": "Reference Version Number", "ZJ": "Universal Railroad Revenue Waybill Identified Number (URRWIN)", "ZK": "Duplicate Waybill in Route", "ZL": "Duplicate Waybill Not in Route", "ZM": "Manufacturer Number", "ZN": "Agency Case Number", "ZO": "Makegood Commercial Line Number", "ZP": "Spouse Tie", 
"ZQ": "Non-Spouse Tie", "ZR": "Supplier (Replacement)", "ZS": "Software Application Number", "ZT": "Milling in Transit", "ZU": "Field", "ZV": "Block", "ZW": "Area", "ZX": "County Code", "ZY": "Referenced Pattern Identification", "ZZ": "Mutually Defined", } id_code_qualifier = { "1": "D-U-N-S Number, Dun & Bradstreet", "2": "Standard Carrier Alpha Code (SCAC)", "3": "Federal Maritime Commission (Ocean) (FMC)", "4": "International Air Transport Association (IATA)", "5": "SIRET", "6": "Plant Code", "7": "Loading Dock", "8": "UCC/EAN Global Product Identification Prefix", "9": "D-U-N-S+4, D-U-N-S Number with Four Character Suffix", "10": "Department of Defense Activity Address Code (DODAAC)", "11": "Drug Enforcement Administration (DEA)", "12": "Telephone Number (Phone)", "13": "Federal Reserve Routing Code (FRRC)", "14": "UCC/EAN Location Code Prefix", "15": "Standard Address Number (SAN)", "16": "ZIP Code", "17": "Automated Broker Interface (ABI) Routing Code", "18": "Automotive Industry Action Group (AIAG)", "19": "FIPS-55 (Named Populated Places)", "20": "Standard Point Location Code (SPLC)", "21": "Health Industry Number (HIN)", "22": "Council of Petroleum Accounting Societies code (COPAS)", "23": "Journal of Commerce (JOC)", "24": "Employer's Identification Number", "25": "Carrier's Customer Code", "26": "Petroleum Accountants Society of Canada Company Code", "27": "Government Bill Of Lading Office Code (GBLOC)", "28": "American Paper Institute", "29": "Grid Location and Facility Code", "30": "American Petroleum Institute Location Code", "31": "Bank Identification Code", "32": "Assigned by Property Operator", "33": "Commercial and Government Entity (CAGE)", "34": "Social Security Number", "35": "Electronic Mail Internal System Address Code", "36": "Customs House Broker License Number", "37": "United Nations Vendor Code", "38": "Country Code", "39": "Local Union Number", "40": "Electronic Mail User Code", "41": "Telecommunications Carrier Identification Code", "42": "Telecommunications Pseudo Carrier Identification Code", "43": "Alternate Social Security Number", "44": "Return Sequence Number", "45": "Declaration Control Number", "46": "Electronic Transmitter Identification Number (ETIN)", "47": "Tax Authority Identification", "48": "Electronic Filer Identification Number (EFIN)", "49": "State Identification Number", "50": "Business License Number", "53": "Building", "54": "Warehouse", "55": "Post Office Box", "56": "Division", "57": "Department", "58": "Originating Company Number", "59": "Receiving Company Number", "61": "Holding Mortgagee Number", "62": "Servicing Mortgagee Number", "63": "Servicer-holder Mortgagee Number", "64": "One Call Agency", "71": "Integrated Postsecondary Education Data System (IPEDS) set of " "codes maintained by the U.S. Department of Education's National " "Center of Education Statistics, Washington, D.C.", "72": "The College Board's Admission Testing Program (ATP), administered " "by the Educational Testing Service (ETS), 4-digit list of " "postsecondary educational institutions.", "73": "Federal Interagency Commission on Education (FICE) number. 
" "Available from the United States Department of Education, " "National Center for Education Statistics.", "74": "American College Testing (ACT) list of postsecondary educational " "institutions.", "75": "State or Province Assigned Number", "76": "Local School District or Jurisdiction Number", "77": "National Center for Education Statistics (NCES) Common Core of " "Data (CCD) number for PreK - 12 institutions", "78": "The College Board and ACT 6 digit code list of secondary " "educational institutions", "81": "Classification of Instructional Programs (CIP) coding structure " "maintained by the U.S. Department of Education's National Center " "for Education Statistics", "82": "Higher Education General Information Survey (HEGIS) maintained by " "the U.S. Department of Education's National Center for Education " "Statistics", "90": "California Ethnic Subgroups Code Table", "91": "Assigned by Seller or Seller's Agent", "92": "Assigned by Buyer or Buyer's Agent", "93": "Code assigned by the organization originating the transaction set", "94": "Code assigned by the organization that is the ultimate destination " "of the transaction set", "95": "Assigned By Transporter", "96": "Assigned By Pipeline Operator", "97": "Receiver's Code", "98": "Purchasing Office", "99": "Office of Workers Compensation Programs (OWCP) Agency Code", "A": "U.S. Customs Carrier Identification", "A1": "Approver ID", "A2": "Military Assistance Program Address Code (MAPAC)", "A3": "Assigned by Third Party", "A4": "Assigned by Clearinghouse", "A5": "Committee for Uniform Security Identification Procedures (CUSIP) " "Number", "A6": "Financial Identification Numbering System (FINS) Number", "AA": "Postal Service Code", "AB": "US Environmental Protection Agency (EPA) Identification Number", "AC": "Attachment Control Number", "AD": "Blue Cross Blue Shield Association Plan Code", "AE": "Alberta Energy Resources Conservation Board", "AL": "Anesthesia License Number", "AP": "Alberta Petroleum Marketing Commission", "BC": "British Columbia Ministry of Energy Mines and Petroleum Resources", "BD": "Blue Cross Provider Number", "BE": "Common Language Location Identification (CLLI)", "BG": "Badge Number", "BP": "Benefit Plan", "BS": "Blue Shield Provider Number", "C": "Insured's Changed Unique Identification Number", "C1": "Insured or Subscriber", "C2": "Health Maintenance Organization (HMO) Provider Number", "C5": "Customer Identification File", "CA": "Statistics Canada Canadian College Student Information System " "Course Codes", "CB": "Statistics Canada Canadian College Student Information System " "Institution Codes", "CC": "Statistics Canada University Student Information System " "Curriculum Codes", "CD": "Contract Division", "CE": "Bureau of the Census Filer Identification Code", "CF": "Canadian Financial Institution Routing Number", "CI": "CHAMPUS (Civilian Health and Medical Program of the Uniformed " "Services) Identification Number", "CL": "Corrected Loan Number", "CM": "U.S. 
Customs Service (USCS) Manufacturer Identifier (MID)", "CP": "Canadian Petroleum Association", "CR": "Credit Repository", "CS": "Statistics Canada University Student Information System University " "Codes", "CT": "Court Identification Code", "D": "Census Schedule D", "DG": "United States Department of Education Guarantor Identification " "Code", "DL": "United States Department of Education Lender Identification Code", "DN": "Dentist License Number", "DP": "Data Processing Point", "DS": "United States Department of Education School Identification Code", "E": "Hazard Insurance Policy Number", "EC": "ARI Electronic Commerce Location ID Code", "EH": "Theatre Number", "EI": "Employee Identification Number", "EP": "U.S. Environmental Protection Agency (EPA)", "EQ": "Insurance Company Assigned Identification Number", "ER": "Mortgagee Assigned Identification Number", "ES": "Automated Export System (AES) Filer Identification Code", "F": "Document Custodian Identification Number", "FA": "Facility Identification", "FB": "Field Code", "FC": "Federal Court Jurisdiction Identifier", "FD": "Federal Court Divisional Office Number", "FI": "Federal Taxpayer's Identification Number", "FJ": "Federal Jurisdiction", "FN": "U.S. Environmental Protection Agency (EPA) Laboratory " "Certification Identification", "G": "Payee Identification Number", "GA": "Primary Agent Identification", "GC": "GAS*CODE", "HC": "Health Care Financing Administration", "HN": "Health Insurance Claim (HIC) Number", "I": "Secondary Marketing Investor Assigned Number", "J": "Mortgage Electronic Registration System Organization Identifier", "K": "Census Schedule K", "L": "Investor Assigned Identification Number", "LC": "Agency Location Code (U.S. Government)", "LD": "NISO Z39.53 Language Codes", "LE": "ISO 639 Language Codes", "LI": "Labeler Identification Code (LIC)", "LN": "Loan Number", "M3": "Disbursing Station", "M4": "Department of Defense Routing Identifier Code (RIC)", "M5": "Jurisdiction Code", "M6": "Division Office Code", "MA": "Mail Stop", "MB": "Medical Information Bureau", "MC": "Medicaid Provider Number", "MD": "Manitoba Department of Mines and Resources", "MI": "Member Identification Number", "MK": "Market", "ML": "Multiple Listing Service Vendor - Multiple Listing Service " "Identification", "MN": "Mortgage Identification Number", "MP": "Medicare Provider Number", "MR": "Medicaid Recipient Identification Number", "N": "Insured's Unique Identification Number", "NA": "National Association of Realtors - Multiple Listing Service " "Identification", "ND": "Mode Designator", "NI": "National Association of Insurance Commissioners (NAIC) " "Identification", "NO": "National Criminal Information Center Originating Agency", "OC": "Occupation Code", "OP": "On-line Payment and Collection", "PA": "Secondary Agent Identification", "PB": "Public Identification", "PC": "Provider Commercial Number", "PI": "Payor Identification", "PP": "Pharmacy Processor Number", "PR": "Pier", "RA": "Regulatory Agency Number", "RB": "Real Estate Agent", "RC": "Real Estate Company", "RD": "Real Estate Broker Identification", "RE": "Real Estate License Number", "RT": "Railroad Track", "S": "Title Insurance Policy Number", "SA": "Tertiary Agent Identification", "SB": "Social Insurance Number", "SD": "Saskatchewan Department of Energy Mines and Resources", "SF": "Suffix Code", "SI": "Standard Industry Code (SIC)", "SJ": "State Jurisdiction", "SL": "State License Number", "SP": "Specialty License Number", "ST": "State/Province License Tag", "SV": "Service 
Provider Number", "SW": "Society for Worldwide Interbank Financial Telecommunications " "(SWIFT) Address", "TA": "Taxpayer ID Number", "TC": "Internal Revenue Service Terminal Code", "TZ": "Department Code", "UC": "Consumer Credit Identification Number", "UL": "UCC/EAN Location Code", "UM": "UCC/EAN Location Code Suffix", "UP": "Unique Physician Identification Number (UPIN)", "UR": "Uniform Resource Locator (URL)", "US": "Unique Supplier Identification Number (USIN)", "WR": "Wine Region Code", "XV": "Health Care Financing Administration National PlanIDRequired if " "the National PlanID is mandated for use. Otherwise, one of the " "other listed codes may be used.", "XX": "Health Care Financing Administration National Provider Identifier", "ZC": "Contractor Establishment Code", "ZN": "Zone", "ZY": "Temporary Identification Number", "A": "temporary ID to be used until a permanent ID is processed", "ZZ": "Mutually Defined", } date_or_time_qualifier = { "001": "Cancel After", "002": "Delivery Requested", "003": "Invoice", "004": "Purchase Order", "005": "Sailing", "006": "Sold", "007": "Effective", "008": "Purchase Order Received", "009": "Process", "010": "Requested Ship", "011": "Shipped", "012": "Terms Discount Due", "013": "Terms Net Due", "014": "Deferred Payment", "015": "Promotion Start", "016": "Promotion End", "017": "Estimated Delivery", "018": "Available", "019": "Unloaded", "020": "Check", "021": "Charge Back", "022": "Freight Bill", "023": "Promotion Order - Start", "024": "Promotion Order - End", "025": "Promotion Ship - Start", "026": "Promotion Ship - End", "027": "Promotion Requested Delivery - Start", "028": "Promotion Requested Delivery - End", "029": "Promotion Performance - Start", "030": "Promotion Performance - End", "031": "Promotion Invoice Performance - Start", "032": "Promotion Invoice Performance - End", "033": "Promotion Floor Stock Protect - Start", "034": "Promotion Floor Stock Protect - End", "035": "Delivered", "036": "Expiration", "037": "Ship Not Before", "038": "Ship No Later", "039": "Ship Week of", "040": "Status (After and Including)", "041": "Status (Prior and Including)", "042": "Superseded", "043": "Publication", "044": "Settlement Date as Specified by the Originator", "045": "Endorsement Date", "046": "Field Failure", "047": "Functional Test", "048": "System Test", "049": "Prototype Test", "050": "Received", "051": "Cumulative Quantity Start", "052": "Cumulative Quantity End", "053": "Buyers Local", "054": "Sellers Local", "055": "Confirmed", "056": "Estimated Port of Entry", "057": "Actual Port of Entry", "058": "Customs Clearance", "059": "Inland Ship", "060": "Engineering Change Level", "061": "Cancel if Not Delivered by", "062": "Blueprint", "063": "Do Not Deliver After", "064": "Do Not Deliver Before", "065": "1st Schedule Delivery", "066": "1st Schedule Ship", "067": "Current Schedule Delivery", "068": "Current Schedule Ship", "069": "Promised for Delivery", "070": "Scheduled for Delivery (After and Including)", "071": "Requested for Delivery (After and Including)", "072": "Promised for Delivery (After and Including)", "073": "Scheduled for Delivery (Prior to and Including)", "074": "Requested for Delivery (Prior to and Including)", "075": "Promised for Delivery (Prior to and Including)", "076": "Scheduled for Delivery (Week of)", "077": "Requested for Delivery (Week of)", "078": "Promised for Delivery (Week of)", "079": "Promised for Shipment", "080": "Scheduled for Shipment (After and Including)", "081": "Requested for Shipment (After and 
Including)", "082": "Promised for Shipment (After and Including)", "083": "Scheduled for Shipment (Prior to and Including)", "084": "Requested for Shipment (Prior to and Including)", "085": "Promised for Shipment (Prior to and Including)", "086": "Scheduled for Shipment (Week of)", "087": "Requested for Shipment (Week of)", "088": "Promised for Shipment (Week of)", "089": "Inquiry", "090": "Report Start", "091": "Report End", "092": "Contract Effective", "093": "Contract Expiration", "094": "Manufacture", "095": "Bill of Lading", "096": "Discharge", "097": "Transaction Creation", "098": "Bid (Effective)", "099": "Bid Open (Date Bids Will Be Opened)", "100": "No Shipping Schedule Established as of", "101": "No Production Schedule Established as of", "102": "Issue", "103": "Award", "104": "System Survey", "105": "Quality Rating", "106": "Required By", "107": "Deposit", "108": "Postmark", "109": "Received at Lockbox", "110": "Originally Scheduled Ship", "111": "Manifest/Ship Notice", "112": "Buyers Dock", "113": "Sample Required", "114": "Tooling Required", "115": "Sample Available", "116": "Scheduled Interchange Delivery", "118": "Requested Pick-up", "119": "Test Performed", "120": "Control Plan", "121": "Feasibility Sign Off", "122": "Failure Mode Effective", "124": "Group Contract Effective", "125": "Group Contract Expiration", "126": "Wholesale Contract Effective", "127": "Wholesale Contract Expiration", "128": "Replacement Effective", "129": "Customer Contract Effective", "130": "Customer Contract Expiration", "131": "Item Contract Effective", "132": "Item Contract Expiration", "133": "Accounts Receivable - Statement Date", "134": "Ready for Inspection", "135": "Booking", "136": "Technical Rating", "137": "Delivery Rating", "138": "Commerical Rating", "139": "Estimated", "140": "Actual", "141": "Assigned", "142": "Loss", "143": "Due Date of First Payment to Principal and Interest", "144": "Estimated Acceptance", "145": "Opening Date", "146": "Closing Date", "147": "Due Date Last Complete Installment Paid", "148": "Date of Local Office Approval of Conveyance of Damaged Real " "Estate Property", "149": "Date Deed Filed for Record", "150": "Service Period Start", "151": "Service Period End", "152": "Effective Date of Change", "153": "Service Interruption", "154": "Adjustment Period Start", "155": "Adjustment Period End", "156": "Allotment Period Start", "157": "Test Period Start", "158": "Test Period Ending", "159": "Bid Price Exception", "160": "Samples to be Returned By", "161": "Loaded on Vessel", "162": "Pending Archive", "163": "Actual Archive", "164": "First Issue", "165": "Final Issue", "166": "Message", "167": "Most Recent Revision (or Initial Version)", "168": "Release", "169": "Product Availability Date", "170": "Supplemental Issue", "171": "Revision", "172": "Correction", "173": "Week Ending", "174": "Month Ending", "175": "Cancel if not shipped by", "176": "Expedited on", "177": "Cancellation", "178": "Hold (as of)", "179": "Hold as Stock (as of)", "180": "No Promise (as of)", "181": "Stop Work (as of)", "182": "Will Advise (as of)", "183": "Connection", "184": "Inventory", "185": "Vessel Registry", "186": "Invoice Period Start", "187": "Invoice Period End", "188": "Credit Advice", "189": "Debit Advice", "190": "Released to Vessel", "191": "Material Specification", "192": "Delivery Ticket", "193": "Period Start", "194": "Period End", "195": "Contract Re-Open", "196": "Start", "197": "End", "198": "Completion", "199": "Seal", "200": "Assembly Start", "201": "Acceptance", "202": 
"Master Lease Agreement", "203": "First Produced", "204": "Official Rail Car Interchange (Either Actual or Agreed Upon)", "205": "Transmitted", "206": "Status (Outside Processor)", "207": "Status (Commercial)", "208": "Lot Number Expiration", "209": "Contract Performance Start", "210": "Contract Performance Delivery", "211": "Service Requested", "212": "Returned to Customer", "213": "Adjustment to Bill Dated", "214": "Date of Repair/Service", "215": "Interruption Start", "216": "Interruption End", "217": "Spud", "218": "Initial Completion", "219": "Plugged and Abandoned", "220": "Penalty", "221": "Penalty Begin", "222": "Birth", "223": "Birth Certificate", "224": "Adoption", "225": "Christening", "226": "Lease Commencement", "227": "Lease Term Start", "228": "Lease Term End", "229": "Rent Start", "230": "Installation", "231": "Progress Payment", "232": "Claim Statement Period Start", "233": "Claim Statement Period End", "234": "Settlement Date", "235": "Delayed Billing (Not Delayed Payment)", "236": "Lender Credit Check", "237": "Student Signed", "238": "Schedule Release", "239": "Baseline", "240": "Baseline Start", "241": "Baseline Complete", "242": "Actual Start", "243": "Actual Complete", "244": "Estimated Start", "245": "Estimated Completion", "246": "Start no earlier than", "247": "Start no later than", "248": "Finish no later than", "249": "Finish no earlier than", "250": "Mandatory (or Target) Start", "251": "Mandatory (or Target) Finish", "252": "Early Start", "253": "Early Finish", "254": "Late Start", "255": "Late Finish", "256": "Scheduled Start", "257": "Scheduled Finish", "258": "Original Early Start", "259": "Original Early Finish", "260": "Rest Day", "261": "Rest Start", "262": "Rest Finish", "263": "Holiday", "264": "Holiday Start", "265": "Holiday Finish", "266": "Base", "267": "Timenow", "268": "End Date of Support", "269": "Date Account Matures", "270": "Date Filed", "271": "Penalty End", "272": "Exit Plant Date", "273": "Latest On Board Carrier Date", "274": "Requested Departure Date", "275": "Approved", "276": "Contract Start", "277": "Contract Definition", "278": "Last Item Delivery", "279": "Contract Completion", "280": "Date Course of Orthodontics Treatment Began or is Expected to " "Begin", "281": "Over Target Baseline Month", "282": "Previous Report", "283": "Funds Appropriation - Start", "284": "Funds Appropriation - End", "285": "Employment or Hire", "286": "Retirement", "287": "Medicare", "288": "Consolidated Omnibus Budget Reconciliation Act (COBRA)", "289": "Premium Paid to Date", "290": "Coordination of Benefits", "291": "Plan", "292": "Benefit", "293": "Education", "294": "Earnings Effective Date", "295": "Primary Care Provider", "296": "Return to Work", "297": "Date Last Worked", "298": "Latest Absence", "299": "Illness", "300": "Enrollment Signature Date", "301": "Consolidated Omnibus Budget Reconciliation Act (COBRA) Qualifying " "Event", "302": "Maintenance", "303": "Maintenance Effective", "304": "Latest Visit or Consultation", "305": "Net Credit Service Date", "306": "Adjustment Effective Date", "307": "Eligibility", "308": "Pre-Award Survey", "309": "Plan Termination", "310": "Date of Closing", "311": "Latest Receiving Date/Cutoff Date", "312": "Salary Deferral", "313": "Cycle", "314": "Disability", "315": "Offset", "316": "Prior Incorrect Date of Birth", "317": "Corrected Date of Birth", "318": "Added", "319": "Failed", "320": "Date Foreclosure Proceedings Instituted", "321": "Purchased", "322": "Put into Service", "323": "Replaced", "324": 
"Returned", "325": "Disbursement Date", "326": "Guarantee Date", "327": "Quarter Ending", "328": "Changed", "329": "Terminated", "330": "Referral Date", "331": "Evaluation Date", "332": "Placement Date", "333": "Individual Education Plan (IEP)", "334": "Re-evaluation Date", "335": "Dismissal Date", "336": "Employment Begin", "337": "Employment End", "338": "Medicare Begin", "339": "Medicare End", "340": "Consolidated Omnibus Budget Reconciliation Act (COBRA) Begin", "341": "Consolidated Omnibus Budget Reconciliation Act (COBRA) End", "342": "Premium Paid to Date Begin", "343": "Premium Paid to Date End", "344": "Coordination of Benefits Begin", "345": "Coordination of Benefits End", "346": "Plan Begin", "347": "Plan End", "348": "Benefit Begin", "349": "Benefit End", "350": "Education Begin", "351": "Education End", "352": "Primary Care Provider Begin", "353": "Primary Care Provider End", "354": "Illness Begin", "355": "Illness End", "356": "Eligibility Begin", "357": "Eligibility End", "358": "Cycle Begin", "359": "Cycle End", "360": "Disability Begin", "361": "Disability End", "362": "Offset Begin", "363": "Offset End", "364": "Plan Period Election Begin", "365": "Plan Period Election End", "366": "Plan Period Election", "367": "Due to Customer", "368": "Submittal", "369": "Estimated Departure Date", "370": "Actual Departure Date", "371": "Estimated Arrival Date", "372": "Actual Arrival Date", "373": "Order Start", "374": "Order End", "375": "Delivery Start", "376": "Delivery End", "377": "Contract Costs Through", "378": "Financial Information Submission", "379": "Business Termination", "380": "Applicant Signed", "381": "Cosigner Signed", "382": "Enrollment", "383": "Adjusted Hire", "384": "Credited Service", "385": "Credited Service Begin", "386": "Credited Service End", "387": "Deferred Distribution", "388": "Payment Commencement", "389": "Payroll Period", "390": "Payroll Period Begin", "391": "Payroll Period End", "392": "Plan Entry", "393": "Plan Participation Suspension", "394": "Rehire", "395": "Retermination", "396": "Termination", "397": "Valuation", "398": "Vesting Service", "399": "Vesting Service Begin", "400": "Vesting Service End", "401": "Duplicate Bill", "402": "Adjustment Promised", "403": "Adjustment Processed", "404": "Year Ending", "405": "Production", "406": "Material Classification", "408": "Weighed", "409": "Date of Deed in Lieu", "410": "Date of Firm Commitment", "411": "Expiration Date of Extension to Foreclose", "412": "Date of Notice to Convey", "413": "Date of Release of Bankruptcy", "414": "Optimistic Early Start", "415": "Optimistic Early Finish", "416": "Optimistic Late Start", "417": "Optimistic Late Finish", "418": "Most Likely Early Start", "419": "Most Likely Early Finish", "420": "Most Likely Late Start", "421": "Most Likely Late Finish", "422": "Pessimistic Early Start", "423": "Pessimistic Early Finish", "424": "Pessimistic Late Start", "425": "Pessimistic Late Finish", "426": "First Payment Due", "427": "First Interest Payment Due", "428": "Subsequent Interest Payment Due", "429": "Irregular Interest Payment Due", "430": "Guarantor Received", "431": "Onset of Current Symptoms or Illness", "432": "Submission", "433": "Removed", "434": "Statement", "435": "Admission", "436": "Insurance Card", "437": "Spouse Retirement", "438": "Onset of Similar Symptoms or Illness", "439": "Accident", "440": "Release of Information", "441": "Prior Placement", "442": "Date of Death", "443": "Peer Review Organization (PRO) Approved Stay", "444": "First Visit or 
Consultation", "445": "Initial Placement", "446": "Replacement", "447": "Occurrence", "448": "Occurrence Span", "449": "Occurrence Span From", "450": "Occurrence Span To", "451": "Initial Fee Due", "452": "Appliance Placement", "453": "Acute Manifestation of a Chronic Condition", "454": "Initial Treatment", "455": "Last X-Ray", "456": "Surgery", "457": "Continuous Passive Motion (CPM)", "458": "Certification", "459": "Nursing Home From", "460": "Nursing Home To", "461": "Last Certification", "462": "Date of Local Office Approval of Conveyance of Occupied Real " "Estate Property", "463": "Begin Therapy", "464": "Oxygen Therapy From", "465": "Oxygen Therapy To", "466": "Oxygen Therapy", "467": "Signature", "468": "Prescription Fill", "469": "Provider Signature", "470": "Date of Local Office Certification of Conveyance of Damaged Real " "Estate Property", "471": "Prescription", "472": "Service", "473": "Medicaid Begin", "474": "Medicaid End", "475": "Medicaid", "476": "Peer Review Organization (PRO) Approved Stay From", "477": "Peer Review Organization (PRO) Approved Stay To", "478": "Prescription From", "479": "Prescription To", "480": "Arterial Blood Gas Test", "481": "Oxygen Saturation Test", "482": "Pregnancy Begin", "483": "Pregnancy End", "484": "Last Menstrual Period", "485": "Injury Begin", "486": "Injury End", "487": "Nursing Home", "488": "Collateral Dependent", "489": "Collateral Dependent Begin", "490": "Collateral Dependent End", "491": "Sponsored Dependent", "492": "Sponsored Dependent Begin", "493": "Sponsored Dependent End", "494": "Deductible", "495": "Out-of-Pocket", "496": "Contract Audit Date", "497": "Latest Delivery Date at Pier", "498": "Mortgagee Reported Curtailment Date", "499": "Mortgagee Official Signature Date", "500": "Resubmission", "501": "Expected Reply", "502": "Dropped to Less than Half Time", "503": "Repayment Begin", "504": "Loan Servicing Transfer", "505": "Loan Purchase", "506": "Last Notification", "507": "Extract", "508": "Extended", "509": "Servicer Signature Date", "510": "Date Packed", "511": "Shelf Life Expiration", "512": "Warranty Expiration", "513": "Overhauled", "514": "Transferred", "515": "Notified", "516": "Discovered", "517": "Inspected", "518": "Voucher (Date of)", "519": "Date Bankruptcy Filed", "520": "Date of Damage", "521": "Date Hazard Insurance Policy Cancelled", "522": "Expiration Date to Submit Title Evidence", "523": "Date of Claim", "524": "Date of Notice of Referral for Assignment", "525": "Date of Notice of Probable Ineligibility for Assignment", "526": "Date of Foreclosure Notice", "527": "Expiration of Foreclosure Timeframe", "528": "Date Possessory Action Initiated", "529": "Date of Possession", "530": "Date of Last Installment Received", "531": "Date of Acquisition of Title", "532": "Expiration of Extension to Convey", "533": "Date of Assignment Approval", "534": "Date of Assignment Rejection", "535": "Curtailment Date from Advice of Payment", "536": "Expiration of Extension to Submit Fiscal Data", "537": "Date Documentation, or Paperwork, or Both Was Sent", "538": "Makegood Commercial Date", "539": "Policy Effective", "540": "Policy Expiration", "541": "Employee Effective Date of Coverage", "542": "Date of Representation", "543": "Last Premium Paid Date", "544": "Date Reported to Employer", "545": "Date Reported to Claim Administrator", "546": "Date of Maximum Medical Improvement", "547": "Date of Loan", "548": "Date of Advance", "549": "Beginning Lay Date", "550": "Certificate Effective", "551": "Benefit Application 
Date", "552": "Actual Return to Work", "553": "Released Return to Work", "554": "Ending Lay Date", "555": "Employee Wages Ceased", "556": "Last Salary Increase", "557": "Employee Laid Off", "558": "Injury or Illness", "559": "Oldest Unpaid Installment", "560": "Preforeclosure Acceptance Date", "561": "Preforeclosure Sale Closing Date", "562": "Date of First Uncured Default", "563": "Date Default Was Cured", "564": "Date of First Mortgage Payment", "565": "Date of Property Inspection", "566": "Date Total Amount of Delinquency Reported", "567": "Date Outstanding Loan Balance Reported", "568": "Date Foreclosure Sale Scheduled", "569": "Date Foreclosure Held", "570": "Date Redemption Period Ends", "571": "Date Voluntary Conveyance Accepted", "572": "Date Property Sold", "573": "Date Claim Paid", "574": "Action Begin Date", "575": "Projected Action End Date", "576": "Action End Date", "577": "Original Maturity Date", "578": "Date Referred to Attorney for Foreclosure", "579": "Planned Release", "580": "Actual Release", "581": "Contract Period", "582": "Report Period", "583": "Suspension", "584": "Reinstatement", "585": "Report", "586": "First Contact", "587": "Projected Foreclosure Sale Date", "589": "Date Assignment Filed for Record", "590": "Date of Appraisal", "591": "Expiration Date of Extension to Assign", "592": "Date of Extension to Convey", "593": "Date Hazard Insurance Policy Refused", "594": "High Fabrication Release Authorization", "595": "High Raw Material Authorization", "596": "Material Change Notice", "597": "Latest Delivery Date at Rail Ramp", "598": "Rejected", "599": "Repayment Schedule Sent", "600": "As Of", "601": "First Submission", "602": "Subsequent Submission", "603": "Renewal", "604": "Withdrawn", "606": "Certification Period Start", "607": "Certification Revision", "608": "Continuous Coverage Date(s)", "609": "Prearranged Deal Match", "610": "Contingency End", "611": "Oxygen Therapy Evaluation", "612": "Shut In", "613": "Allowable Effective", "614": "First Sales", "615": "Date Acquired", "616": "Interviewer Signed", "617": "Application Logged Date", "618": "Review Date", "619": "Decision Date", "620": "Previously Resided", "621": "Reported", "622": "Checked", "623": "Settled", "624": "Presently Residing", "625": "Employed in this Position", "626": "Verified", "627": "Second Admission Date", "628": "Inquiry", "629": "Account Opened", "630": "Account Closed", "631": "Property Acquired", "632": "Property Built", "633": "Employed in this Profession", "634": "Next Review Date", "635": "Initial Contact Date", "636": "Date of Last Update", "637": "Second Discharge Date", "638": "Date of Last Draw", "640": "Complaint", "641": "Option", "642": "Solicitation", "643": "Clause", "644": "Meeting", "646": "Rental Period", "647": "Next Pay Increase", "648": "Period Covered by Source Documents", "649": "Document Due", "650": "Court Notice", "651": "Expected Funding Date", "652": "Assignment Recorded", "653": "Case Reopened", "655": "Previous Court Event", "656": "Last Date to Object", "657": "Court Event", "658": "Last Date to File a Claim", "659": "Case Converted", "660": "Debt Incurred", "661": "Judgment", "662": "Wages Start", "663": "Wages End", "664": "Date Through Which Property Taxes Have Been Paid", "665": "Paid Through Date", "666": "Date Paid", "667": "Anesthesia Administration", "668": "Price Protection", "669": "Claim Incurred", "670": "Book Entry Delivery", "671": "Rate Adjustment", "672": "Next Installment Due Date", "673": "Daylight Overdraft Time", "674": "Presentment 
Date", "675": "Negotiated Extension Date", "681": "Remittance", "682": "Security Rate Adjustment", "683": "Filing Period", "684": "Review Period End", "685": "Requested Settlement", "686": "Last Screening", "687": "Confinement", "688": "Arrested", "689": "Convicted", "690": "Interviewed", "691": "Last Visit", "692": "Recovery", "693": "Time in U.S.", "694": "Future Period", "695": "Previous Period", "696": "Interest Paid To", "697": "Date of Seizure", "699": "Setoff", "700": "Override Date for Settlement", "701": "Settlement Date (From Interline Settlement System (ISS) only)", "702": "Sending Road Time Stamp", "703": "Retransmission Time Stamp", "704": "Delivery Appointment Date and Time", "705": "Interest Paid Through", "706": "Date Material Usage Suspended", "707": "Last Payment Made", "708": "Past Due", "709": "Analysis Month Ending", "710": "Date of Specification", "711": "Date of Standard", "712": "Return to Work Part Time", "713": "Paid-through Date for Salary Continuation", "714": "Paid-through Date for Vacation Pay", "715": "Paid-through Date for Accrued Sick Pay", "716": "Appraisal Ordered", "717": "Date of Operation", "718": "Best Time to Call", "719": "Verbal Report Needed", "720": "Estimated Escrow Closing", "721": "Permit Year", "722": "Remodeling Completed", "723": "Current Month Ending", "724": "Previous Month Ending", "725": "Cycle to Date", "726": "Year to Date", "727": "On Hold", "728": "Off Hold", "729": "Facsimile Due By", "730": "Reporting Cycle Date", "731": "Last Paid Installment Date", "732": "Claims Made", "733": "Date of Last Payment Received", "734": "Curtailment Date", "736": "Pool Settlement", "737": "Next Interest Change Date", "738": "Most Recent Hemoglobin or Hematocrit or Both", "739": "Most Recent Serum Creatine", "740": "Closed", "741": "Therapy", "742": "Implantation", "743": "Explantation", "744": "Date Became Aware", "745": "First Marketed", "746": "Last Marketed", "750": "Expected Problem Resolution", "751": "Alternate Problem Resolution", "752": "Fee Capitalization", "753": "Interest Capitalization", "754": "Next Payment Due", "755": "Conversion to Repayment", "756": "End of Grace", "757": "School Refund", "758": "Simple Interest Due", "760": "Printed", "770": "Back on Market", "771": "Status", "773": "Off-Market", "774": "Tour", "776": "Listing Received", "778": "Anticipated Closing", "779": "Last Publication", "780": "Sold Book Publication", "781": "Occupancy", "782": "Contingency", "783": "Percolation Test", "784": "Septic Approval", "785": "Title Transfer", "786": "Open House", "789": "Homestead", "800": "Midpoint of Performance", "801": "Acquisition Date", "802": "Date of Action", "803": "Paid in Full", "804": "Refinance", "805": "Voluntary Termination", "806": "Customer Order", "807": "Stored", "808": "Selected", "809": "Posted", "810": "Document Received", "811": "Rebuilt", "812": "Marriage", "813": "Customs Entry Date", "814": "Payment Due Date", "815": "Maturity Date", "816": "Trade Date", "817": "Gallons Per Minute (GPM) Test Performed", "818": "British Thermal Unit (BTU) Test Perfomed", "820": "Real Estate Tax Year", "821": "Final Reconciliation Value Estimate as of", "822": "Map", "823": "Opinion", "824": "Version", "825": "Original Due Date", "826": "Incumbency Period", "827": "Audience Deficiency Period", "828": "Aired Date", "830": "Schedule", "831": "Paid Through Date for Minimum Payment", "832": "Paid Through Date for Total Payment", "840": "Election", "841": "Engineering Data List", "842": "Last Production", "843": "Not Before", 
"844": "Not After", "845": "Initial Claim", "846": "Benefits Paid", "847": "Wages Earned", "848": "Adjusted Start", "849": "Adjusted End", "850": "Revised Adjusted Start", "851": "Revised Adjusted End", "853": "Field Test", "854": "Mortgage Note Date", "855": "Alternative Due Date", "856": "First Payment Change", "857": "First Rate Adjustment", "858": "Alternate Base Period", "859": "Prior Notice", "860": "Appointment Effective", "861": "Appointment Expiration", "862": "Company Termination", "863": "Continuing Education Requirement", "864": "Distributor Effective", "865": "Distributor Termination", "866": "Examination", "867": "Incorporation Dissolution", "868": "Last Follow-up", "869": "License Effective", "870": "License Expiration", "871": "License Renewal", "872": "License Requested", "873": "Mailed", "874": "Paperwork Mailed", "875": "Previous Employment", "876": "Previous Employment End", "877": "Previous Employment Start", "878": "Previous Residence", "879": "Previous Residence End", "880": "Previous Residence Start", "881": "Request", "882": "Resident License Effective", "883": "Resident License Expiration", "884": "State Termination", "885": "Texas Line Termination", "900": "Acceleration", "901": "Adjusted Contestability", "902": "Application", "903": "Application Entry", "904": "Approval/Offer", "905": "Automatic Premium Loan", "906": "Collection", "907": "Confinement End", "908": "Confinement Start", "909": "Contestability", "910": "Flat Extra End", "911": "Last Activity", "912": "Last Change", "913": "Last Episode", "914": "Last Meal", "915": "Loan", "916": "Application Status", "917": "Maturity", "918": "Medical Information Signature", "919": "Medical Information System", "920": "Note", "921": "Offer Expiration", "922": "Original Receipt", "923": "Placement", "924": "Placement Period Expiration", "925": "Processing", "926": "Recapture", "927": "Re-entry", "928": "Reissue", "929": "Reinstatement", "930": "Requalification", "931": "Reinsurance Effective", "932": "Reservation of Facility", "933": "Settlement Status", "934": "Table Rating End", "935": "Termination of Facility", "936": "Treatment", "937": "Department of Labor Wage Determination Date", "938": "Order", "939": "Resolved", "940": "Execution Date", "941": "Capitation Period Start", "942": "Capitation Period End", "943": "Last Date for a Government Agency to File a Claim", "944": "Adjustment Period", "945": "Activity", "946": "Mail By", "947": "Preparation", "948": "Payment Initiated", "949": "Payment Effective", "950": "Application", "951": "Reclassification", "952": "Reclassification (Exit Date)", "953": "Post-Reclassification", "954": "Post-Reclassification (First Report Card)", "955": "Post-Reclassification (First Semi-annual)", "956": "Post-Reclassification (Second Semi-annual)", "957": "Post-Reclassification (End of Second Year)", "960": "Adjusted Death Benefit", "961": "Anniversary", "962": "Annuitization", "963": "Annuity Commencement Date", "964": "Bill", "965": "Calendar Anniversary", "966": "Contract Mailed", "967": "Early Withdrawal", "968": "Fiscal Anniversary", "969": "Income", "970": "Initial Premium", "971": "Initial Premium Effective", "972": "Last Premium Effective", "973": "Minimum Required Distribution", "974": "Next Anniversary", "975": "Notice", "976": "Notification of Death", "977": "Partial Annuitization", "978": "Plan Anniversary", "979": "Policy Surrender", "980": "Prior Contract Anniversary", "981": "Prior Contract Issue", "982": "Signature Received", "983": "Tax", "984": "Benefit Period", 
"985": "Month to Date", "986": "Semiannual Ending", "987": "Surrender", "988": "Plan of Treatment Period", "989": "Prior Hospitalization Date(s) Related to Current Service(s)", "992": "Date Requested", "993": "Request for Quotation", "994": "Quote", "995": "Recorded Date", "996": "Required Delivery", "997": "Quote to be Received By", "998": "Continuation of Pay Start Date", "999": "Document Date", "AA1": "Estimated Point of Arrival", "AA2": "Estimated Point of Discharge", "AA3": "Cancel After, Ex Country", "AA4": "Cancel After, Ex Factory", "AA5": "Do Not Ship Before, Ex Country", "AA6": "Do Not Ship Before, Ex Factory", "AA7": "Final Scheduled Payment", "AA8": "Actual Discharge", "AA9": "Address Period", "AAA": "Arrival in Country", "AAB": "Citation", "AAD": "Crime", "AAE": "Discharge - Planned", "AAF": "Draft", "AAG": "Due Date", "AAH": "Event", "AAI": "First Involvement", "AAJ": "Guarantee Period", "AAK": "Income Increase Period", "AAL": "Installment Date", "AAM": "Last Civilian Flight", "AAN": "Last Flight", "AAO": "Last Insurance Medical", "AAP": "Last Military Flight", "AAQ": "Last Physical", "AAR": "License", "AAS": "Medical Certificate", "AAT": "Medication", "AAU": "Net Worth Date", "AAV": "Next Activity", "AAW": "Ownewship Change", "AAX": "Ownership Period", "AAY": "Rate Date", "AAZ": "Requested Contract", "AB1": "Requested Offer", "AB2": "Sales Period", "AB3": "Tax Year", "AB4": "Time Period", "AB5": "Travel", "AB6": "Treatment End", "AB7": "Treatment Start", "AB8": "Trust", "AB9": "Worst Time to Call", "ABA": "Registration", "ABB": "Revoked", "ABC": "Estimated Date of Birth", "ABD": "Last Annual Report", "ABE": "Legal Action Started", "ABG": "Payment Period", "ABH": "Profit Period", "ABI": "Registered", "ABK": "Consolidated", "ABL": "Board of Directors Not Authorized As Of", "ABM": "Board of Directors Incomplete As Of", "ABN": "Manager Not Registered As Of", "ABO": "Citizenship Change", "ABP": "Participation", "ABQ": "Capitalization", "ABR": "Registration of Board of Directors", "ABS": "Ceased Operations", "ABT": "Satisfied", "ABU": "Terms Met", "ABV": "Asset Documentation Expiration", "ABW": "Credit Documentation Expiration", "ABX": "Income Documentation Expiration", "ABY": "Product Held Until", "ACA": "Immigration Date", "ACB": "Estimated Immigration Date", "ACK": "Acknowledgment", "ADB": "Business Control Change", "ADC": "Court Registration", "ADD": "Annual Report Due", "ADL": "Asset and Liability Schedule", "ADM": "Annual Report Mailed", "ADR": "Annual Report Filed", "ARD": "Annual Report Delinquency", "CAD": "Changed Accounting Date", "CCR": "Customs Cargo Release", "CDT": "Maintenance Comment", "CEA": "Formation", "CEB": "Continuance", "CEC": "Merger", "CED": "Year Due", "CEE": "Next Annual Meeting", "CEF": "End of Last Fiscal Year", "CEH": "Year Beginning", "CEJ": "Started Doing Business", "CEK": "Sworn and Subscribed", "CEL": "Calendar Year", "CEM": "Asset", "CEN": "Inactivity", "CEO": "High Capital Year", "CLO": "Closing Date of First Balance Sheet", "CLU": "Closed Until", "COM": "Compliance", "CON": "Converted into Holding Company", "CUR": "Current List", "DDO": "Declaration", "DEE": "Deed Not Available", "DET": "Detrimental Information Received", "DFF": "Deferral", "DFS": "Departure From Specification", "DIS": "Disposition", "DLC": "Date of Last Contact", "DOA": "Date of Abandonment", "DOI": "Delivery Order Issued", "DSP": "Disposal", "ECD": "Estimated Construction Date", "ECF": "Estimated Completion - First Prior Month", "ECS": "Estimated Completion - Second Prior 
Month", "ECT": "Estimated Completion - Third Prior Month", "EPP": "Estimate Preparation", "ESC": "Estimate Comment", "ESF": "Estimated Start - First Prior Month", "ESS": "Estimated Start - Second Prior Month", "EST": "Estimated Start - Third Prior Month", "ETP": "Earliest Filing Period", "EXO": "Exposure", "EXP": "Export", "FFI": "Financial Information", "GRD": "Graduated", "ICF": "Converted to Electronic Date", "IDG": "Insolvency Discharge Granted", "III": "Incorporation", "IMP": "Import", "INC": "Incident", "INT": "Inactive Until", "KEV": "Key Event Fiscal Year", "KEW": "Key Event Calendar Year", "LAS": "Last Check for Balance Sheet Update", "LCC": "Last Capital Change", "LEA": "Letter of Agreement", "LEL": "Letter of Liability", "LIQ": "Liquidation", "LLP": "Low Period", "LOG": "Equipment Log Entry", "LPC": "List Price Change", "LSC": "Legal Structure Change", "LTP": "Latest Filing Period", "MRR": "Meter Reading", "MSD": "Latest Material Safety Data Sheet Date", "NAM": "Present Name", "NFD": "Negotiated Finish", "NRG": "Not Registered", "NSD": "Negotiated Start", "ORG": "Original List", "PBC": "Present Control", "PDV": "Privilege Details Verification", "PLS": "Present Legal Structure", "PPP": "Peak Period", "PRD": "Previously Reported Date of Birth", "PRR": "Presented to Receivers", "PTD": "Paid To Date", "RAP": "Receiver Appointed", "RES": "Resigned", "RFD": "Requested Finish", "RFF": "Recovery Finish", "RFO": "Referred From", "RNT": "Rent Survey", "RRM": "Received in the Mail", "RRT": "Revocation", "RSD": "Requested Start", "RSS": "Recovery Start", "RTO": "Referred To", "SCV": "Social Security Claims Verification", "SDD": "Sole Directorship Date", "STN": "Transition", "TSR": "Trade Style Registered", "TSS": "Trial Started", "TST": "Trial Set", "VAT": "Value Added Tax (VAT) Claims Verification", "VLU": "Valid Until", "W01": "Sample Collected", "W02": "Status Change", "W03": "Construction Start", "W05": "Recompletion", "W06": "Last Logged", "W07": "Well Log Run", "W08": "Surface Casing Authority Approval", "W09": "Reached Total Depth", "W10": "Spacing Order Unit Assigned", "W11": "Rig Arrival", "W12": "Location Exception Order Number Assigned", "W13": "Sidetracked Wellbore", "WAY": "Waybill", "YXX": "Programmed Fiscal Year", "YXY": "Programmed Calendar Year", "ZZZ": "Mutually Defined", }
bsd-3-clause
-3,367,867,814,516,382,700
36.845537
79
0.616177
false
gamechanger/deferrable
tests/metadata_test.py
1
1210
from unittest import TestCase from uuid import uuid1 from mock import Mock from deferrable.metadata import MetadataProducerConsumer from deferrable.pickling import dumps class TestMetadataProducerConsumer(TestCase): def setUp(self): self.item = {'id': uuid1(), 'metadata': {'premade': dumps(10)}} self.cls = MetadataProducerConsumer self.cls.NAMESPACE = 'namespace' self.my_mock = Mock() def test_raises_on_empty_namespace(self): with self.assertRaises(ValueError): self.cls.NAMESPACE = None self.cls() def test_does_not_raise_with_valid_namespace(self): self.cls.NAMESPACE = 'namespace' self.cls() def test_apply_metadata_to_item(self): self.cls.produce_metadata = lambda self: 1 self.cls()._apply_metadata_to_item(self.item) self.assertEqual(self.item['metadata']['namespace'], dumps(1)) def test_consume_metadata_from_item(self): self.cls.NAMESPACE = 'premade' self.cls.consume_metadata = lambda instance, metadata: self.my_mock(metadata) self.cls()._consume_metadata_from_item(self.item) self.my_mock.assert_called_once_with(10)
mit
-8,957,538,516,853,558,000
35.666667
85
0.673554
false
praekeltfoundation/seed-xylem
seed/xylem/postgres.py
1
6943
import base64 import hashlib import os import random import re import time import uuid from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes from cryptography.hazmat.backends import default_backend from rhumba import RhumbaPlugin from twisted.internet import defer, reactor from twisted.enterprise import adbapi from seed.xylem.pg_compat import psycopg2, errorcodes, DictCursor class APIError(Exception): """ Custom exception to make API errors easier to work with. """ def __init__(self, err_msg): super(APIError, self).__init__() self.err_msg = err_msg class Plugin(RhumbaPlugin): # FIXME: Setup is asynchronous and there may be a race condition if we try # to process a request before setup finishes. def __init__(self, *args, **kw): setup_db = kw.pop('setup_db', True) super(Plugin, self).__init__(*args, **kw) self.servers = self.config['servers'] # Details for Xylems internal DB self.db = self.config.get('db_name', 'xylem') self.host = self.config.get('db_host', 'localhost') self.port = self.config.get('db_port', 5432) self.password = self.config.get('db_password', '') self.username = self.config.get('db_username', 'postgres') self.key = self.config['key'] if setup_db: reactor.callWhenRunning(self._setup_db) def _cipher(self, key_iv): """ Construct a Cipher object with suitable parameters. The parameters used are compatible with the pycrypto code this implementation replaced. """ key = hashlib.md5(self.key).hexdigest() return Cipher( algorithms.AES(key), modes.CFB8(key_iv), backend=default_backend()) def _encrypt(self, s): key_iv = os.urandom(algorithms.AES.block_size / 8) encryptor = self._cipher(key_iv).encryptor() pwenc = encryptor.update(s) + encryptor.finalize() return base64.b64encode(key_iv + pwenc) def _decrypt(self, e): block_size = algorithms.AES.block_size / 8 msg = base64.b64decode(e) key_iv = msg[:block_size] decryptor = self._cipher(key_iv).decryptor() return decryptor.update(msg[block_size:]) + decryptor.finalize() def _setup_db(self): db_table = ( "CREATE TABLE databases (name varchar(66) UNIQUE, host" " varchar(256), username varchar(256), password varchar(256));") cur = self._get_xylem_db() d = cur.runOperation(db_table) ignore_pg_error(d, errorcodes.DUPLICATE_TABLE) d.addBoth(cursor_closer(cur)) return d def _create_password(self): # Guranteed random dice rolls return base64.b64encode( hashlib.sha1(uuid.uuid1().hex).hexdigest())[:24] def _create_username(self, db): return base64.b64encode("mydb" + str( time.time()+random.random()*time.time())).strip('=').lower() def _get_connection(self, db, host, port, user, password): return adbapi.ConnectionPool( 'psycopg2', database=db, host=host, port=port, user=user, password=password, cp_min=1, cp_max=2, cp_openfun=self._fixdb, cursor_factory=DictCursor) def _get_xylem_db(self): return self._get_connection( db=self.db, host=self.host, port=self.port, user=self.username, password=self.password) def _fixdb(self, conn): conn.autocommit = True def call_create_database(self, args): cleanups = [] # Will be filled with callables to run afterwards def cleanup_cb(r): d = defer.succeed(None) for f in reversed(cleanups): d.addCallback(lambda _: f()) return d.addCallback(lambda _: r) def api_error_eb(f): f.trap(APIError) return {"Err": f.value.err_msg} d = self._call_create_database(args, cleanups.append) d.addBoth(cleanup_cb) d.addErrback(api_error_eb) return d def _build_db_response(self, row): return { "Err": None, "name": row['name'], "hostname": row['host'], "user": row['username'], "password": 
self._decrypt(row['password']), } @defer.inlineCallbacks def _call_create_database(self, args, add_cleanup): # TODO: Validate args properly. name = args['name'] if not re.match('^\w+$', name): raise APIError("Database name must be alphanumeric") xylemdb = self._get_xylem_db() add_cleanup(cursor_closer(xylemdb)) find_db = "SELECT name, host, username, password FROM databases"\ " WHERE name=%s" rows = yield xylemdb.runQuery(find_db, (name,)) if rows: defer.returnValue(self._build_db_response(rows[0])) else: server = random.choice(self.servers) connect_addr = server.get('connect_addr', server['hostname']) rdb = self._get_connection( 'postgres', connect_addr, int(server.get('port', 5432)), server.get('username', 'postgres'), server.get('password')) add_cleanup(cursor_closer(rdb)) check = "SELECT * FROM pg_database WHERE datname=%s;" r = yield rdb.runQuery(check, (name,)) if not r: user = self._create_username(name) password = self._create_password() create_u = "CREATE USER %s WITH ENCRYPTED PASSWORD %%s;" % user yield rdb.runOperation(create_u, (password,)) create_d = "CREATE DATABASE %s ENCODING 'UTF8' OWNER %s;" % ( name, user) yield rdb.runOperation(create_d) rows = yield xylemdb.runQuery( ("INSERT INTO databases (name, host, username, password)" " VALUES (%s, %s, %s, %s) RETURNING *;"), (name, server['hostname'], user, self._encrypt(password))) defer.returnValue(self._build_db_response(rows[0])) else: raise APIError('Database exists but not known to xylem') def ignore_pg_error(d, pgcode): """ Ignore a particular postgres error. """ def trap_err(f): f.trap(psycopg2.ProgrammingError) if f.value.pgcode != pgcode: return f return d.addErrback(trap_err) def cursor_closer(cur): """ Construct a cursor closing function that can be used on its own or as a passthrough callback. """ def close_cursor(r=None): if cur.running: cur.close() return r return close_cursor
mit
5,221,785,347,617,478,000
30.848624
79
0.571943
false
matthiaskramm/corepy
examples/spu_write_mbox.py
1
3062
# Copyright (c) 2006-2009 The Trustees of Indiana University. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # - Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # - Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # - Neither the Indiana University nor the names of its contributors may be used # to endorse or promote products derived from this software without specific # prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import corepy.arch.spu.isa as spu import corepy.arch.spu.platform as env import corepy.arch.spu.lib.dma as dma from corepy.arch.spu.lib.util import load_word prgm = env.Program() code = prgm.get_stream() proc = env.Processor() # Grab a register and initialize it reg = prgm.acquire_register() load_word(code, reg, 0xCAFEBABE) # Write the value to the outbound mailbox dma.spu_write_out_mbox(code, reg) # Wait for a signal sig = dma.spu_read_signal1(code) prgm.release_register(sig) prgm.release_register(reg) prgm.add(code) # Start the synthesized SPU program id = proc.execute(prgm, async = True) # Spin until the mailbox can be read while env.spu_exec.stat_out_mbox(id) == 0: pass value = env.spu_exec.read_out_mbox(id) # Signal the SPU env.spu_exec.write_signal(id, 1, 0x1234) # Wait for the SPU program to complete proc.join(id) print "value 0x%X" % value
bsd-3-clause
5,050,079,665,438,841,000
43.376812
80
0.610059
false
ATNF/askapsdp
Tools/Dev/epicsdb/askapdev/epicsdb/sphinxext.py
1
7822
""" :class:`EpicsDbDirective` implements the ``epicsdb`` -directive. """ import os, sys, re import encodings from docutils.parsers.rst import Directive, directives from docutils import nodes, utils from sphinx import addnodes from sphinx.locale import _ #TODO handle expand keyword def normalize_ref(text): ret = text.replace('$', '').replace('(', '').replace(')', '').replace(':', '-').replace('_', '-') ret = ret.lower() return encodings.normalize_encoding(ret).replace('_', '-') class PVNode(nodes.General, nodes.Element): pass class EpicsDbDirective(Directive): """ This implements the directive. Directive allows to create RST tables from the contents of the ParameterSet file. The functionality is very similar to csv-table (docutils) and xmltable (:mod:`sphinxcontrib.xmltable`). Example of the directive: .. code-block:: rest .. epicsdb:: path/to/epics.db :hide-pv: hide_regex :hide-tag: where: hide_regex regular expression of PV to hide, e.g. ``.*_RAW$`` to hide all PVs ending in _RAW """ required_arguments = 1 has_content = False option_spec = { 'show-pv': directives.unchanged, 'hide-pv': directives.unchanged, 'hide-tag' : directives.flag } def __init__(self, *args, **kwargs): Directive.__init__(self, *args, **kwargs) # pv name valid chars including macros pvNameRegex = '[a-zA-Z0-9_\-:\[\]<>;$(),]+' self.reRecord = re.compile('\s*record\(\s*(\w+)\s*,\s*"(%s)"\s*\)' % pvNameRegex) self.reField = re.compile('(\s*)field\(\s*(FLNK|LNK.?|INP.?|OUT.?|DOL)\s*,\s*"(%s)(.*)"\s*\)' % pvNameRegex) self.reComment = re.compile('#') self.reVDCTComment = re.compile('#\!') self.reTagComment = re.compile('#\$\ *(\w+)') self.reExpand = re.compile('\s*expand') self.reEndRecord = re.compile('\s*}$') def _get_directive_path(self, path): """ Returns transformed path from the directive option/content """ source = self.state_machine.input_lines.source( self.lineno - self.state_machine.input_offset - 1) source_dir = os.path.dirname(os.path.abspath(source)) path = os.path.normpath(os.path.join(source_dir, path)) return utils.relative_path(None, path) def run(self): """ Implements the directive """ env = self.state.document.settings.env if not hasattr(env, 'epicsPVs'): env.epicsPVs = {} # Get content and options file_path = self.arguments[0] show_pv = self.options.get('show-pv', None) hide_pv = self.options.get('hide-pv', None) hide_tag = 'hide-tag' in self.options if hide_pv is not None: hide_pv = [re.compile(pv.strip()) for pv in hide_pv.split(',')] if not file_path: return [self._report('file_path -option missing')] # Transform the path suitable for processing file_path = self._get_directive_path(file_path) dbFile = open(file_path, 'r').readlines() file_path = os.path.basename(file_path) node = nodes.section() node['ids'] = [file_path] node += nodes.title(text=file_path) in_record = False hide_record = False tags = {} comments = [] for line in dbFile: # handle dos files line = line.replace('\r\n','\n') # collect record comments if self.reComment.match(line): if self.reVDCTComment.match(line): # igorne VDCT comments continue tag = self.reTagComment.match(line) if tag is not None: tags[tag.group(1)] = True continue comments.append(line) continue # ignore expand blocks for now if self.reExpand.match(line): hide_record = True print "Ignoring db expand" continue recordMatch = self.reRecord.match(line) if recordMatch: pvName = recordMatch.group(2) if hide_tag and 'HIDE_PV' in tags: print "hiding tagged PV", pvName hide_record = True continue if hide_pv is not None: for regex in hide_pv: if regex.match(pvName): print 
"hiding found PV", pvName hide_record = True continue in_record = True record_text = '' # where does :ref: role modify the label? label = normalize_ref(pvName) env.epicsPVs[label] = env.docname section = nodes.section() section['ids'] = [label] title = nodes.title(text=pvName) section += title if len(comments) > 0: bullets = nodes.bullet_list() for comment in comments: item = nodes.list_item() item += nodes.paragraph(text=comment.lstrip(' #')) bullets += item section += bullets if in_record: # parse the field for PV names fieldMatch = self.reField.match(line) fieldPV = '1' if fieldMatch: indent, field, fieldPV, attrib = fieldMatch.groups() if not fieldPV.isdigit(): # expand PV names (only non-constants) record_text += '%sfield(%s, ":pv:`%s`%s")\n' % (indent, field, fieldPV, attrib) else: record_text += line if self.reEndRecord.match(line): if not hide_record: # parse record through inline rst parser to resolve PV links text_nodes, messages = self.state.inline_text(record_text, self.lineno) section += nodes.literal_block(record_text, '', *text_nodes, **self.options) node += section in_record = False hide_record = False comments = [] tags = {} # add the PV to the index indextext = _('%s (PV)') % pvName inode = addnodes.index(entries=[('single', indextext, normalize_ref(pvName), pvName)]) node += inode return [node] def epics_pv_role(typ, rawtext, text, lineno, inliner, options={}, content=[]): node = PVNode() node['pvname'] = text return [node], [] def process_pvnodes(app, doctree, fromdocname): env = app.builder.env for pvnode in doctree.traverse(PVNode): pvname = pvnode['pvname'] ref = normalize_ref(pvname) # match against PV basename if ref not in env.epicsPVs: for pv in env.epicsPVs.keys(): if re.search('.*' + ref + '$', pv): ref = pv break if ref in env.epicsPVs: # TODO will break other renderers ref = env.epicsPVs[ref] + '.html#' + ref newnode = nodes.reference(pvname, pvname, internal=True, refuri=ref) pvnode.replace_self([newnode]) def setup(app): """ Extension setup, called by Sphinx """ app.add_node(PVNode) app.add_directive('epicsdb', EpicsDbDirective) app.add_role('pv', epics_pv_role) app.connect('doctree-resolved', process_pvnodes)
gpl-2.0
576,053,415,792,885,570
33.008696
116
0.520967
false
WikiWatershed/gwlf-e
gwlfe/MultiUse_Fxns/Runoff/CNumImpervReten.py
1
2166
from numpy import repeat from numpy import tile from numpy import where from numpy import zeros from gwlfe.Input.LandUse.NLU import NLU from gwlfe.Input.WaterBudget.Water import Water, Water_f from gwlfe.Memoization import memoize from gwlfe.MultiUse_Fxns.Runoff.CNI import CNI, CNI_f from gwlfe.MultiUse_Fxns.Runoff.CNumImperv import CNumImperv, CNumImperv_f @memoize def CNumImpervReten(NYrs, DaysMonth, Temp, Prec, InitSnow_0, AntMoist_0, NRur, NUrb, CNI_0, Grow_0): # TODO: this is exactly the same as perv and retention cni = CNI(NRur, NUrb, CNI_0) c_num_imperv = CNumImperv(NYrs, NRur, NUrb, DaysMonth, InitSnow_0, Temp, Prec, CNI_0, Grow_0, AntMoist_0) nlu = NLU(NRur, NUrb) water = Water(NYrs, DaysMonth, InitSnow_0, Temp, Prec) result = zeros((NYrs, 12, 31, nlu)) for Y in range(NYrs): for i in range(12): for j in range(DaysMonth[Y][i]): if Temp[Y][i][j] > 0 and water[Y][i][j] > 0.01: # missing if water[Y][i][j] < 0.05: # missing pass else: for l in range(NRur, nlu): if cni[1][l] > 0: result[Y][i][j][l] = 2540 / c_num_imperv[Y][i][j][l] - 25.4 if result[Y][i][j][l] < 0: result[Y][i][j][l] = 0 return result def CNumImpervReten_f(NYrs, DaysMonth, Temp, Prec, InitSnow_0, AntMoist_0, NRur, NUrb, CNI_0, Grow_0): cni = CNI_f(NRur, NUrb, CNI_0) cni_1 = tile(cni[1][None, None, None, :], (NYrs, 12, 31, 1)) c_num_imperv = CNumImperv_f(NYrs, NRur, NUrb, DaysMonth, InitSnow_0, Temp, Prec, CNI_0, Grow_0, AntMoist_0) nlu = NLU(NRur, NUrb) water = repeat(Water_f(NYrs, DaysMonth, InitSnow_0, Temp, Prec)[:, :, :, None], nlu, axis=3) result = zeros((NYrs, 12, 31, nlu)) TempE = repeat(Temp[:, :, :, None], nlu, axis=3) result[where((TempE > 0) & (water >= 0.05) & (cni_1 > 0))] = 2540 / c_num_imperv[ where((TempE > 0) & (water >= 0.05) & (cni_1 > 0))] - 25.4 result[where(result < 0)] = 0 return result
apache-2.0
2,952,450,171,647,139,300
45.085106
111
0.564635
false
all-of-us/raw-data-repository
rdr_service/lib_fhir/fhirclient_1_0_6/models/organization.py
1
5006
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Generated from FHIR 1.0.2.7202 (http://hl7.org/fhir/StructureDefinition/Organization) on 2016-06-23. # 2016, SMART Health IT. from . import domainresource class Organization(domainresource.DomainResource): """ A grouping of people or organizations with a common purpose. A formally or informally recognized grouping of people or organizations formed for the purpose of achieving some form of collective action. Includes companies, institutions, corporations, departments, community groups, healthcare practice groups, etc. """ resource_name = "Organization" def __init__(self, jsondict=None, strict=True): """ Initialize all valid properties. :raises: FHIRValidationError on validation errors, unless strict is False :param dict jsondict: A JSON dictionary to use for initialization :param bool strict: If True (the default), invalid variables will raise a TypeError """ self.active = None """ Whether the organization's record is still in active use. Type `bool`. """ self.address = None """ An address for the organization. List of `Address` items (represented as `dict` in JSON). """ self.contact = None """ Contact for the organization for a certain purpose. List of `OrganizationContact` items (represented as `dict` in JSON). """ self.identifier = None """ Identifies this organization across multiple systems. List of `Identifier` items (represented as `dict` in JSON). """ self.name = None """ Name used for the organization. Type `str`. """ self.partOf = None """ The organization of which this organization forms a part. Type `FHIRReference` referencing `Organization` (represented as `dict` in JSON). """ self.telecom = None """ A contact detail for the organization. List of `ContactPoint` items (represented as `dict` in JSON). """ self.type = None """ Kind of organization. Type `CodeableConcept` (represented as `dict` in JSON). """ super(Organization, self).__init__(jsondict=jsondict, strict=strict) def elementProperties(self): js = super(Organization, self).elementProperties() js.extend([ ("active", "active", bool, False, None, False), ("address", "address", address.Address, True, None, False), ("contact", "contact", OrganizationContact, True, None, False), ("identifier", "identifier", identifier.Identifier, True, None, False), ("name", "name", str, False, None, False), ("partOf", "partOf", fhirreference.FHIRReference, False, None, False), ("telecom", "telecom", contactpoint.ContactPoint, True, None, False), ("type", "type", codeableconcept.CodeableConcept, False, None, False), ]) return js from . import backboneelement class OrganizationContact(backboneelement.BackboneElement): """ Contact for the organization for a certain purpose. """ resource_name = "OrganizationContact" def __init__(self, jsondict=None, strict=True): """ Initialize all valid properties. :raises: FHIRValidationError on validation errors, unless strict is False :param dict jsondict: A JSON dictionary to use for initialization :param bool strict: If True (the default), invalid variables will raise a TypeError """ self.address = None """ Visiting or postal addresses for the contact. Type `Address` (represented as `dict` in JSON). """ self.name = None """ A name associated with the contact. Type `HumanName` (represented as `dict` in JSON). """ self.purpose = None """ The type of contact. Type `CodeableConcept` (represented as `dict` in JSON). """ self.telecom = None """ Contact details (telephone, email, etc.) for a contact. List of `ContactPoint` items (represented as `dict` in JSON). 
""" super(OrganizationContact, self).__init__(jsondict=jsondict, strict=strict) def elementProperties(self): js = super(OrganizationContact, self).elementProperties() js.extend([ ("address", "address", address.Address, False, None, False), ("name", "name", humanname.HumanName, False, None, False), ("purpose", "purpose", codeableconcept.CodeableConcept, False, None, False), ("telecom", "telecom", contactpoint.ContactPoint, True, None, False), ]) return js from . import address from . import codeableconcept from . import contactpoint from . import fhirreference from . import humanname from . import identifier
bsd-3-clause
6,798,010,589,699,098,000
38.109375
103
0.621055
false
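A hedged usage sketch for the generated Organization model in the record above. The flat `organization` import path and the jsondict/as_json() round-trip behaviour are assumptions based on the generated code in the record, not a verified fhirclient API reference.

# Sketch only: construct the model from a FHIR JSON dict and read it back.
from organization import Organization  # assumed import path for the module above

data = {"resourceType": "Organization", "name": "Example Health Group", "active": True}
org = Organization(jsondict=data, strict=True)
print(org.name, org.active)
print(org.as_json())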
goord/ece2cmor3
ece2cmor3/components.py
1
1721
import os import logging # Logger instance log = logging.getLogger(__name__) table_file = "table_file" realms = "realms" # List of components, used to determine the script arguments and used by task loader. # Add your NEWCOMPONENT to this dictionary if you want to extend ece2cmor3 to more models. models = {"ifs": {realms: ["atmos", "atmosChem", "land", "landIce", "ocean", "seaIce"], table_file: os.path.join(os.path.dirname(__file__), "resources", "ifspar.json")}, "nemo": {realms: ["ocean", "ocnBgchem", "seaIce"], table_file: os.path.join(os.path.dirname(__file__), "resources", "nemopar.json")}, "lpjg": {realms: ["land", "atmos"], table_file: os.path.join(os.path.dirname(__file__), "resources", "lpjgpar.json")}, "tm5": {realms: ["aerosol", "atmosChem", "atmos"], table_file: os.path.join(os.path.dirname(__file__), "resources", "tm5par.json")} } ece_configs = {'EC-EARTH-AOGCM' : ["ifs", "nemo" ], 'EC-EARTH-HR' : ["ifs", "nemo" ], 'EC-EARTH-LR' : ["ifs", "nemo" ], 'EC-EARTH-CC' : ["ifs", "nemo", "tm5", "lpjg"], 'EC-EARTH-GrisIS' : ["ifs", "nemo" ], # If a PISM component is added to ece2cmor3 it needs here to be added as well. 'EC-EARTH-AerChem' : ["ifs", "nemo", "tm5" ], 'EC-EARTH-Veg' : ["ifs", "nemo", "lpjg" ], 'EC-EARTH-Veg-LR' : ["ifs", "nemo", "lpjg" ]} def load_parameter_table(component, filename): if component in models: models[component][table_file] = filename
apache-2.0
-4,729,310,459,605,378,000
51.151515
146
0.524114
false
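A small sketch of how the tables in the record above could be consumed. It only uses names defined in that file (models, ece_configs, realms, table_file) and assumes the module is importable as ece2cmor3.components.

# Resolve the realms and parameter-table path for every component of one EC-Earth configuration.
from ece2cmor3 import components

for component in components.ece_configs["EC-EARTH-AOGCM"]:
    info = components.models[component]
    print(component, info[components.realms], info[components.table_file])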
4Kaylum/Steamfront
steamfront/client.py
1
4423
from requests import get as _get from .app import App as _App from .user import User as _User from .errors import AppNotFound as _AppNotFound from .errors import MissingArguments as _MissingArguments class Client(object): ''' Provides a client for you to get apps, users, and other miscellania with. :param apiKey: The key used for API functions. This is not required for all methods, but a good few of them. Defaults to ``None`` if no key is passed on client creation. :type apiKey: Optional[str] ''' def __init__(self, apiKey: str=None): self._apiKey = apiKey self._appList = None # # Populate game list # self._getGamesFromSteam() def _getGamesFromSteam(self) -> list: ''' Gives a list of all games on Steam. ''' # Get the list from the API steamAppList = 'http://api.steampowered.com/ISteamApps/GetAppList/v0001/' dictGames = _get(steamAppList) # Get the list from the dictionary jsonGames = dictGames.json() gameList = jsonGames['applist']['apps']['app'] # Store everything nicely self._appList = gameList return gameList def _getIDOfApp(self, name: str, caseSensitive: bool=True) -> str: ''' Gives the ID of an app whose name you have ''' # Refresh/make the app list if necessary if self._appList == None: self._getGamesFromSteam() sensitive = lambda x, y: x == y['name'] unsensitive = lambda x, y: x.lower() == y['name'].lower() searchStyle = {True: sensitive, False: unsensitive}[caseSensitive] # Iterate through the list and get the game's name. for i in self._appList: if searchStyle(name, i): return i['appid'] # No game found, raise error raise _AppNotFound( 'The name `{}` was not found on the API. Try using an app ID.'.format(name)) def getApp(self, *, name: str=None, appid: str=None, caseSensitive: bool=True) -> _App: ''' Returns a :class:`steamfront.app.App` of the name or app ID that was input to the function. :param str appid: The ID of the app you're getting the object of. :param str name: The name of the app you're getting the object of. May not be 100% accurate. :param bool caseSensitive: Whether or not the name being searched for is case sensitive or not. Has no effect on appid. :return: The object of relevant data on the app. :rtype: :class:`steamfront.app.App` :raises steamfront.errors.MissingArguments: Raised if there is neither a name or an app id passed. :raises steamfront.errors.AppNotFound: Raised if the app or name provided can't be found. ''' if appid is not None: # An app's ID was passed, get its object return _App(appid) elif name is not None: # A name was passed, get its ID and then return its object appid = self._getIDOfApp(name, caseSensitive) return _App(appid) else: # Neither was passed, raise MissingArguments raise _MissingArguments('Missing parameters: `name` or `appid`.') def getUser(self, *, name: str=None, id64: str=None) -> _User: ''' Returns a :class:`steamfront.user.User` of the name or ID64 that was input to the function. :param str id64: The ID64 of a user you want the object of. :param str name: The Steam ID (name) of a user you want the object of. Names are case sensitive. :return: The object of relevant data on the user. :rtype: :class:`steamfront.user.User` :raises steamfront.errors.MissingArguments: Raised if there is neither a name or an ID64 passed. ''' if id64 is not None: # A user's ID64 was passed, get its object return _User(id64, apiKey=self._apiKey) elif name is not None: # A user's name was passed, get its ID64 and then return its object raise NotImplementedError('This is yet to be implemented. 
Please use an ID64.') # id64 = self._getIDOfUser(name) # return _User(id64, apiKey=self._apiKey) else: # Neither was passed, raise MissingArguments raise _MissingArguments('Missing parameters: `name` or `id64`.')
mit
6,877,528,099,360,987,000
37.46087
173
0.620393
false
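A hedged sketch of the Client API documented in the record above. The app name and the ID64 value are placeholders, and live access to the Steam web API is assumed.

# Sketch only: look up an app by name and a user by ID64.
from steamfront.client import Client

client = Client(apiKey=None)
app = client.getApp(name="Team Fortress 2", caseSensitive=True)  # placeholder app name
user = client.getUser(id64="76561197960287930")                  # placeholder ID64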
m-ober/byceps
tests/blueprints/admin/newsletter/test_views.py
1
5127
""" :Copyright: 2006-2020 Jochen Kupperschmidt :License: Modified BSD, see LICENSE for details. """ from datetime import datetime import pytest from byceps.services.newsletter.models import ( SubscriptionUpdate as DbSubscriptionUpdate, ) from byceps.services.newsletter import command_service from byceps.services.newsletter.types import SubscriptionState from tests.helpers import ( assign_permissions_to_user, create_user, http_client, login_user, ) def test_export_subscribers(newsletter_list, subscribers, client): expected_data = { 'subscribers': [ { 'screen_name': 'User-1', 'email_address': '[email protected]', }, # User #2 has declined a subscription, and thus should be # excluded. # User #3 is not initialized, and thus should be excluded. # User #4 has initially declined, but later requested a # subscription, so it should be included. { 'screen_name': 'User-4', 'email_address': '[email protected]', }, # User #5 has initially requested, but later declined a # subscription, so it should be excluded. { 'screen_name': 'User-6', 'email_address': '[email protected]', }, # User #7 has been suspended and should be excluded, regardless # of subscription state. # User #8 has been deleted and should be excluded, regardless # of subscription state. ], } url = f'/admin/newsletter/lists/{newsletter_list.id}/subscriptions/export' response = client.get(url) assert response.status_code == 200 assert response.content_type == 'application/json' assert response.json == expected_data def test_export_subscriber_email_addresses(newsletter_list, subscribers, client): expected_data = '\n'.join([ '[email protected]', # User #2 has declined a subscription. # User #3 is not initialized. # User #4 has initially declined, but later requested a subscription. '[email protected]', # User #5 has initially requested, but later declined a subscription. '[email protected]', # User #7 has been suspended, and thus should be excluded. # User #8 has been deleted, and thus should be excluded. 
]).encode('utf-8') url = f'/admin/newsletter/lists/{newsletter_list.id}/subscriptions/email_addresses/export' response = client.get(url) assert response.status_code == 200 assert response.content_type == 'text/plain; charset=utf-8' assert response.mimetype == 'text/plain' assert response.get_data() == expected_data @pytest.fixture(scope='module') def newsletter_admin(): admin = create_user('NewsletterAdmin') permission_ids = {'admin.access', 'newsletter.export_subscribers'} assign_permissions_to_user(admin.id, 'admin', permission_ids) login_user(admin.id) return admin @pytest.fixture(scope='module') def newsletter_list(app): return command_service.create_list('example', 'Example') @pytest.fixture(scope='module') def subscribers(db, newsletter_list): for number, initialized, suspended, deleted, states in [ (1, True, False, False, [SubscriptionState.requested ]), (2, True, False, False, [SubscriptionState.declined ]), (3, False, False, False, [SubscriptionState.requested ]), (4, True, False, False, [SubscriptionState.declined, SubscriptionState.requested]), (5, True, False, False, [SubscriptionState.requested, SubscriptionState.declined ]), (6, True, False, False, [SubscriptionState.requested ]), (7, True, True , False, [SubscriptionState.requested ]), (8, True, False, True , [SubscriptionState.requested ]), ]: user = create_user( screen_name=f'User-{number:d}', email_address=f'user{number:03d}@example.com', initialized=initialized, ) if suspended: user.suspended = True db.session.commit() if deleted: user.deleted = True db.session.commit() add_subscriptions(db, user.id, newsletter_list.id, states) def add_subscriptions(db, user_id, list_id, states): for state in states: # Timestamp must not be identical for multiple # `(user_id, list_id)` pairs. expressed_at = datetime.utcnow() subscription_update = DbSubscriptionUpdate( user_id, list_id, expressed_at, state ) db.session.add(subscription_update) db.session.commit() @pytest.fixture(scope='module') def client(app, newsletter_admin): """Provide a test HTTP client against the API.""" with http_client(app, user_id=newsletter_admin.id) as client: yield client
bsd-3-clause
68,330,774,726,850,600
32.077419
94
0.610884
false
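A standalone sketch (not byceps code) of the selection rule the fixtures in the record above exercise: only initialized, non-suspended, non-deleted users whose most recently expressed subscription state is "requested" are exported.

def is_exported(initialized, suspended, deleted, states):
    # The last expressed state wins; suspended, deleted and uninitialized users are excluded.
    return (initialized and not suspended and not deleted
            and bool(states) and states[-1] == "requested")

assert is_exported(True, False, False, ["declined", "requested"])       # like User-4
assert not is_exported(True, False, False, ["requested", "declined"])   # like User-5
assert not is_exported(False, False, False, ["requested"])              # like User-3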
goldhand/onegreek
onegreek/old.comments/moderation.py
1
13491
""" A generic comment-moderation system which allows configuration of moderation options on a per-model basis. To use, do two things: 1. Create or import a subclass of ``CommentModerator`` defining the options you want. 2. Import ``moderator`` from this module and register one or more models, passing the models and the ``CommentModerator`` options class you want to use. Example ------- First, we define a simple model class which might represent entries in a Weblog:: from django.db import models class Entry(models.Model): title = models.CharField(maxlength=250) body = models.TextField() pub_date = models.DateField() enable_comments = models.BooleanField() Then we create a ``CommentModerator`` subclass specifying some moderation options:: from comments.moderation import CommentModerator, moderator class EntryModerator(CommentModerator): email_notification = True enable_field = 'enable_comments' And finally register it for moderation:: moderator.register(Entry, EntryModerator) This sample class would apply two moderation steps to each new comment submitted on an Entry: * If the entry's ``enable_comments`` field is set to ``False``, the comment will be rejected (immediately deleted). * If the comment is successfully posted, an email notification of the comment will be sent to site staff. For a full list of built-in moderation options and other configurability, see the documentation for the ``CommentModerator`` class. """ import datetime from django.conf import settings from django.core.mail import send_mail from django.db.models.base import ModelBase from django.template import Context, loader from django.contrib.sites.models import get_current_site from django.utils import timezone import comments import signals class AlreadyModerated(Exception): """ Raised when a model which is already registered for moderation is attempting to be registered again. """ pass class NotModerated(Exception): """ Raised when a model which is not registered for moderation is attempting to be unregistered. """ pass class CommentModerator(object): """ Encapsulates comment-moderation options for a given model. This class is not designed to be used directly, since it doesn't enable any of the available moderation options. Instead, subclass it and override attributes to enable different options:: ``auto_close_field`` If this is set to the name of a ``DateField`` or ``DateTimeField`` on the model for which comments are being moderated, new comments for objects of that model will be disallowed (immediately deleted) when a certain number of days have passed after the date specified in that field. Must be used in conjunction with ``close_after``, which specifies the number of days past which comments should be disallowed. Default value is ``None``. ``auto_moderate_field`` Like ``auto_close_field``, but instead of outright deleting new comments when the requisite number of days have elapsed, it will simply set the ``is_public`` field of new comments to ``False`` before saving them. Must be used in conjunction with ``moderate_after``, which specifies the number of days past which comments should be moderated. Default value is ``None``. ``close_after`` If ``auto_close_field`` is used, this must specify the number of days past the value of the field specified by ``auto_close_field`` after which new comments for an object should be disallowed. Default value is ``None``. ``email_notification`` If ``True``, any new comment on an object of this model which survives moderation will generate an email to site staff. 
Default value is ``False``. ``enable_field`` If this is set to the name of a ``BooleanField`` on the model for which comments are being moderated, new comments on objects of that model will be disallowed (immediately deleted) whenever the value of that field is ``False`` on the object the comment would be attached to. Default value is ``None``. ``moderate_after`` If ``auto_moderate_field`` is used, this must specify the number of days past the value of the field specified by ``auto_moderate_field`` after which new comments for an object should be marked non-public. Default value is ``None``. Most common moderation needs can be covered by changing these attributes, but further customization can be obtained by subclassing and overriding the following methods. Each method will be called with three arguments: ``comment``, which is the comment being submitted, ``content_object``, which is the object the comment will be attached to, and ``request``, which is the ``HttpRequest`` in which the comment is being submitted:: ``allow`` Should return ``True`` if the comment should be allowed to post on the content object, and ``False`` otherwise (in which case the comment will be immediately deleted). ``email`` If email notification of the new comment should be sent to site staff or moderators, this method is responsible for sending the email. ``moderate`` Should return ``True`` if the comment should be moderated (in which case its ``is_public`` field will be set to ``False`` before saving), and ``False`` otherwise (in which case the ``is_public`` field will not be changed). Subclasses which want to introspect the model for which comments are being moderated can do so through the attribute ``_model``, which will be the model class. """ auto_close_field = None auto_moderate_field = None close_after = None email_notification = False enable_field = None moderate_after = None def __init__(self, model): self._model = model def _get_delta(self, now, then): """ Internal helper which will return a ``datetime.timedelta`` representing the time between ``now`` and ``then``. Assumes ``now`` is a ``datetime.date`` or ``datetime.datetime`` later than ``then``. If ``now`` and ``then`` are not of the same type due to one of them being a ``datetime.date`` and the other being a ``datetime.datetime``, both will be coerced to ``datetime.date`` before calculating the delta. """ if now.__class__ is not then.__class__: now = datetime.date(now.year, now.month, now.day) then = datetime.date(then.year, then.month, then.day) if now < then: raise ValueError("Cannot determine moderation rules because date field is set to a value in the future") return now - then def allow(self, comment, content_object, request): """ Determine whether a given comment is allowed to be posted on a given object. Return ``True`` if the comment should be allowed, ``False otherwise. """ if self.enable_field: if not getattr(content_object, self.enable_field): return False if self.auto_close_field and self.close_after is not None: close_after_date = getattr(content_object, self.auto_close_field) if close_after_date is not None and self._get_delta(timezone.now(), close_after_date).days >= self.close_after: return False return True def moderate(self, comment, content_object, request): """ Determine whether a given comment on a given object should be allowed to show up immediately, or should be marked non-public and await approval. Return ``True`` if the comment should be moderated (marked non-public), ``False`` otherwise. 
""" if self.auto_moderate_field and self.moderate_after is not None: moderate_after_date = getattr(content_object, self.auto_moderate_field) if moderate_after_date is not None and self._get_delta(timezone.now(), moderate_after_date).days >= self.moderate_after: return True return False def email(self, comment, content_object, request): """ Send email notification of a new comment to site staff when email notifications have been requested. """ if not self.email_notification: return recipient_list = [manager_tuple[1] for manager_tuple in settings.MANAGERS] t = loader.get_template('comments/comment_notification_email.txt') c = Context({ 'comment': comment, 'content_object': content_object }) subject = '[%s] New comment posted on "%s"' % (get_current_site(request).name, content_object) message = t.render(c) send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, recipient_list, fail_silently=True) class Moderator(object): """ Handles moderation of a set of models. An instance of this class will maintain a list of one or more models registered for comment moderation, and their associated moderation classes, and apply moderation to all incoming comments. To register a model, obtain an instance of ``Moderator`` (this module exports one as ``moderator``), and call its ``register`` method, passing the model class and a moderation class (which should be a subclass of ``CommentModerator``). Note that both of these should be the actual classes, not instances of the classes. To cease moderation for a model, call the ``unregister`` method, passing the model class. For convenience, both ``register`` and ``unregister`` can also accept a list of model classes in place of a single model; this allows easier registration of multiple models with the same ``CommentModerator`` class. The actual moderation is applied in two phases: one prior to saving a new comment, and the other immediately after saving. The pre-save moderation may mark a comment as non-public or mark it to be removed; the post-save moderation may delete a comment which was disallowed (there is currently no way to prevent the comment being saved once before removal) and, if the comment is still around, will send any notification emails the comment generated. """ def __init__(self): self._registry = {} self.connect() def connect(self): """ Hook up the moderation methods to pre- and post-save signals from the comment models. """ signals.comment_will_be_posted.connect(self.pre_save_moderation, sender=comments.get_model()) signals.comment_was_posted.connect(self.post_save_moderation, sender=comments.get_model()) def register(self, model_or_iterable, moderation_class): """ Register a model or a list of models for comment moderation, using a particular moderation class. Raise ``AlreadyModerated`` if any of the models are already registered. """ if isinstance(model_or_iterable, ModelBase): model_or_iterable = [model_or_iterable] for model in model_or_iterable: if model in self._registry: raise AlreadyModerated("The model '%s' is already being moderated" % model._meta.module_name) self._registry[model] = moderation_class(model) def unregister(self, model_or_iterable): """ Remove a model or a list of models from the list of models whose comments will be moderated. Raise ``NotModerated`` if any of the models are not currently registered for moderation. 
""" if isinstance(model_or_iterable, ModelBase): model_or_iterable = [model_or_iterable] for model in model_or_iterable: if model not in self._registry: raise NotModerated("The model '%s' is not currently being moderated" % model._meta.module_name) del self._registry[model] def pre_save_moderation(self, sender, comment, request, **kwargs): """ Apply any necessary pre-save moderation steps to new comments. """ model = comment.content_type.model_class() if model not in self._registry: return content_object = comment.content_object moderation_class = self._registry[model] # Comment will be disallowed outright (HTTP 403 response) if not moderation_class.allow(comment, content_object, request): return False if moderation_class.moderate(comment, content_object, request): comment.is_public = False def post_save_moderation(self, sender, comment, request, **kwargs): """ Apply any necessary post-save moderation steps to new comments. """ model = comment.content_type.model_class() if model not in self._registry: return self._registry[model].email(comment, comment.content_object, request) # Import this instance in your own code to use in registering # your models for moderation. moderator = Moderator()
bsd-3-clause
898,591,411,687,910,100
36.789916
132
0.663924
false
vjFaLk/frappe
frappe/core/doctype/prepared_report/prepared_report.py
1
2876
# -*- coding: utf-8 -*- # Copyright (c) 2018, Frappe Technologies and contributors # For license information, please see license.txt from __future__ import unicode_literals import base64 import json import frappe from frappe.model.document import Document from frappe.utils.background_jobs import enqueue from frappe.desk.query_report import generate_report_result from frappe.utils.file_manager import save_file, remove_all from frappe.desk.form.load import get_attachments from frappe.utils.file_manager import get_file from frappe.utils import gzip_compress, gzip_decompress class PreparedReport(Document): def before_insert(self): self.status = "Queued" self.report_start_time = frappe.utils.now() def enqueue_report(self): enqueue( run_background, prepared_report=self.name, timeout=6000 ) def on_trash(self): remove_all("PreparedReport", self.name, from_delete=True) def run_background(prepared_report): instance = frappe.get_doc("Prepared Report", prepared_report) report = frappe.get_doc("Report", instance.ref_report_doctype) try: report.custom_columns = [] if report.report_type == 'Custom Report': custom_report_doc = report reference_report = custom_report_doc.reference_report report = frappe.get_doc("Report", reference_report) report.custom_columns = custom_report_doc.json result = generate_report_result(report, filters=instance.filters, user=instance.owner) create_json_gz_file(result['result'], 'Prepared Report', instance.name) instance.status = "Completed" instance.columns = json.dumps(result["columns"]) instance.report_end_time = frappe.utils.now() instance.save() except Exception: frappe.log_error(frappe.get_traceback()) instance = frappe.get_doc("Prepared Report", prepared_report) instance.status = "Error" instance.error_message = frappe.get_traceback() instance.save() frappe.publish_realtime( 'report_generated', {"report_name": instance.report_name, "name": instance.name}, user=frappe.session.user ) def create_json_gz_file(data, dt, dn): # Storing data in CSV file causes information loss # Reports like P&L Statement were completely unsuable because of this json_filename = '{0}.json.gz'.format(frappe.utils.data.format_datetime(frappe.utils.now(), "Y-m-d-H:M")) encoded_content = frappe.safe_encode(frappe.as_json(data)) # GZip compression seems to reduce storage requirements by 80-90% compressed_content = gzip_compress(encoded_content) save_file( fname=json_filename, content=compressed_content, dt=dt, dn=dn, folder=None, is_private=False) @frappe.whitelist() def download_attachment(dn): attachment = get_attachments("Prepared Report", dn)[0] frappe.local.response.filename = attachment.file_name[:-2] frappe.local.response.filecontent = gzip_decompress(get_file(attachment.name)[1]) frappe.local.response.type = "binary"
mit
-1,897,790,314,647,639,300
30.26087
105
0.749305
false
babyliynfg/cross
tools/project-creator/Python2.6.6/Lib/test/test_extcall.py
1
7096
# -*- coding: utf-8 -*- """Doctest for method/function calls. We're going the use these types for extra testing >>> from UserList import UserList >>> from UserDict import UserDict We're defining four helper functions >>> def e(a,b): ... print a, b >>> def f(*a, **k): ... print a, test_support.sortdict(k) >>> def g(x, *y, **z): ... print x, y, test_support.sortdict(z) >>> def h(j=1, a=2, h=3): ... print j, a, h Argument list examples >>> f() () {} >>> f(1) (1,) {} >>> f(1, 2) (1, 2) {} >>> f(1, 2, 3) (1, 2, 3) {} >>> f(1, 2, 3, *(4, 5)) (1, 2, 3, 4, 5) {} >>> f(1, 2, 3, *[4, 5]) (1, 2, 3, 4, 5) {} >>> f(1, 2, 3, *UserList([4, 5])) (1, 2, 3, 4, 5) {} Here we add keyword arguments >>> f(1, 2, 3, **{'a':4, 'b':5}) (1, 2, 3) {'a': 4, 'b': 5} >>> f(1, 2, 3, *[4, 5], **{'a':6, 'b':7}) (1, 2, 3, 4, 5) {'a': 6, 'b': 7} >>> f(1, 2, 3, x=4, y=5, *(6, 7), **{'a':8, 'b': 9}) (1, 2, 3, 6, 7) {'a': 8, 'b': 9, 'x': 4, 'y': 5} >>> f(1, 2, 3, **UserDict(a=4, b=5)) (1, 2, 3) {'a': 4, 'b': 5} >>> f(1, 2, 3, *(4, 5), **UserDict(a=6, b=7)) (1, 2, 3, 4, 5) {'a': 6, 'b': 7} >>> f(1, 2, 3, x=4, y=5, *(6, 7), **UserDict(a=8, b=9)) (1, 2, 3, 6, 7) {'a': 8, 'b': 9, 'x': 4, 'y': 5} Examples with invalid arguments (TypeErrors). We're also testing the function names in the exception messages. Verify clearing of SF bug #733667 >>> e(c=4) Traceback (most recent call last): ... TypeError: e() got an unexpected keyword argument 'c' >>> g() Traceback (most recent call last): ... TypeError: g() takes at least 1 argument (0 given) >>> g(*()) Traceback (most recent call last): ... TypeError: g() takes at least 1 argument (0 given) >>> g(*(), **{}) Traceback (most recent call last): ... TypeError: g() takes at least 1 argument (0 given) >>> g(1) 1 () {} >>> g(1, 2) 1 (2,) {} >>> g(1, 2, 3) 1 (2, 3) {} >>> g(1, 2, 3, *(4, 5)) 1 (2, 3, 4, 5) {} >>> class Nothing: pass ... >>> g(*Nothing()) Traceback (most recent call last): ... TypeError: g() argument after * must be a sequence, not instance >>> class Nothing: ... def __len__(self): return 5 ... >>> g(*Nothing()) Traceback (most recent call last): ... TypeError: g() argument after * must be a sequence, not instance >>> class Nothing(): ... def __len__(self): return 5 ... def __getitem__(self, i): ... if i<3: return i ... else: raise IndexError(i) ... >>> g(*Nothing()) 0 (1, 2) {} >>> class Nothing: ... def __init__(self): self.c = 0 ... def __iter__(self): return self ... def next(self): ... if self.c == 4: ... raise StopIteration ... c = self.c ... self.c += 1 ... return c ... >>> g(*Nothing()) 0 (1, 2, 3) {} Make sure that the function doesn't stomp the dictionary >>> d = {'a': 1, 'b': 2, 'c': 3} >>> d2 = d.copy() >>> g(1, d=4, **d) 1 () {'a': 1, 'b': 2, 'c': 3, 'd': 4} >>> d == d2 True What about willful misconduct? >>> def saboteur(**kw): ... kw['x'] = 'm' ... return kw >>> d = {} >>> kw = saboteur(a=1, **d) >>> d {} >>> g(1, 2, 3, **{'x': 4, 'y': 5}) Traceback (most recent call last): ... TypeError: g() got multiple values for keyword argument 'x' >>> f(**{1:2}) Traceback (most recent call last): ... TypeError: f() keywords must be strings >>> h(**{'e': 2}) Traceback (most recent call last): ... TypeError: h() got an unexpected keyword argument 'e' >>> h(*h) Traceback (most recent call last): ... TypeError: h() argument after * must be a sequence, not function >>> dir(*h) Traceback (most recent call last): ... TypeError: dir() argument after * must be a sequence, not function >>> None(*h) Traceback (most recent call last): ... 
TypeError: NoneType object argument after * must be a sequence, \ not function >>> h(**h) Traceback (most recent call last): ... TypeError: h() argument after ** must be a mapping, not function >>> dir(**h) Traceback (most recent call last): ... TypeError: dir() argument after ** must be a mapping, not function >>> None(**h) Traceback (most recent call last): ... TypeError: NoneType object argument after ** must be a mapping, \ not function >>> dir(b=1, **{'b': 1}) Traceback (most recent call last): ... TypeError: dir() got multiple values for keyword argument 'b' Another helper function >>> def f2(*a, **b): ... return a, b >>> d = {} >>> for i in xrange(512): ... key = 'k%d' % i ... d[key] = i >>> a, b = f2(1, *(2,3), **d) >>> len(a), len(b), b == d (3, 512, True) >>> class Foo: ... def method(self, arg1, arg2): ... return arg1+arg2 >>> x = Foo() >>> Foo.method(*(x, 1, 2)) 3 >>> Foo.method(x, *(1, 2)) 3 >>> Foo.method(*(1, 2, 3)) Traceback (most recent call last): ... TypeError: unbound method method() must be called with Foo instance as \ first argument (got int instance instead) >>> Foo.method(1, *[2, 3]) Traceback (most recent call last): ... TypeError: unbound method method() must be called with Foo instance as \ first argument (got int instance instead) A PyCFunction that takes only positional parameters shoud allow an empty keyword dictionary to pass without a complaint, but raise a TypeError if te dictionary is not empty >>> try: ... silence = id(1, *{}) ... True ... except: ... False True >>> id(1, **{'foo': 1}) Traceback (most recent call last): ... TypeError: id() takes no keyword arguments """ import unittest from test import test_support class UnicodeKeywordArgsTest(unittest.TestCase): def test_unicode_keywords(self): def f(a): return a self.assertEqual(f(**{u'a': 4}), 4) self.assertRaises(TypeError, f, **{u'stören': 4}) self.assertRaises(TypeError, f, **{u'someLongString':2}) try: f(a=4, **{u'a': 4}) except TypeError: pass else: self.fail("duplicate arguments didn't raise") def test_main(): from test import test_extcall # self import test_support.run_doctest(test_extcall, True) test_support.run_unittest(UnicodeKeywordArgsTest) if __name__ == '__main__': test_main()
mit
-570,431,911,954,967,740
23.159574
77
0.475828
false
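A brief standalone illustration of the extended call syntax the doctests above cover, written in the same Python 2 style as the record.

def g(x, *y, **z):
    return x, y, z

# Positional arguments, *-expansion and **-expansion are merged into one call.
print g(1, 2, 3, *(4, 5), **{'a': 6})   # -> (1, (2, 3, 4, 5), {'a': 6})
print g(*[7], **dict(b=8))              # -> (7, (), {'b': 8})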
fedora-conary/conary
conary/deps/deps.py
1
63625
# # Copyright (c) SAS Institute Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import itertools import re import weakref from conary.lib import api from conary.lib.ext import dep_freeze from conary.errors import ParseError DEP_CLASS_ABI = 0 DEP_CLASS_IS = 1 DEP_CLASS_OLD_SONAME = 2 DEP_CLASS_FILES = 3 DEP_CLASS_TROVES = 4 DEP_CLASS_USE = 5 DEP_CLASS_SONAME = 6 DEP_CLASS_USERINFO = 7 DEP_CLASS_GROUPINFO = 8 DEP_CLASS_CIL = 9 DEP_CLASS_JAVA = 10 DEP_CLASS_PYTHON = 11 DEP_CLASS_PERL = 12 DEP_CLASS_RUBY = 13 DEP_CLASS_PHP = 14 DEP_CLASS_TARGET_IS = 15 DEP_CLASS_RPM = 16 DEP_CLASS_RPMLIB = 17 DEP_CLASS_SENTINEL = 18 DEP_CLASS_NO_FLAGS = 0 DEP_CLASS_HAS_FLAGS = 1 DEP_CLASS_OPT_FLAGS = 2 FLAG_SENSE_UNSPECIFIED = 0 # used FlavorScore indices FLAG_SENSE_REQUIRED = 1 FLAG_SENSE_PREFERRED = 2 FLAG_SENSE_PREFERNOT = 3 FLAG_SENSE_DISALLOWED = 4 DEP_MERGE_TYPE_NORMAL = 1 # conflicts are reported DEP_MERGE_TYPE_OVERRIDE = 2 # new data wins DEP_MERGE_TYPE_PREFS = 3 # like override, but a new !ssl loses # to an old ~!ssl and a new ~!ssl # loses to an old !ssl DEP_MERGE_TYPE_DROP_CONFLICTS = 4 # conflicting flags are removed senseMap = { FLAG_SENSE_REQUIRED : "", FLAG_SENSE_PREFERRED : "~", FLAG_SENSE_PREFERNOT : "~!", FLAG_SENSE_DISALLOWED : "!" } toStrongMap = { FLAG_SENSE_REQUIRED : FLAG_SENSE_REQUIRED, FLAG_SENSE_PREFERRED : FLAG_SENSE_REQUIRED, FLAG_SENSE_PREFERNOT : FLAG_SENSE_DISALLOWED, FLAG_SENSE_DISALLOWED : FLAG_SENSE_DISALLOWED } toWeakMap = { FLAG_SENSE_REQUIRED : FLAG_SENSE_PREFERRED, FLAG_SENSE_PREFERRED : FLAG_SENSE_PREFERRED, FLAG_SENSE_PREFERNOT : FLAG_SENSE_PREFERNOT, FLAG_SENSE_DISALLOWED : FLAG_SENSE_PREFERNOT } strongSenses = set((FLAG_SENSE_REQUIRED, FLAG_SENSE_DISALLOWED)) senseReverseMap = {} for key, val in senseMap.iteritems(): senseReverseMap[val] = key class DependencyClassRegistry(dict): def __getitem__(self, key): return self.get(key, UnknownDependencyFactory(key)) dependencyClasses = DependencyClassRegistry() dependencyClassesByName = {} def _registerDepClass(classObj): global dependencyClasses classObj.compileRegexp() dependencyClasses[classObj.tag] = classObj dependencyClassesByName[classObj.tagName] = classObj class BaseDependency(object): __slots__ = ( '__weakref__' ) """ Implements a single dependency. This is relative to a DependencyClass, which is part of a DependencySet. Dependency Sets can be frozen and thawed. These are hashable, directly comparable, and implement a satisfies() method. 
""" def __hash__(self): raise NotImplementedError def __eq__(self, other): raise NotImplementedError def __str__(self): raise NotImplementedError def freeze(self): raise NotImplementedError def satisfies(self, required): raise NotImplementedError def mergeFlags(self, other): raise NotImplementedError def getName(self): raise NotImplementedError def getFlags(self): raise NotImplementedError def __init__(self): raise NotImplementedError class Dependency(BaseDependency): __slots__ = ( 'name', 'flags', ) def __hash__(self): val = hash(self.name) for flag in self.flags.iterkeys(): val ^= hash(flag) return val def __eq__(self, other): return other.name == self.name and other.flags == self.flags def __cmp__(self, other): return (cmp(self.name, other.name) or cmp(sorted(self.flags.iteritems()), sorted(other.flags.iteritems()))) def __str__(self): if self.flags: flags = self.flags.items() flags.sort() return "%s(%s)" % (self.name, " ".join([ "%s%s" % (senseMap[x[1]], x[0]) for x in flags])) else: return self.name def __repr__(self): if self.flags: return "Dependency('%s', flags=%s)" % (self.name, self.flags) else: return "Dependency('%s')" % (self.name) def score(self, required): """ Returns a flavor matching score. This dependency is considered the "system" and the other is the flavor of the trove. In terms of dependencies, this set "provides" and the other "requires". False is returned if the two dependencies conflict. """ if self.name != required.name: return False score = 0 for (requiredFlag, requiredSense) in required.flags.iteritems(): thisSense = self.flags.get(requiredFlag, FLAG_SENSE_UNSPECIFIED) thisScore = flavorScores[(thisSense, requiredSense)] if thisScore is None: return False score += thisScore return score def emptyDepsScore(self): """ Like score where this trove is the "requires" and the other trove provides nothing. If all the requires are negative, (!foo) this could return something other than False """ score = 0 if not self.flags: # if there are no flags associated with this dependency, # then missing the base dep has to be enough to disqualify this # flavor return False for (requiredFlag, requiredSense) in self.flags.iteritems(): thisScore = flavorScores[(FLAG_SENSE_UNSPECIFIED, requiredSense)] if thisScore is None: return False score += thisScore return score def satisfies(self, required): """ Returns whether or not this dependency satisfies the argument (which is a requires). @type required: Dependency """ return self.score(required) is not False def toStrongFlavor(self): newFlags = self.flags.copy() for (flag, sense) in self.flags.iteritems(): newFlags[flag] = toStrongMap[sense] return Dependency(self.name, newFlags) def intersection(self, other, strict=True): """ Performs the intersection between the two dependencies, returning a dependency with only those flags in both dependencies. If strict is False, ignore the difference between ~foo and foo, returning with the flag set as it is in self. """ intFlags = {} for (flag, sense) in other.flags.iteritems(): if flag in self.flags: if strict: if self.flags[flag] == sense: intFlags[flag] = sense elif toStrongMap[self.flags[flag]] == toStrongMap[sense]: intFlags[flag] = toStrongMap[sense] if not intFlags: if self.flags != other.flags: return None return Dependency(self.name, intFlags) def __and__(self, other): return self.intersection(other) def difference(self, other, strict=True): """ Performs the difference between the two dependencies, returning a dependency with only those flags in self but not in other. 
If strict is false, also remove flags that differ only in the strength of the sense, but not its direction (e.g. ~!foo and !foo). """ diffFlags = self.flags.copy() if not strict: unseenFlags = set(self.flags.iterkeys()) else: unseenFlags = set() for flag, sense in other.flags.iteritems(): if flag in diffFlags: if strict: if sense == diffFlags[flag]: del diffFlags[flag] elif toStrongMap[sense] == toStrongMap[diffFlags[flag]]: del diffFlags[flag] unseenFlags.discard(flag) #for flag in unseenFlags: # if diffFlags[flag] in (FLAG_SENSE_PREFERNOT, FLAG_SENSE_PREFERRED): # del diffFlags[flag] if not diffFlags: return None else: return Dependency(self.name, diffFlags) def __sub__(self, other): return self.difference(other) def mergeFlags(self, other, mergeType = DEP_MERGE_TYPE_NORMAL): """ Returns a new Dependency which merges the flags from the two existing dependencies. We don't want to merge in place as this Dependency could be shared between many objects (via a DependencyGroup). """ allFlags = self.flags.copy() for (flag, otherSense) in other.flags.iteritems(): if mergeType == DEP_MERGE_TYPE_OVERRIDE or flag not in allFlags: allFlags[flag] = otherSense continue thisSense = allFlags[flag] if thisSense == otherSense: # same flag, same sense continue thisStrong = thisSense in strongSenses otherStrong = otherSense in strongSenses if thisStrong == otherStrong: if mergeType == DEP_MERGE_TYPE_DROP_CONFLICTS: del allFlags[flag] continue elif mergeType == DEP_MERGE_TYPE_PREFS: # in cases where there's a conflict, new wins allFlags[flag] = otherSense continue thisFlag = "%s%s" % (senseMap[thisSense], flag) otherFlag = "%s%s" % (senseMap[otherSense], flag) raise RuntimeError, ("Invalid flag combination in merge:" " %s and %s" % (thisFlag, otherFlag)) if mergeType == DEP_MERGE_TYPE_PREFS: if thisStrong and toStrongMap[otherSense] == thisSense: continue allFlags[flag] = toWeakMap[otherSense] continue # know they aren't the same, and they are compatible elif thisStrong: continue elif otherStrong: allFlags[flag] = otherSense continue # we shouldn't end up here assert(0) return Dependency(self.name, allFlags) def getName(self): return (self.name,) def getFlags(self): return (self.flags.items(),) def __init__(self, name, flags = []): self.name = name if type(flags) == dict: self.flags = flags else: self.flags = {} for (flag, sense) in flags: self.flags[flag] = sense class DependencyClass(object): __slots__ = ( 'members', ) depFormat = 'WORD' flagFormat = 'WORD' WORD = '(?:[.0-9A-Za-z_+-]+)' IDENT = '(?:[0-9A-Za-z_-]+)' flags = DEP_CLASS_NO_FLAGS depNameSignificant = True # if True, means that the name of the dependencies in the class hold # significance. This is important for comparing a dep set with all # negative flags for this dependency class (say use(!krb)) against # no dependencies of this dependency class. In the use flag case, # the dep name is not significant, for other dep classes, the dep name # does matter. allowParseDep = True @classmethod def compileRegexp(class_): """ Class method that takes the abstract information about the format of this dependency class and turns it into a regexp that will match dep strings that can be parsed into a dependency of this class. """ if not class_.allowParseDep: return d = dict(flagFormat=class_.flagFormat, depFormat=class_.depFormat, WORD=class_.WORD, IDENT=class_.IDENT) # zero or more space-separated flags flagFmt = '(?:\( *(%(flagFormat)s?(?: +%(flagFormat)s)*) *\))?' 
# add ^ and $ to ensure we match the entire string passed in regexp = ('^ *(%(depFormat)s) *' + flagFmt + ' *$') % d # word is a slightly larger group of chars than ident - # includes . and +, because those are used in paths and # sonames. May need to be larger some day, and probably # could be more restrictive for some groups. Should not contain # /, as that's used as a special char in many dep classes. regexp = regexp.replace('WORD', d['WORD']) regexp = regexp.replace('IDENT',d['IDENT']) class_.regexpStr = regexp class_.regexp = re.compile(regexp) @classmethod def parseDep(class_, s): """ Parses a dependency string of this class and returns the result. Raises a ParseError on failure. """ if not class_.allowParseDep: raise ParseError, "Invalid dependency class %s" % class_.tagName match = class_.regexp.match(s) if match is None: raise ParseError, "Invalid %s dependency: '%s'" % (class_.tagName, s) depName, flagStr = match.groups() # a dep is <depName>[(<flagStr>)] # flagStr is None if () not # in the depStr flags = [] if class_.flags == DEP_CLASS_NO_FLAGS: if flagStr is not None: # the dep string specified at least () - # not allowed when the dep has no flags raise ParseError, ("bad %s dependency '%s':" " flags not allowed" % (class_.tagName, s)) elif flagStr: flags = [ (x, FLAG_SENSE_REQUIRED) for x in flagStr.split()] elif class_.flags == DEP_CLASS_HAS_FLAGS: raise ParseError, ("bad %s dependency '%s':" " flags required" % (class_.tagName, s)) else: assert(class_.flags == DEP_CLASS_OPT_FLAGS) return Dependency(depName, flags) def addDep(self, dep, mergeType = DEP_MERGE_TYPE_NORMAL): assert(dep.__class__.__name__ == self.depClass.__name__) if dep.name in self.members: # this is a little faster then doing all of the work when # we could otherwise avoid it if dep == self.members[dep.name]: return # merge the flags, and add the newly created dependency # into the class dep = self.members[dep.name].mergeFlags(dep, mergeType = mergeType) self.members[dep.name] = dep assert(not self.justOne or len(self.members) == 1) def hasDep(self, name): return name in self.members def score(self, requirements): if self.tag != requirements.tag: return False score = 0 for requiredDep in requirements.members.itervalues(): if requiredDep.name not in self.members: if self.depNameSignificant: # dependency names are always 'requires', so if the # dependency class name is significant (i.e. the dep # class is only defined by its flags) the empty deps cannot # match. Otherwise, we use the empty deps score for the # flags return False thisScore = requiredDep.emptyDepsScore() else: thisScore = self.members[requiredDep.name].score(requiredDep) if thisScore is False: return False score += thisScore if self.depNameSignificant: score += 1 return score def emptyDepsScore(self): score = 0 if self.depNameSignificant: # dependency names are always 'requires', so if the # dependency class name is significant (i.e. the dep # class is only defined by its flags) the empty deps cannot # match. 
Otherwise, we use the empty deps score for the flags return False for requiredDep in self.members.itervalues(): thisScore = requiredDep.emptyDepsScore() if thisScore is False: return False score += thisScore return thisScore def toStrongFlavor(self): newDepClass = self.__class__() a = newDepClass.addDep for dep in self.members.values(): a(dep.toStrongFlavor()) return newDepClass def satisfies(self, requirements): return self.score(requirements) is not False def union(self, other, mergeType = DEP_MERGE_TYPE_NORMAL): if other is None: return a = self.addDep for otherdep in other.members.itervalues(): # calling this for duplicates is a noop a(otherdep, mergeType = mergeType) def __and__(self, other): return self.intersection(other) def intersection(self, other, strict=True): newDepClass = self.__class__() a = newDepClass.addDep found = False for tag, dep in self.members.iteritems(): if tag in other.members: dep = dep.intersection(other.members[tag], strict=strict) if dep is None: a(Dependency(tag)) else: a(dep) found = True if found: return newDepClass return None def difference(self, other, strict=True): newDepClass = self.__class__() a = newDepClass.addDep found = False for tag, dep in self.members.iteritems(): if tag in other.members: diff = dep.difference(other.members[tag], strict=strict) if diff is None: continue a(diff) else: newDepClass.addDep(dep) found = True if found: return newDepClass else: return None def __sub__(self, other): return self.difference(other) def getDeps(self): # sort by name for name, dep in sorted(self.members.iteritems()): yield dep def thawDependency(frozen): cached = dependencyCache.get(frozen, None) if cached: return cached name, flags = dep_freeze.depSplit(frozen) for i, flag in enumerate(flags): kind = flag[0:2] if kind == '~!': flags[i] = (flag[2:], FLAG_SENSE_PREFERNOT) elif kind[0] == '!': flags[i] = (flag[1:], FLAG_SENSE_DISALLOWED) elif kind[0] == '~': flags[i] = (flag[1:], FLAG_SENSE_PREFERRED) else: flags[i] = (flag, FLAG_SENSE_REQUIRED) d = Dependency(name, flags) dependencyCache[frozen] = d return d thawDependency = staticmethod(thawDependency) def __hash__(self): val = self.tag for dep in self.members.itervalues(): val ^= hash(dep) return val def __eq__(self, other): if other is None: return False return self.tag == other.tag and \ self.members == other.members def __cmp__(self, other): rv = cmp(sorted(self.members), sorted(other.members)) if rv: return rv for name, dep in self.members.iteritems(): rv = cmp(dep, other.members[name]) if rv: return rv return 0 def __ne__(self, other): return not self == other def __str__(self): memberList = self.members.items() memberList.sort() return "\n".join([ "%s: %s" % (self.tagName, dep[1]) for dep in memberList ]) def __init__(self): self.members = {} class AbiDependency(DependencyClass): __slots__ = () tag = DEP_CLASS_ABI tagName = "abi" justOne = False depClass = Dependency flags = DEP_CLASS_HAS_FLAGS _registerDepClass(AbiDependency) class InstructionSetDependency(DependencyClass): __slots__ = () tag = DEP_CLASS_IS tagName = "is" justOne = False depClass = Dependency allowParseDep = False flags = DEP_CLASS_HAS_FLAGS _registerDepClass(InstructionSetDependency) class TargetInstructionSetDependency(DependencyClass): __slots__ = () tag = DEP_CLASS_TARGET_IS tagName = "target" justOne = False depClass = Dependency allowParseDep = False flags = DEP_CLASS_HAS_FLAGS _registerDepClass(TargetInstructionSetDependency) class OldSonameDependencies(DependencyClass): __slots__ = () tag = DEP_CLASS_OLD_SONAME tagName = 
"oldsoname" justOne = False depClass = Dependency allowParseDep = False _registerDepClass(OldSonameDependencies) class SonameDependencies(DependencyClass): __slots__ = () tag = DEP_CLASS_SONAME tagName = "soname" justOne = False depClass = Dependency depFormat = 'IDENT(?:/WORD)*/WORD' flags = DEP_CLASS_HAS_FLAGS _registerDepClass(SonameDependencies) class UserInfoDependencies(DependencyClass): __slots__ = () tag = DEP_CLASS_USERINFO tagName = "userinfo" justOne = False depClass = Dependency flags = DEP_CLASS_NO_FLAGS _registerDepClass(UserInfoDependencies) class GroupInfoDependencies(DependencyClass): __slots__ = () tag = DEP_CLASS_GROUPINFO tagName = "groupinfo" justOne = False depClass = Dependency flags = DEP_CLASS_NO_FLAGS _registerDepClass(GroupInfoDependencies) class CILDependencies(DependencyClass): __slots__ = () tag = DEP_CLASS_CIL tagName = "CIL" justOne = False depClass = Dependency flags = DEP_CLASS_HAS_FLAGS depFormat = 'IDENT(?:\.IDENT)*' # foo[.bar]* flagFormat = '[0-9.]+' # 0-9[.0-9]* _registerDepClass(CILDependencies) class JavaDependencies(DependencyClass): __slots__ = () tag = DEP_CLASS_JAVA tagName = "java" justOne = False depClass = Dependency flags = DEP_CLASS_OPT_FLAGS _registerDepClass(JavaDependencies) class PythonDependencies(DependencyClass): __slots__ = () tag = DEP_CLASS_PYTHON tagName = "python" justOne = False depClass = Dependency flags = DEP_CLASS_OPT_FLAGS _registerDepClass(PythonDependencies) class PerlDependencies(DependencyClass): __slots__ = () tag = DEP_CLASS_PERL tagName = "perl" justOne = False depClass = Dependency depFormat = 'WORD(?:::WORD)*' # foo[::bar]* including foo::bar::baz flags = DEP_CLASS_OPT_FLAGS _registerDepClass(PerlDependencies) class RubyDependencies(DependencyClass): __slots__ = () tag = DEP_CLASS_RUBY tagName = "ruby" justOne = False depClass = Dependency flags = DEP_CLASS_OPT_FLAGS _registerDepClass(RubyDependencies) class PhpDependencies(DependencyClass): __slots__ = () tag = DEP_CLASS_PHP tagName = "php" justOne = False depClass = Dependency flags = DEP_CLASS_OPT_FLAGS _registerDepClass(PhpDependencies) class FileDependencies(DependencyClass): __slots__ = () tag = DEP_CLASS_FILES tagName = "file" justOne = False depClass = Dependency flags = DEP_CLASS_NO_FLAGS depFormat = '(?:/WORD)+' # /path[/path]* _registerDepClass(FileDependencies) class TroveDependencies(DependencyClass): __slots__ = () tag = DEP_CLASS_TROVES tagName = "trove" justOne = False depClass = Dependency flags = DEP_CLASS_OPT_FLAGS depFormat = 'WORD(?::IDENT)?' 
# trove[:comp] _registerDepClass(TroveDependencies) class UseDependency(DependencyClass): __slots__ = () tag = DEP_CLASS_USE tagName = "use" justOne = True depClass = Dependency allowParseDep = False depNameSignificant = False _registerDepClass(UseDependency) class RpmDependencies(DependencyClass): __slots__ = () tag = DEP_CLASS_RPM tagName = "rpm" justOne = False depClass = Dependency flags = DEP_CLASS_OPT_FLAGS WORD = '(?:[:.0-9A-Za-z_+-]+)' # allow colons in flags IDENT = r'(?:[][:.0-9A-Za-z_-]+)' # allow [] and colons in dep names flagFormat = "WORD" depFormat = "IDENT" _registerDepClass(RpmDependencies) class RpmLibDependencies(DependencyClass): __slots__ = () tag = DEP_CLASS_RPMLIB tagName = "rpmlib" justOne = False depClass = Dependency flags = DEP_CLASS_OPT_FLAGS _registerDepClass(RpmLibDependencies) def UnknownDependencyFactory(intTag): # Factory for unknown classes class _UnknownDependency(DependencyClass): tag = intTag tagName = "unknown-%s" % intTag depClass = Dependency justOne = False return _UnknownDependency #ld = {} class DependencySet(object): __slots__ = ( '_members', '_hash' ) def _getMembers(self): m = self._members if type(m) == str: #if False: # import sys, os # f = sys._getframe(2) # fn = f.f_code.co_filename # ln = f.f_lineno # global ld # if (fn, ln) not in ld: # l = open(fn).readlines()[ln - 1] # ld[(fn, ln)] = "%s:%s %s" % (os.path.basename(fn), ln, l) # if not l.strip(): # import epdb;epdb.st() # print ld[(fn, ln)], self._thaw() m = self._members return m members = property(_getMembers) def addDep(self, depClass, dep): assert(isinstance(dep, Dependency)) self._hash = None tag = depClass.tag c = self.members.setdefault(tag, depClass()) c.addDep(dep) def addDeps(self, depClass, deps): self._hash = None tag = depClass.tag c = self.members.setdefault(tag, depClass()) for dep in deps: c.addDep(dep) def iterDeps(self, sort=False): # since this is in an tight loop in some places, avoid overhead # of continual checks on the sort variable. if sort: for _, depClass in sorted(self.members.iteritems()): for _, dep in sorted(depClass.members.iteritems()): yield depClass.__class__, dep else: for depClass in self.members.itervalues(): for dep in depClass.members.itervalues(): yield depClass.__class__, dep def iterDepsByClass(self, depClass): if depClass.tag in self.members: c = self.members[depClass.tag] for dep in c.members.itervalues(): yield dep def iterRawDeps(self): # returns deps as (classId, name, flags) tuples if type(self._members) == str: next = 0 end = len(self._members) while next < end: (next, classId, depStr) = dep_freeze.depSetSplit( next, self._members) (name, flags) = dep_freeze.depSplit(depStr) yield (classId, name, flags) else: for depClass, oneDep in self.iterDeps(): yield (depClass.tag, oneDep.getName()[0], oneDep.getFlags()[0]) def hasDepClass(self, depClass): return depClass.tag in self.members def removeDeps(self, depClass, deps, missingOkay = False): self._hash = None if missingOkay and depClass.tag not in self.members: return c = self.members[depClass.tag] if missingOkay: for dep in deps: c.members.pop(dep.name, None) else: for dep in deps: del c.members[dep.name] if not self.members[depClass.tag].members: del self.members[depClass.tag] def removeDepsByClass(self, depClass): self._hash = None self.members.pop(depClass.tag, None) def addEmptyDepClass(self, depClass): """ adds an empty dependency class, which for flavors has different semantics when merging than not having a dependency class. 
See mergeFlavors """ self._hash = None tag = depClass.tag assert(tag not in self.members) self.members[tag] = depClass() def _thaw(self): frz = self._members self._members = dict() if not frz: return i = 0 a = self.addDep depSetSplit = dep_freeze.depSetSplit while i < len(frz): (i, tag, frozen) = depSetSplit(i, frz) depClass = dependencyClasses[tag] a(depClass, depClass.thawDependency(frozen)) def copy(self): new = self.__class__() if type(self._members) == str: new.thaw(self._members) else: add = new.addDep for depClass in self.members.itervalues(): cls = depClass.__class__ for dep in depClass.members.itervalues(): add(cls, dep) return new __copy__ = lambda s, x: s.copy() __deepcopy__ = lambda s, x: s.copy() def getDepClasses(self): return self.members def union(self, other, mergeType = DEP_MERGE_TYPE_NORMAL): if other is None: return assert(isinstance(other, self.__class__) or isinstance(self, other.__class__)) self._hash = None a = self.addDep for tag, members in other.members.iteritems(): c = members.__class__ if tag in self.members: self.members[tag].union(members, mergeType = mergeType) # If we're dropping conflicts, we might drop this class # of troves all together. if (mergeType == DEP_MERGE_TYPE_DROP_CONFLICTS and c.justOne and not self.members[tag].members.values()[0].flags): del self.members[tag] else: for dep in members.members.itervalues(): a(c, dep) def intersection(self, other, strict=True): assert(hasattr(other, '_members')) newDep = self.__class__() for tag, depClass in self.members.iteritems(): if tag in other.members: dep = depClass.intersection(other.members[tag], strict=strict) if dep is None: continue newDep.members[depClass.tag] = dep return newDep def __and__(self, other): return self.intersection(other) def difference(self, other, strict=True): assert(isinstance(other, self.__class__) or isinstance(self, other.__class__)) newDep = self.__class__() a = newDep.addDep for tag, depClass in self.members.iteritems(): c = depClass.__class__ if tag in other.members: dep = depClass.difference(other.members[tag], strict=strict) if dep is not None: newDep.members[tag] = dep else: for dep in depClass.members.itervalues(): a(c, dep) return newDep def __sub__(self, other): return self.difference(other) def score(self, other): # XXX this should force the classes to be the same, but the # flavor and DependencySet split would cause too much breakage # right now if we enforced that. We test for DependencySet # instead of self.__class__ assert(isinstance(other, DependencySet)) score = 0 for tag in other.members: # ignore empty dep classes when scoring if not other.members[tag].members: continue if tag not in self.members: thisScore = other.members[tag].emptyDepsScore() else: thisScore = self.members[tag].score(other.members[tag]) if thisScore is False: return False score += thisScore return score def satisfies(self, other): return self.score(other) is not False def __eq__(self, other, skipSet = None): if other is None: return False # No much sense in comparing stuff that is not the same class as ours; # it also breaks epydoc (CNY-1772). We don't compare the classes # here (even though it would be far more sensible) because conary # compares DependencySet objects to Flavors sometimes. Which is, # of course, awful. 
if not hasattr(other, '_members'): return False if type(self._members) == dict and type(other._members) == dict: if set(other.members.iterkeys()) != set(self.members.iterkeys()): return False for tag in other.members: if not self.members[tag] == other.members[tag]: return False return True if type(self._members) == dict: self._members = self.freeze() elif type(other._members) == dict: other._members = other.freeze() return self._members == other._members def __cmp__(self, other): if other is None: return -1 # XXX this should force the classes to be the same, but the # flavor and DependencySet split would cause too much breakage # right now if we enforced that. We test for DependencySet # instead of self.__class__ assert(isinstance(other, DependencySet)) myMembers = self.members otherMembers = other.members tags = [] for tag in xrange(DEP_CLASS_SENTINEL): if tag in myMembers: if tag in otherMembers: tags.append(tag) else: return -1 elif tag in otherMembers: return 1 # at this point we know we have the same dep classes. for tag in tags: myDepClass = myMembers[tag] otherDepClass = otherMembers[tag] # depClass compares keys first, then values, # exactly what we want here. rv = cmp(myDepClass, otherDepClass) if rv: return rv return 0 def __ne__(self, other): return not self == other def __hash__(self): if self._hash is None: self._hash = hash(self.freeze()) return self._hash def __nonzero__(self): return not(not(self._members)) def __str__(self): memberList = self.members.items() memberList.sort() return "\n".join([ str(x[1]) for x in memberList]) def freeze(self, skipSet = None): if type(self._members) == str: return self._members else: return dep_freeze.depSetFreeze(self.members) def isEmpty(self): return not(self._members) def __repr__(self): return "ThawDep('%s')" % self.freeze() def __getstate__(self): # If this method returns a false value (like the empty string) then # __setstate__ is not called, so we have to return something non-empty. 
return (self.freeze(),) def __setstate__(self, frozen): self.thaw(frozen[0]) def __init__(self, frz = None): if frz is not None: frz = intern(frz) self._members = frz else: self._members = '' self._hash = None thaw = __init__ # A special class for representing Flavors class Flavor(DependencySet): __slots__ = () def __repr__(self): return "Flavor('%s')" % formatFlavor(self) def __str__(self): return formatFlavor(self) def __nonzero__(self): # prohibit evaluating Flavor instances in boolean contexts raise SyntaxError, \ "Flavor objects can't be evaluated in a boolean context" @api.developerApi def toStrongFlavor(self): newDep = self.__class__() for tag, depClass in self.members.iteritems(): newDep.members[tag] = depClass.toStrongFlavor() return newDep @api.developerApi def stronglySatisfies(self, other): return self.toStrongFlavor().score( other.toStrongFlavor()) is not False def ThawDependencySet(frz): return DependencySet(frz) @api.publicApi def ThawFlavor(frz): """ @param frz: the frozen representation of a flavor @return: a thawed Flavor object @rtype: L{deps.deps.Flavor} @raises TypeError: could be raised if frozen object is malformed @raises ValueError: could be raised if frozen object is malformed """ f = Flavor() if isinstance(frz, unicode): try: frz = frz.encode("ascii") except UnicodeEncodeError: raise ParseError, ("invalid characters in flavor '%s'" % frz) f.thaw(frz) return f @api.developerApi def overrideFlavor(oldFlavor, newFlavor, mergeType=DEP_MERGE_TYPE_OVERRIDE): """ Performs overrides of flavors as expected when the new flavor is specified by a user -- the user's flavor overrides use flags, and if the user specifies any instruction sets, only those instruction sets will be in the final flavor. Flags for the specified instruction sets are merged with the old flavor. """ flavor = oldFlavor.copy() ISD = InstructionSetDependency TISD = TargetInstructionSetDependency for depClass in (ISD, TISD): if (flavor.hasDepClass(depClass) and newFlavor.hasDepClass(depClass)): arches = set() for dep in newFlavor.iterDepsByClass(depClass): arches.add(dep.name) oldArches = [] for dep in oldFlavor.iterDepsByClass(depClass): if dep.name not in arches: oldArches.append(dep) flavor.removeDeps(depClass, oldArches) flavor.union(newFlavor, mergeType=mergeType) return flavor def _mergeDeps(depList, mergeType): """ Returns a new Dependency which merges the flags from the two existing dependencies. We don't want to merge in place as this Dependency could be shared between many objects (via a DependencyGroup). 
""" name = depList[0].name flags = {} for dep in depList: assert(dep.name == name) for flag, sense in dep.flags.iteritems(): flags.setdefault(flag, []).append(sense) finalFlags = {} for flag, senses in flags.iteritems(): if mergeType == DEP_MERGE_TYPE_OVERRIDE: finalFlags[flag] = senses[-1] continue if FLAG_SENSE_REQUIRED in senses: posSense = FLAG_SENSE_REQUIRED strongestPos = 2 elif FLAG_SENSE_PREFERRED in senses: posSense = FLAG_SENSE_PREFERRED strongestPos = 1 else: strongestPos = 0 posSense = FLAG_SENSE_UNSPECIFIED if FLAG_SENSE_DISALLOWED in senses: negSense = FLAG_SENSE_DISALLOWED strongestNeg = 2 elif FLAG_SENSE_PREFERNOT in senses: negSense = FLAG_SENSE_PREFERNOT strongestNeg = 1 else: strongestNeg = 0 negSense = FLAG_SENSE_UNSPECIFIED if strongestNeg == strongestPos: if mergeType == DEP_MERGE_TYPE_DROP_CONFLICTS: continue if mergeType == DEP_MERGE_TYPE_PREFS: for sense in reversed(senses): if sense in (posSense, negSense): finalFlags[flag] = sense break continue else: thisFlag = "%s%s" % (senseMap[negSense], flag) otherFlag = "%s%s" % (senseMap[posSense], flag) raise RuntimeError, ("Invalid flag combination in merge:" " %s and %s" % (thisFlag, otherFlag)) elif mergeType == DEP_MERGE_TYPE_PREFS: origSense = senses[0] if (toStrongMap[origSense] == origSense and FLAG_SENSE_UNSPECIFIED in (posSense, negSense)): finalFlags[flag] = origSense else: finalFlags[flag] = toWeakMap[senses[-1]] else: finalFlags[flag] = max((strongestPos, posSense), (strongestNeg, negSense))[1] return Dependency(name, finalFlags) def mergeFlavorList(flavors, mergeType=DEP_MERGE_TYPE_NORMAL): for flavor in flavors: assert(isinstance(flavor, Flavor)) finalDep = Flavor() depClasses = set() for flavor in flavors: depClasses.update([ dependencyClasses[x] for x in flavor.getDepClasses()]) a = finalDep.addDep for depClass in depClasses: depsByName = {} for flavor in flavors: if flavor.hasDepClass(depClass): for dep in flavor.iterDepsByClass(depClass): depsByName.setdefault(dep.name, []).append(dep) for depList in depsByName.itervalues(): dep = _mergeDeps(depList, mergeType) if (depClass.justOne and mergeType == DEP_MERGE_TYPE_DROP_CONFLICTS and not dep.flags): continue a(depClass, dep) return finalDep def mergeFlavor(flavor, mergeBase): """ Merges the given flavor with the mergeBase - if flavor doesn't contain use flags, then include the mergeBase's use flags. If flavor doesn't contain an instruction set, then include the mergeBase's instruction set(s) """ if flavor is None: return mergeBase if mergeBase is None: return flavor needsIns = not flavor.hasDepClass(InstructionSetDependency) needsUse = not flavor.hasDepClass(UseDependency) if not (needsIns or needsUse): return flavor mergedFlavor = flavor.copy() if needsIns: insSets = list(mergeBase.iterDepsByClass(InstructionSetDependency)) if insSets: mergedFlavor.addDeps(InstructionSetDependency, insSets) if needsUse: useSet = list(mergeBase.iterDepsByClass(UseDependency)) if useSet: mergedFlavor.addDeps(UseDependency, useSet) return mergedFlavor def filterFlavor(depSet, filters): """ Returns a DependencySet based on an existing DependencySet object and a list of filters (which are themselves DependencySet objects). The new object will contain only dependencies which are in one or more of the filters, and the flags of those dependencies will be the flags of the object which also appear in one or more of the filters. Note that this uses _filterFlavorFlags(), and some of the specifics of the results are explained in the documentation for that function. 
This is equvalent to ( depSet & [ filter1 | filter 2 | ... | filterN ]) if the logical operators are not sense specific. @param depSet: Object to filter @type depSet: DependencySet @param filters: List of objects to filter based on @type filters: [ DependencySet ] @rtype: DependencySet """ if not isinstance(filters, (list, tuple)): filters = [filters] finalDepSet = Flavor() for depTag, depClass in depSet.members.items(): # Build a list of the DependencyClasses for this tag in each filter. filterClasses = [ x.members.get(depClass.tag, None) for x in filters ] filterClasses = [ x for x in filterClasses if x is not None ] if not filterClasses: # There is no overlap in the classes between this dep and the # filters, so we're done. This is just a shortcut; the rest of the # logic would noop anyway. continue depList = [] for dep in depClass.getDeps(): # Get all the DependencyClasses from the filters with the same # name as this dep filterDeps = [ x.members.get(dep.name, None) for x in filterClasses] filterDeps = [ x for x in filterDeps if x is not None ] if filterDeps: finalDep = _filterFlavorFlags(depClass, dep, filterDeps) if finalDep is not None: depList.append(finalDep) if depList: finalDepSet.addDeps(depClass.__class__, depList) return finalDepSet def _filterFlavorFlags(depClass, dep, filterDeps): """ Return a new Dependency object based on an existing Dependency object and a list of filters (which are themselves Dependency objects). The new object will have the same name as the original Dependency, but only flags which are present in both the original and one or more of the filters will be present. The sense is not used for matching; the sense from the original dependency is used in the return value. This is equvalent to ( dep & [ filter1 | filter 2 | ... | filterN ]) if the logical operators are not sense specific. While this may seem general purpose, it is only used for filtering flavor flags, and the name reflects this. @param depClass: Class object used to construct the returned dependency. Should be a child object of Dependency class. @type depClass: class @param dep: Object to filter flags from @type dep: Dependency @param filterDeps: Objects whose flags will be used to filter the dep parameter @type filterDeps: [ Dependency ] @rtype: Dependency """ filterFlags = set(itertools.chain(*(x.flags for x in filterDeps))) finalFlags = [ x for x in dep.flags.iteritems() if x[0] in filterFlags ] if not depClass.depNameSignificant and not finalFlags: return None return Dependency(dep.name, finalFlags) def getInstructionSetFlavor(flavor): if flavor is None: return None newFlavor = Flavor() targetISD = TargetInstructionSetDependency ISD = InstructionSetDependency # get just the arches, not any arch flags like mmx newFlavor.addDeps(ISD, [Dependency(x[1].name) for x in flavor.iterDeps() if x[0] is ISD]) targetDeps = [ Dependency(x[1].name) for x in flavor.iterDeps() if x[0] is targetISD ] if targetDeps: newFlavor.addDeps(targetISD, targetDeps) return newFlavor def formatFlavor(flavor): """ Formats a flavor and returns a string which parseFlavor can handle. 
""" def _singleClass(deps): l = [] for dep in deps: flags = dep.getFlags()[0] if flags: flags.sort() l.append("%s(%s)" % (dep.getName()[0], ",".join([ "%s%s" % (senseMap[x[1]], x[0]) for x in flags]))) else: l.append(dep.getName()[0]) l.sort() return " ".join(l) classes = flavor.getDepClasses() insSet = list(flavor.iterDepsByClass(InstructionSetDependency)) targetSet = list(flavor.iterDepsByClass(TargetInstructionSetDependency)) useFlags = list(flavor.iterDepsByClass(UseDependency)) if insSet: insSet = _singleClass(insSet) if targetSet: targetSet = _singleClass(targetSet) if useFlags: # strip the use() bit useFlags = _singleClass(useFlags)[4:-1] flavors = [] if useFlags: flavors.append(useFlags) if insSet: flavors.append('is: %s' % insSet) if targetSet: flavors.append('target: %s' % targetSet) return ' '.join(flavors) @api.developerApi def parseFlavor(s, mergeBase = None, raiseError = False): # return a Flavor dep set for the string passed. format is # [arch[(flag,[flag]*)]] [use:flag[,flag]*] # # if mergeBase is set, the parsed flavor is merged into it. The # rules for the merge are different than those for union() though; # the parsed flavor is assumed to set the is:, use:, or both. If # either class is unset, it's taken from mergeBase. def _fixup(flag): flag = flag.strip() if senseReverseMap.has_key(flag[0:2]): sense = senseReverseMap[flag[0:2]] flag = flag[2:] elif senseReverseMap.has_key(flag[0]): sense = senseReverseMap[flag[0]] flag = flag[1:] else: sense = FLAG_SENSE_REQUIRED return (flag, sense) # make it a noop if we get a Flavor object in here if isinstance(s, DependencySet): return s if isinstance(s, unicode): try: s = s.encode("ascii") except UnicodeEncodeError: raise ParseError, ("invalid characters in flavor '%s'" % s) s = s.strip() match = flavorRegexp.match(s) if not match: if raiseError: raise ParseError, ("invalid flavor '%s'" % s) return None groups = match.groups() set = Flavor() if groups[3]: # groups[3] is base instruction set, groups[4] is the flags, and # groups[5] is the next instruction set # groups[6] is a side effect of the matching groups, but isn't used # for anything # set up the loop for the next pass baseInsSet, insSetFlags, nextGroup, _, _ = groups[3:8] while baseInsSet: if insSetFlags: insSetFlags = insSetFlags.split(",") for i, flag in enumerate(insSetFlags): insSetFlags[i] = _fixup(flag) else: insSetFlags = [] set.addDep(InstructionSetDependency, Dependency(baseInsSet, insSetFlags)) if not nextGroup: break match = archGroupRegexp.match(nextGroup) # this had to match, or flavorRegexp wouldn't have assert(match) baseInsSet, insSetFlags, nextGroup, _, _ = match.groups() elif groups[2]: # mark that the user specified "is:" without any instruction set # by adding a placeholder instruction set dep class here. set.addEmptyDepClass(InstructionSetDependency) # 8 is target: 9 is target architecture. 10 is target flags # 11 is the next instruction set. 12 is just a side effect. 
if groups[9]: baseInsSet, insSetFlags, nextGroup = groups[9], groups[10], groups[11] while baseInsSet: if insSetFlags: insSetFlags = insSetFlags.split(",") for i, flag in enumerate(insSetFlags): insSetFlags[i] = _fixup(flag) else: insSetFlags = [] set.addDep(TargetInstructionSetDependency, Dependency(baseInsSet, insSetFlags)) if not nextGroup: break match = archGroupRegexp.match(nextGroup) # this had to match, or flavorRegexp wouldn't have assert(match) baseInsSet, insSetFlags, nextGroup, _, _ = match.groups() elif groups[8]: # mark that the user specified "target:" without any instruction set # by adding a placeholder instruction set dep class here. set.addEmptyDepClass(TargetInstructionSetDependency) if groups[1]: useFlags = groups[1].split(",") for i, flag in enumerate(useFlags): useFlags[i] = _fixup(flag) set.addDep(UseDependency, Dependency("use", useFlags)) elif groups[0]: # mark that the user specified "use:" without any instruction set # by adding a placeholder instruction set dep class here. set.addEmptyDepClass(UseDependency) return mergeFlavor(set, mergeBase) def parseDep(s): """ Parses dependency strings (not flavors) of the format (<depClass>: dep[(flags)])* and returns a dependency set containing those dependencies. Raises ParseError if the parsing fails. """ depSet = DependencySet() while s: match = depRegexp.match(s) if not match: raise ParseError, ('depString starting at %s' ' is not a valid dep string' % s) tagName = match.groups()[0] depClause = match.groups()[1] wholeMatch = match.group() s = s[len(wholeMatch):] if tagName not in dependencyClassesByName: raise ParseError, ('no such dependency class %s' % tagName) depClass = dependencyClassesByName[tagName] # depRegexp matches a generic depClass: dep(flags) set # - pass the dep to the given depClass for parsing dep = depClass.parseDep(depClause) assert(dep is not None) depSet.addDep(depClass, dep) return depSet def flavorDifferences(flavors, strict=True): """ Takes a set of flavors, returns a dict of flavors such that the value of a flavor's dict entry is a flavor that includes only the information that differentiates that flavor from others in the set @param strict: if False, ignore differences between flags where the difference is in strength of the flag, but not in direction, e.g. ignore ~foo vs. foo, but not ~foo vs. ~!foo. """ if not flavors: return {} diffs = {} flavors = list(flavors) base = flavors[0].copy() # the intersection of all the flavors will provide the largest common # flavor that is shared between all the flavors given for flavor in flavors[1:]: base = base.intersection(flavor, strict=strict) # remove the common flavor bits for flavor in flavors: diffs[flavor] = flavor.difference(base, strict=strict) return diffs def compatibleFlavors(flavor1, flavor2): """ Return True if flavor1 does not have any flavor that switches polarity from ~foo to ~!foo, or foo to !foo, and flavor1 does not have any architectures not in flavor2 and vice versa. 
""" for depClass in flavor1.members.values(): otherDepClass = flavor2.members.get(depClass.tag, None) if otherDepClass is None: continue for name, dep in depClass.members.iteritems(): otherDep = otherDepClass.members.get(name, None) if otherDep is None: if depClass.justOne: continue return False for flag, sense in dep.flags.iteritems(): otherSense = otherDep.flags.get(flag, None) if otherSense is None: continue if toStrongMap[sense] != toStrongMap[otherSense]: return False return True def getMinimalFlagChanges(dep, depToMatch): if not dep: return [ (flag, FLAG_SENSE_PREFERRED) for (flag,sense) in depToMatch.getFlags()[0] if sense == FLAG_SENSE_REQUIRED ] toAdd = [] for flag, sense in depToMatch.getFlags()[0]: mySense = dep.flags.get(flag, FLAG_SENSE_UNSPECIFIED) if sense == FLAG_SENSE_REQUIRED: # we must provide this flag and it must not be # DISALLOWED if mySense in (FLAG_SENSE_UNSPECIFIED, FLAG_SENSE_DISALLOWED): toAdd.append((flag, FLAG_SENSE_PREFERRED)) elif sense == FLAG_SENSE_PREFERRED: if mySense == FLAG_SENSE_DISALLOWED: toAdd.append((flag, FLAG_SENSE_PREFERRED)) elif sense == FLAG_SENSE_PREFERNOT: if mySense == FLAG_SENSE_REQUIRED: toAdd.append((flag, FLAG_SENSE_PREFERNOT)) elif sense == FLAG_SENSE_DISALLOWED: if mySense in (FLAG_SENSE_PREFERRED, FLAG_SENSE_REQUIRED): toAdd.append((flag, FLAG_SENSE_PREFERNOT)) return toAdd def getMinimalCompatibleChanges(flavor, flavorToMatch, keepArch=False): useFlags = list(flavorToMatch.iterDepsByClass(UseDependency)) insDeps = list(flavorToMatch.iterDepsByClass(InstructionSetDependency)) targetDeps = list(flavorToMatch.iterDepsByClass( TargetInstructionSetDependency)) myUseFlags = list(flavor.iterDepsByClass(UseDependency)) myInsDeps = list(flavor.iterDepsByClass(InstructionSetDependency)) myTargetDeps = list(flavor.iterDepsByClass( TargetInstructionSetDependency)) finalFlavor = Flavor() if useFlags: useFlags = useFlags[0] if myUseFlags: myUseFlags = myUseFlags[0] flagsNeeded = getMinimalFlagChanges(myUseFlags, useFlags) if flagsNeeded: useDep = Dependency('use', flagsNeeded) finalFlavor.addDep(UseDependency, useDep) for (depClass, toMatchDeps, myDeps) in ((InstructionSetDependency, insDeps, myInsDeps), (TargetInstructionSetDependency, targetDeps, myTargetDeps)): myDeps = dict((x.name, x) for x in myDeps) for dep in toMatchDeps: myDep = myDeps.get(dep.name, None) flagsNeeded = getMinimalFlagChanges(myDep, dep) if myDep is None or flagsNeeded or keepArch: insDep = Dependency(dep.name, flagsNeeded) finalFlavor.addDep(depClass, insDep) return finalFlavor def getUseFlags(flavor): deps = list(flavor.iterDepsByClass(UseDependency)) if not deps: return {} return deps[0].getFlags()[0] @api.developerApi def getMajorArch(flavor): from conary.deps import arch majorArch = arch.getMajorArch( flavor.iterDepsByClass(InstructionSetDependency)) if majorArch: return majorArch.name @api.developerApi def getShortFlavorDescriptors(flavors): contextStr = {} descriptors = {} for flavor in flavors: majorArch = getMajorArch(flavor) if majorArch: descriptors[flavor] = (majorArch,) else: descriptors[flavor] = () if len(set(descriptors.values())) != len(descriptors): differences = flavorDifferences(flavors, strict=False) for flavor, shortenedFlavor in differences.iteritems(): useFlags = getUseFlags(shortenedFlavor) positiveFlags = sorted(x[0] for x in useFlags if x[1] in (FLAG_SENSE_PREFERRED, FLAG_SENSE_REQUIRED)) descriptors[flavor] = descriptors[flavor] + tuple(positiveFlags) if len(set(descriptors.values())) != len(descriptors): # at this point the only differences 
are between # prefers + requires and prefernot and requirenot and missing differences = flavorDifferences(flavors, strict=True) for flavor, shortenedFlavor in differences.iteritems(): majorArch = getMajorArch(flavor) veryShortFlavor = Flavor() useFlags = list(shortenedFlavor.iterDepsByClass(UseDependency)) if useFlags: veryShortFlavor.addDeps(UseDependency, useFlags) if majorArch: veryShortFlavor.addDep(InstructionSetDependency, Dependency(majorArch)) descriptors[flavor] = tuple(str(veryShortFlavor).split(',')) if len(set(descriptors.values())) == len(set(descriptors)): return dict((x[0], '-'.join(x[1])) for x in descriptors.iteritems()) raise NotImplementedError dependencyCache = weakref.WeakValueDictionary() ident = '(?:[0-9A-Za-z_-]+)' flag = '(?:~?!?IDENT)' useFlag = '(?:!|~!)?FLAG(?:\.IDENT)?' archFlags = '\(( *FLAG(?: *, *FLAG)*)\)' archClause = ' *(?:(IDENT)(?:ARCHFLAGS)?)?' archGroup = '(?:ARCHCLAUSE(?:((?: *ARCHCLAUSE)*))?)' useClause = '(USEFLAG *(?:, *USEFLAG)*)?' depFlags = ' *(?:\([^)]*\))? *' # anything inside parens depName = r'(?:[^ (]+)' # anything except for a space or an opening paren depClause = depName + depFlags depRegexpStr = r'(IDENT): *(DEPCLAUSE) *' flavorRegexpStr = '^(use:)? *(?:USECLAUSE)? *(?:(is:) *ARCHGROUP)? *(?:(target:) *ARCHGROUP)?$' flavorRegexpStr = flavorRegexpStr.replace('ARCHGROUP', archGroup) flavorRegexpStr = flavorRegexpStr.replace('ARCHCLAUSE', archClause) flavorRegexpStr = flavorRegexpStr.replace('ARCHFLAGS', archFlags) flavorRegexpStr = flavorRegexpStr.replace('USECLAUSE', useClause) flavorRegexpStr = flavorRegexpStr.replace('USEFLAG', useFlag) flavorRegexpStr = flavorRegexpStr.replace('FLAG', flag) flavorRegexpStr = flavorRegexpStr.replace('IDENT', ident) flavorRegexp = re.compile(flavorRegexpStr) archGroupStr = archGroup.replace('ARCHCLAUSE', archClause) archGroupStr = archGroupStr.replace('ARCHFLAGS', archFlags) archGroupStr = archGroupStr.replace('USECLAUSE', useClause) archGroupStr = archGroupStr.replace('USEFLAG', useFlag) archGroupStr = archGroupStr.replace('FLAG', flag) archGroupStr = archGroupStr.replace('IDENT', ident) archGroupRegexp = re.compile(archGroupStr) depRegexpStr = depRegexpStr.replace('DEPCLAUSE', depClause) depRegexpStr = depRegexpStr.replace('IDENT', ident) depRegexp = re.compile(depRegexpStr) del ident, flag, useFlag, archClause, useClause, flavorRegexpStr del depFlags, depName, depClause, depRegexpStr del archGroupStr # None means disallowed match flavorScores = { (FLAG_SENSE_UNSPECIFIED, FLAG_SENSE_REQUIRED ) : None, (FLAG_SENSE_UNSPECIFIED, FLAG_SENSE_DISALLOWED): 0, (FLAG_SENSE_UNSPECIFIED, FLAG_SENSE_PREFERRED) : -1, (FLAG_SENSE_UNSPECIFIED, FLAG_SENSE_PREFERNOT) : 1, (FLAG_SENSE_REQUIRED, FLAG_SENSE_REQUIRED ) : 2, (FLAG_SENSE_REQUIRED, FLAG_SENSE_DISALLOWED): None, (FLAG_SENSE_REQUIRED, FLAG_SENSE_PREFERRED) : 1, (FLAG_SENSE_REQUIRED, FLAG_SENSE_PREFERNOT) : None, (FLAG_SENSE_DISALLOWED, FLAG_SENSE_REQUIRED ) : None, (FLAG_SENSE_DISALLOWED, FLAG_SENSE_DISALLOWED): 2, (FLAG_SENSE_DISALLOWED, FLAG_SENSE_PREFERRED) : None, (FLAG_SENSE_DISALLOWED, FLAG_SENSE_PREFERNOT) : 1, (FLAG_SENSE_PREFERRED, FLAG_SENSE_REQUIRED ) : 1, (FLAG_SENSE_PREFERRED, FLAG_SENSE_DISALLOWED): None, (FLAG_SENSE_PREFERRED, FLAG_SENSE_PREFERRED) : 2, (FLAG_SENSE_PREFERRED, FLAG_SENSE_PREFERNOT) : -1, (FLAG_SENSE_PREFERNOT, FLAG_SENSE_REQUIRED ) : -2, (FLAG_SENSE_PREFERNOT, FLAG_SENSE_DISALLOWED): 1, (FLAG_SENSE_PREFERNOT, FLAG_SENSE_PREFERRED) : -1, (FLAG_SENSE_PREFERNOT, FLAG_SENSE_PREFERNOT) : 1 }
apache-2.0
-5,890,892,836,971,758,000
33.151906
95
0.590365
false
ecdavis/pantsmud
test/pantsmud/driver/test_command_manager.py
1
7606
import mock import string from unittest import TestCase from pantsmud.driver.command import CommandManager class TestCommandManagerAdd(TestCase): def setUp(self): self.func = lambda: None self.name = "test" self.command_manager = CommandManager(self.name) def test_command_exists_after_add(self): self.assertFalse(self.command_manager.exists(self.name)) self.command_manager.add(self.name, self.func) self.assertTrue(self.command_manager.exists(self.name)) def test_add_fails_with_no_name(self): self.assertRaises(TypeError, self.command_manager.add, None, self.func) self.assertRaises(ValueError, self.command_manager.add, '', self.func) def test_add_fails_with_whitespace_in_name(self): for c in string.whitespace: self.assertRaises(ValueError, self.command_manager.add, self.name + c + self.name, self.func) def test_add_fails_with_no_func(self): self.assertRaises(TypeError, self.command_manager.add, self.name, None) def test_add_fails_with_non_callable_func(self): self.assertRaises(TypeError, self.command_manager.add, self.name, "foobar") class TestCommandManagerRun(TestCase): def setUp(self): self.func = lambda: None self.name = "test" self.command_manager = CommandManager(self.name) def test_command_function_is_called_by_run(self): func = mock.MagicMock() func.__name__ = "func" actor = mock.MagicMock() self.command_manager.add(self.name, func) self.command_manager.run(actor, self.name, None) func.assert_called_once_with(actor, self.name, None) def test_run_fails_if_command_does_not_exist(self): actor = mock.MagicMock() self.assertRaises(KeyError, self.command_manager.run, actor, self.name, None) def test_run_fails_with_no_actor(self): self.command_manager.add(self.name, self.func) self.assertRaises(TypeError, self.command_manager.run, None, self.name, None) def test_run_fails_when_actor_has_no_environment(self): self.command_manager.add(self.name, self.func) actor = mock.MagicMock() actor.environment = None self.assertRaises(ValueError, self.command_manager.run, actor, self.name, None) def test_run_suppresses_exceptions_in_command_func(self): def raiser(actor, cmd, args): raise Exception() actor = mock.MagicMock() self.command_manager.add(self.name, raiser) try: self.command_manager.run(actor, self.name, "") except Exception: self.fail("CommandManager.run must catch all exceptions raised by command functions.") class TestCommandManagerInputHandler(TestCase): def setUp(self): self.func = lambda: None self.name = "test" self.command_manager = CommandManager(self.name) def test_command_function_is_called_by_input_handler(self): func = mock.MagicMock() func.__name__ = "func" actor = mock.MagicMock() self.command_manager.add(self.name, func) self.command_manager.input_handler(actor, self.name) func.assert_called_once_with(actor, self.name, '') def test_input_handler_does_not_run_command_with_no_input(self): func = mock.MagicMock() func.__name__ = "func" self.command_manager.add(self.name, func) self.command_manager.input_handler(None, None) self.assertEqual(func.call_count, 0, "CommandManager.input_handler must not run a command if the input is None.") self.command_manager.input_handler(None, "") self.assertEqual(func.call_count, 0, "CommandManager.input_handler must not run a command if the input is an empty string.") def test_input_handler_does_not_run_command_with_only_whitespace_input(self): func = mock.MagicMock() func.__name__ = "func" self.command_manager.add(self.name, func) self.command_manager.input_handler(None, string.whitespace) self.assertEqual(func.call_count, 0, 
"CommandManager.input_handler must not run a command if the input is only whitespace.") def test_input_handler_does_not_run_command_when_input_begins_with_whitespace(self): func = mock.MagicMock() func.__name__ = "func" self.command_manager.add(self.name, func) for c in string.whitespace: self.command_manager.input_handler(mock.MagicMock(), c + self.name) self.assertEqual(func.call_count, 0, "CommandManager.input_handler must not run a command if the first character is whitespace.") def test_input_handler_does_not_run_command_when_input_begins_with_digit(self): func = mock.MagicMock() func.__name__ = "func" self.command_manager.add(self.name, func) for c in string.digits: self.command_manager.input_handler(mock.MagicMock(), c + self.name) self.assertEqual(func.call_count, 0, "CommandManager.input_handler must not run a command if the first character is a digit.") def test_input_handler_does_not_run_command_when_input_begins_with_punctuation(self): func = mock.MagicMock() func.__name__ = "func" self.command_manager.add(self.name, func) for c in string.punctuation: self.command_manager.input_handler(mock.MagicMock(), c + self.name) self.assertEqual(func.call_count, 0, "CommandManager.input_handler must not run a command if the first character is a digit.") def test_input_handler_does_not_run_command_when_input_contains_invalid_characters(self): func = mock.MagicMock() func.__name__ = "func" self.command_manager.add(self.name, func) line = self.name + " foo\t\r\n\tbar" self.command_manager.input_handler(mock.MagicMock(), line) self.assertEqual(func.call_count, 0, "CommandManager.input_handler must not run a command if the input contains invalid characters.") def test_input_handler_strips_whitespace_and_runs_command_when_input_ends_with_whitespace(self): func = mock.MagicMock() func.__name__ = "func" actor = mock.MagicMock() self.command_manager.add(self.name, func) self.command_manager.input_handler(actor, self.name + string.whitespace) func.assert_called_once_with(actor, self.name, '') def test_input_handler_sends_message_on_invalid_input(self): actor = mock.MagicMock() self.command_manager.input_handler(actor, "foobar\t") self.assertEqual(actor.message.call_count, 1, "CommandManager.input_handler must message the actor if the input is invalid.") self.command_manager.input_handler(actor, "\tfoobar") self.assertEqual(actor.message.call_count, 2, "CommandManager.input_handler must message the actor if the input is invalid.") def test_input_handler_sends_command_notfound_message(self): actor = mock.MagicMock() self.command_manager.input_handler(actor, "foobar") self.assertEqual(actor.message.call_count, 1, "CommandManager.input_handler must message the actor if the command is not found.") def test_input_handler_splits_command_name_from_arguments(self): actor = mock.MagicMock() cmd = self.name args = "foo bar baz bar" line = cmd + " " + args func = mock.MagicMock() func.__name__ = func self.command_manager.add(self.name, func) self.command_manager.input_handler(actor, line) func.assert_called_once_with(actor, cmd, args)
apache-2.0
-848,871,119,468,146,700
42.215909
141
0.671444
false
evite/nudge
tests/test_publisher.py
1
17971
#!/usr/bin/env python # # Copyright (C) 2011 Evite LLC # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA import datetime import time import unittest import urlparse import StringIO import nudge.validator import nudge.arg as args import nudge.json as json import nudge.publisher as sp from nudge.renderer import Json import httplib from nudge.publisher import ServicePublisher, Endpoint, Args, WSGIRequest from nudge.renderer import Result from nose.tools import raises class MockResponse(object): def __init__(self, request, code, headers={}, buffer=None, effective_url='http://test.example.com', error=None, request_time=None, time_info={}): self.request = request self.code = code self.headers = headers self.buffer = buffer self._body = None self.effective_url = 'http://test.example.com' self.request_time = request_time self.time_info = time_info def _get_body(self): return 'test stuff' body = property(_get_body) def rethrow(self): if self.error: raise self.error def start_response(self, status, headers): self.status = status self.headers=dict(headers) def write(self, content): content = ''.join(content) lines = ["HTTP/1.1 " + self.status] lines.extend(["%s: %s" % ('Content-Length', len(content))]) lines.extend(["%s: %s" % (n, v) for n, v in self.headers.iteritems()]) result = "\r\n".join(lines) + "\r\n\r\n" + content print 'self request MockResponse', self.request self.request.write(result) def __repr__(self): args = ",".join("%s=%r" % i for i in self.__dict__.iteritems()) return "%s(%s)" % (self.__class__.__name__, args) class TestRequest(dict): def __init__(self, d): super(TestRequest, self).__init__(d) self._buffer = '' def write(self, content): self._buffer += content def create_req(method, uri, version='HTTP/1.1', arguments={}, remote_ip='127.0.0.1', headers={}, body=''): env = { "REQUEST_METHOD": method.upper(), "CONTENT_TYPE": "application/json", "PATH_INFO": uri, "HTTP_HOST": "localhost", "REMOTE_ADDR": remote_ip, "wsgi.url_scheme": "http", "arguments": args, } for k, v in headers.iteritems(): env["HTTP_%s" % (k.upper())] = v env['wsgi.input'] = StringIO.StringIO(body) return TestRequest(env) def response_buf(http_status, content, content_type='application/json; charset=UTF-8', headers={}, end='\r\n'): lines = ["HTTP/1.1 " + str(http_status) + " " + httplib.responses[http_status]] content = content + end lines.extend(["%s: %s" % ('Content-Length', len(content))]) if 'Content-Type' not in headers: lines.extend(["%s: %s" % ('Content-Type', content_type)]) lines.extend(["%s: %s" % (n, v) for n, v in headers.iteritems()]) return "\r\n".join(lines) + "\r\n\r\n" + content class StupidTest(unittest.TestCase): def test_write(self): req = json.Dictomatic({"_buffer":""}) sp._write(req, "test") self.assertEqual(req._buffer, "test") def test_args(self): self.assertEqual(([], {}), sp.Args()) now = datetime.datetime.now() self.assertEqual(([], 
{"that":2,"theother":now}), sp.Args(that=2, theother=now)) self.assertEqual(([1,"this"], {"that":2,"theother":now}), sp.Args(1, "this", that=2, theother=now)) def test_generate_headers(self): result = sp._generate_headers('1', 200, 12, headers={"foo":"bar"}) self.assertEqual("1 200 OK\r\nContent-Length: 12\r\nfoo: bar\r\n\r\n", result) def test_gen_trace_str(self): def do_it(): return True result = sp._gen_trace_str(do_it, [], {}, "woot") self.assertEqual("do_it(): woot", result) result = sp._gen_trace_str(do_it, [], {}, u"woot") self.assertEqual(u"do_it(): woot", result) result = sp._gen_trace_str(do_it, ["yay","boo"], {"fa":"so"}, "woot") self.assertEqual("do_it(yay, boo, 'fa': 'so'): woot", result) class WSGIRequestTest(unittest.TestCase): def test_request_fail(self): class FooBar(): def read(self): return "woot" req_dict = json.Dictomatic.wrap({"QUERY_STRING":datetime.datetime.now(),"REQUEST_METHOD":"POST","HTTP_HOST":"127.0.0.1","PATH_INFO":"test","REMOTE_ADDR":"127.0.0.1","wsgi.input":FooBar(),"wsgi.url_scheme":"toast","headers":{"X-Forwarded-For":"127.0.0.1,something"}, "arguments":{},"body":'{"test":1}'}) req = sp.WSGIRequest(req_dict) def test_request(self): class FooBar(): def read(self): return "woot" req_dict = json.Dictomatic.wrap({"QUERY_STRING":"blah=woot&test=fa&test=blah","REQUEST_METHOD":"POST","HTTP_HOST":"127.0.0.1","PATH_INFO":"test","REMOTE_ADDR":"127.0.0.1","wsgi.input":FooBar(),"wsgi.url_scheme":"toast","headers":{"X-Forwarded-For":"127.0.0.1,something"}, "arguments":{},"body":'{"test":1}'}) req = sp.WSGIRequest(req_dict) self.assertEquals({"blah":["woot"],"test":["fa","blah"]}, req.arguments) self.assertTrue((time.time() - req.start_time < req.request_time())) class HandlerTest(unittest.TestCase): def test_noargs_handlersuccess(self): def handler(): return dict(called=1) sp = ServicePublisher() sp.add_endpoint(Endpoint(name='', method='GET', uri='/location', function=handler)) req = create_req('GET', '/location') resp = MockResponse(req, 200) result = sp(req, resp.start_response) resp.write(result) self.assertEqual(req._buffer,response_buf(200, '{"called": 1}')) def test_multiuri_handler_noarg(self): def handler(): return dict(called=1) sp = ServicePublisher() sp.add_endpoint(Endpoint(name='', method='GET', uris=['/location', '/otherlocation'], function=handler)) req = create_req('GET', '/location') resp = MockResponse(req, 200) result = sp(req, resp.start_response) resp.write(result) self.assertEqual(req._buffer,response_buf(200, '{"called": 1}')) req = create_req('GET', '/otherlocation') resp = MockResponse(req, 200) result = sp(req, resp.start_response) resp.write(result) self.assertEqual(req._buffer,response_buf(200, '{"called": 1}')) def test_multiuri_handler_query_arg(self): def handler(user): return dict(name=user) sp = ServicePublisher() sp.add_endpoint(Endpoint( name='', method='GET', uris=[ '/(?P<user>.*)/profile', '/profiles/(?P<user>.*)', ], args=([args.String('user')],{}), function=handler )) req = create_req('GET', '/oneuser/profile') resp = MockResponse(req, 200) result = sp(req, resp.start_response) resp.write(result) self.assertEqual(req._buffer,response_buf(200, '{"name": "oneuser"}')) req = create_req('GET', '/profiles/other_user') resp = MockResponse(req, 200) result = sp(req, resp.start_response) resp.write(result) self.assertEqual(req._buffer,response_buf(200, '{"name": "other_user"}')) def test_noargs_handlersuccess_empty(self): def handler(): return None sp = ServicePublisher() sp.add_endpoint(Endpoint(name='', method='GET', uri='/location', 
function=handler)) req = create_req('GET', '/location') resp = MockResponse(req, 200) result = sp(req, resp.start_response) resp.write(result) self.assertEqual(req._buffer,response_buf(404, '{"message": "Not Found", "code": 404}')) def test_matchfailure(self): def handler(): pass sp = ServicePublisher() sp.add_endpoint(Endpoint(name='', method='GET', uri='/location', function=handler)) req = create_req('GET', '/blah') resp = MockResponse(req, 200) result = sp(req, resp.start_response) resp.write(result) self.assertEqual(req._buffer,response_buf(404, '{"message": "Not Found", "code": 404}')) def test_noargs_but_method_handlersuccess(self): def handler(): return dict(arg1=1) sp = ServicePublisher() sp.add_endpoint(Endpoint(name='', method='DELETE', uri='/location', function=handler)) req = create_req('DELETE', '/location', arguments=dict(_method=['delete'])) resp = MockResponse(req, 200) result = sp(req, resp.start_response) resp.write(result) self.assertEqual(req._buffer,response_buf(200, '{"arg1": 1}')) def test_arg_handlersuccess(self): def handler(*args, **kwargs): return dict(arg1=1) sp = ServicePublisher() sp.add_endpoint(Endpoint(name='', method='POST', uri='/location', args=([args.String('test')],{}), function=handler)) req = create_req('POST', '/location', arguments=dict(test="blah"), headers={"Content-Type":"application/json"}, body='{"test":"foo"}') resp = MockResponse(req, 200) result = sp(req, resp.start_response) resp.write(result) self.assertEqual(req._buffer,response_buf(200, '{"arg1": 1}')) def test_arg_handlersuccess_nested_json(self): def handler(*args, **kwargs): return dict(arg1=1) sp = ServicePublisher() sp.add_endpoint(Endpoint(name='', method='POST', uri='/location', args=([args.String('test')],{}), function=handler)) req = create_req( 'POST', '/location', arguments=dict(test="blah"), headers={"Content-Type":"application/json"}, body='{"test":"foo", "spam":{"wonderful":true}}' ) resp = MockResponse(req, 200) result = sp(req, resp.start_response) resp.write(result) self.assertEqual(req._buffer,response_buf(200, '{"arg1": 1}')) def test_arg_handlersuccess_part_deux(self): def handler(*args, **kwargs): return dict(arg1=1) sp = ServicePublisher() sp.add_endpoint(Endpoint(name='', method='POST', uri='/location', args=([],{"test1":args.String('test', optional=True)}), function=handler)) req = create_req('POST', '/location', arguments=dict(test="blah"), headers={"Content-Type":"application/json"}, body='{"test1":"foo"}') resp = MockResponse(req, 200) result = sp(req, resp.start_response) resp.write(result) self.assertEqual(req._buffer,response_buf(200, '{"arg1": 1}')) def test_arg_handlersuccess_part_tre(self): def handler(*args, **kwargs): return dict(arg1=1) sp = ServicePublisher() sp.add_endpoint(Endpoint(name='', method='POST', uri='/location', args=([args.String('test')],{}), function=handler)) req = create_req('POST', '/location', arguments=dict(test="blah"), headers={"Content-Type":"application/json"}, body='{"test"="foo"}') resp = MockResponse(req, 500) result = sp(req, resp.start_response) resp.write(result) self.assertEqual(req._buffer,response_buf(400, '{"message": "body is not JSON", "code": 400}')) def test_prevent_json_array(self): def handler(): return [1,2,3] sp = ServicePublisher() sp.add_endpoint(Endpoint( name='', method='POST', uri='/location', function=handler )) req = create_req( 'POST', '/location', ) resp = MockResponse(req, 200) result = sp(req, resp.start_response) resp.write(result) self.assertEqual(req._buffer,response_buf(500, '{"message": 
"Internal Server Error", "code": 500}')) class RendererTest(unittest.TestCase): def test_renderer(self): def handler(): return "new_location" def renderer(result): return Result( content='moved', content_type='text/html', headers={'Location': result }, http_status=302, ) sp = ServicePublisher() sp.add_endpoint(Endpoint(name='', method='GET', uri='/location', function=handler, renderer=renderer)) req = create_req('GET', '/location') resp = MockResponse(req, 200) result = sp(req, resp.start_response) resp.write(result) self.assertEqual(req._buffer, response_buf(302, 'moved', content_type='text/html', headers={'Location': 'new_location' }) ) def test_renderer_fail(self): def handler(): return "new_location" def renderer(result): return Result( content='moved', content_type='text/html', headers={'Location': result }, http_status=302, ) sp = ServicePublisher() sp.add_endpoint(Endpoint(name='', method='GET', uri='/location', function=handler, renderer=renderer)) req = create_req('GET', '/location') resp = MockResponse(req, 200) result = sp(req, resp.start_response) resp.write(result) self.assertEqual(req._buffer, response_buf(302, 'moved', content_type='text/html', headers={'Location': 'new_location' }) ) class CookieTest(unittest.TestCase): ''' TODO: MAYBE consider testing the various edge cases of cookies like: - multiple keys - not completely url encoded garage - unicode ''' def test_cookie(self): def handler(chocolate, hazel): return {'chocolate': chocolate, 'hazel': hazel} sp = ServicePublisher() sp.add_endpoint(Endpoint( name='', method='GET', uri='/cooookies', function=handler, args=Args( chocolate=args.Cookie('chocolate'), hazel=args.Cookie('hazel'), ) )) req = create_req('GET', '/cooookies', headers={'cookie':'chocolate=chip;hazel=nut'}) resp = MockResponse(req, 200) result = sp(req, resp.start_response) self.assertEqual( {'chocolate': 'chip', 'hazel': 'nut'}, json.json_decode(result[0]) ) class FallbackAppTest(unittest.TestCase): ''' TODO test using the post body in the fallback app. 
def create_req(method, uri, version='HTTP/1.1', arguments={}, remote_ip='127.0.0.1', headers={}, body=''): env = { "REQUEST_METHOD": method.upper(), "CONTENT_TYPE": "application/json", "PATH_INFO": uri, "HTTP_HOST": "localhost", "REMOTE_ADDR": remote_ip, "wsgi.url_scheme": "http", "arguments": args, } for k, v in headers.iteritems(): env["HTTP_{0}".format(k.upper())] = v env['wsgi.input'] = StringIO.StringIO(body) ''' def _fallback_app(self, env, start_resp): if env['REQUEST_METHOD'] == 'POST': # This used to fail since nudge had already read the FP body = env['wsgi.input'].read() else: body = json.json_encode({'success': True}) start_resp(200, [('Content-Type', 'application/json; charset=utf8')]) return [body + '\r\n'] def _nudge_func(self): return {'nudge': True} def test_fallback_app_used(self): endpoints = [ Endpoint( name='', method='GET', uri='/test', function=self._nudge_func, ) ] sp = ServicePublisher(endpoints=endpoints, fallbackapp=self._fallback_app) req = create_req('GET', '/not-test') resp = MockResponse(req, 200) result = sp(req, resp.start_response) self.assertEqual( {'success': True}, json.json_decode(result[0]) ) def test_fallback_app_used_post_body(self): endpoints = [ Endpoint( name='', method='GET', uri='/test', function=self._nudge_func, ) ] sp = ServicePublisher(endpoints=endpoints, fallbackapp=self._fallback_app) body = json.json_encode({'success': True}) + '\r\n' req = create_req('POST', '/not-test', body=body) resp = MockResponse(req, 200) result = sp(req, resp.start_response) self.assertEqual( {'success': True}, json.json_decode(result[0]) ) def test_fallback_app_not_used(self): endpoints = [ Endpoint( name='', method='GET', uri='/test', function=self._nudge_func, ) ] sp = ServicePublisher(endpoints=endpoints, fallbackapp=self._fallback_app) req = create_req('GET', '/test') resp = MockResponse(req, 200) result = sp(req, resp.start_response) self.assertEqual( {'nudge': True}, json.json_decode(result[0]) ) if __name__ == '__main__': unittest.main()
lgpl-2.1
1,545,504,843,691,520,500
35.158954
316
0.573925
false
ChimeraCoder/GOctober
july/game/models.py
1
8079
from collections import namedtuple import datetime import logging from django.conf import settings from django.db import models from django.db.models.signals import post_save, m2m_changed from django.dispatch import receiver from django.utils import timezone from july.people.models import Project, Location, Team, Commit, Language LOCATION_SQL = """\ SELECT july_user.location_id AS slug, people_location.name AS name, SUM(game_player.points) AS total FROM game_player, july_user, people_location WHERE game_player.user_id = july_user.id AND july_user.location_id = people_location.slug AND people_location.approved = 1 AND game_player.game_id = %s GROUP BY july_user.location_id ORDER BY total DESC LIMIT 50; """ TEAM_SQL = """\ SELECT july_user.team_id AS slug, people_team.name AS name, SUM(game_player.points) AS total FROM game_player, july_user, people_team WHERE game_player.user_id = july_user.id AND july_user.team_id = people_team.slug AND people_team.approved = 1 AND game_player.game_id = %s GROUP BY july_user.team_id ORDER BY total DESC LIMIT 50; """ # Number of commits on each day during the game HISTOGRAM = """\ SELECT count(*), DATE(people_commit.timestamp), game_game.start AS start, game_game.end AS end FROM people_commit, game_game WHERE game_game.id = %s AND people_commit.timestamp > start AND people_commit.timestamp < end GROUP BY DATE(people_commit.timestamp) LIMIT 33; """ class Game(models.Model): start = models.DateTimeField() end = models.DateTimeField() commit_points = models.IntegerField(default=1) project_points = models.IntegerField(default=10) problem_points = models.IntegerField(default=5) players = models.ManyToManyField( settings.AUTH_USER_MODEL, through='Player') boards = models.ManyToManyField(Project, through='Board') language_boards = models.ManyToManyField( Language, through='LanguageBoard') class Meta: ordering = ['-end'] get_latest_by = 'end' def __unicode__(self): if self.end.month == 8: return 'Julython %s' % self.end.year elif self.end.month == 2: return 'J(an)ulython %s' % self.end.year else: return 'Testathon %s' % self.end.year @property def locations(self): """Preform a raw query to mimic a real model.""" return Location.objects.raw(LOCATION_SQL, [self.pk]) @property def teams(self): """Preform a raw query to mimic a real model.""" return Team.objects.raw(TEAM_SQL, [self.pk]) @property def histogram(self): """Return a histogram of commits during the month""" from django.db import connection cursor = connection.cursor() cursor.execute(HISTOGRAM, [self.pk]) Day = namedtuple('Day', 'count date start end') def mdate(d): # SQLITE returns a string while mysql returns date object # so make it look the same. 
if isinstance(d, datetime.date): return d day = datetime.datetime.strptime(d, '%Y-%m-%d') return day.date() days = {mdate(i.date): i for i in map(Day._make, cursor.fetchall())} num_days = self.end - self.start records = [] for day_n in xrange(num_days.days + 1): day = self.start + datetime.timedelta(days=day_n) records.append(days.get(day.date(), Day(0, day.date(), '', ''))) logging.debug(records) # TODO (rmyers): This should return a json array with labels results = [int(day.count) for day in records] return results @classmethod def active(cls, now=None): """Returns the active game or None.""" if now is None: now = timezone.now() try: return cls.objects.get(start__lte=now, end__gte=now) except cls.DoesNotExist: return None @classmethod def active_or_latest(cls, now=None): """Return the an active game or the latest one.""" if now is None: now = timezone.now() game = cls.active(now) if game is None: query = cls.objects.filter(end__lte=now) if len(query): game = query[0] return game def add_points_to_board(self, commit, from_orphan=False): board, created = Board.objects.select_for_update().get_or_create( game=self, project=commit.project, defaults={'points': self.project_points + self.commit_points}) if not created and not from_orphan: board.points += self.commit_points board.save() return board def add_points_to_language_boards(self, commit): for language in commit.languages: lang, _ = Language.objects.get_or_create(name=language) language_board, created = LanguageBoard.objects. \ select_for_update().get_or_create( game=self, language=lang, defaults={'points': self.commit_points}) if not created: language_board.points += self.commit_points language_board.save() def add_points_to_player(self, board, commit): player, created = Player.objects.select_for_update().get_or_create( game=self, user=commit.user, defaults={'points': self.project_points + self.commit_points}) player.boards.add(board) if not created: # we need to get the total points for the user project_points = player.boards.all().count() * self.project_points commit_points = Commit.objects.filter( user=commit.user, timestamp__gte=self.start, timestamp__lte=self.end).count() * self.commit_points # TODO (rmyers): Add in problem points player.points = project_points + commit_points player.save() def add_commit(self, commit, from_orphan=False): """ Add a commit to the game, update the scores for the player/boards. If the commit was previously an orphan commit don't update the board total, since it was already updated. TODO (rmyers): This may need to be run by celery in the future instead of a post create signal. 
""" board = self.add_points_to_board(commit, from_orphan) self.add_points_to_language_boards(commit) if commit.user: self.add_points_to_player(board, commit) class Player(models.Model): """A player in the game.""" game = models.ForeignKey(Game) user = models.ForeignKey(settings.AUTH_USER_MODEL) points = models.IntegerField(default=0) boards = models.ManyToManyField('Board') class Meta: ordering = ['-points'] get_latest_by = 'game__end' def __unicode__(self): return unicode(self.user) class AbstractBoard(models.Model): """Keeps points per metric per game""" game = models.ForeignKey(Game) points = models.IntegerField(default=0) class Meta: abstract = True ordering = ['-points'] get_latest_by = 'game__end' class Board(AbstractBoard): """A project with commits in the game.""" project = models.ForeignKey(Project) def __unicode__(self): return 'Board for %s' % unicode(self.project) class LanguageBoard(AbstractBoard): """A language with commits in the game.""" language = models.ForeignKey(Language) def __unicode__(self): return 'Board for %s' % unicode(self.language) @receiver(post_save, sender=Commit) def add_commit(sender, **kwargs): """Listens for new commits and adds them to the game.""" commit = kwargs.get('instance') active_game = Game.active(now=commit.timestamp) if active_game is not None: from_orphan = not kwargs.get('created', False) active_game.add_commit(commit, from_orphan=from_orphan)
mit
-4,066,190,536,887,538,700
31.841463
78
0.621364
false
Stanford-Legal-Tech-Design/legaltech-rapidpro
temba/formax.py
2
1690
from django.core.urlresolvers import resolve
from django.http import HttpResponseRedirect
from django.template import RequestContext, loader
import time
from orgs.context_processors import user_group_perms_processor
from django.conf import settings


class FormaxMixin(object):

    def derive_formax_sections(self, formax, context):
        return None

    def get_context_data(self, *args, **kwargs):
        context = super(FormaxMixin, self).get_context_data(*args, **kwargs)

        formax = Formax(self.request)
        self.derive_formax_sections(formax, context)

        if len(formax.sections) > 0:
            context['formax'] = formax
        return context


class Formax(object):

    def __init__(self, request):
        self.sections = []
        self.request = request
        context = user_group_perms_processor(self.request)
        self.org = context['user_org']

    def add_section(self, name, url, icon, action='formax', button='Save'):
        resolver = resolve(url)

        self.request.META['HTTP_X_FORMAX'] = 1
        self.request.META['HTTP_X_PJAX'] = 1

        open = self.request.REQUEST.get('open', None)
        if open == name:
            action = 'open'

        start = time.time()
        response = resolver.func(self.request, *resolver.args, **resolver.kwargs)

        if settings.DEBUG:
            print "%s took: %f" % (url, time.time() - start)

        # redirects don't do us any good
        if not isinstance(response, HttpResponseRedirect):
            response.render()
            self.sections.append(dict(name=name, url=url,
                                      response=response.content,
                                      icon=icon, action=action,
                                      button=button))
agpl-3.0
-1,518,895,608,231,386,400
32.8
84
0.630178
false
obicho/ebook
tests/test_txt_index.py
1
1704
import unittest

from TxtReader import TxtReader
from TxtIndex import TxtIndex


class TestTxtIndex(unittest.TestCase):

    def setUp(self):
        fh = open('sense.txt')
        self.index = TxtIndex(fh)

    def testStopWordsAreExcluded(self):
        self.assertEquals(None, self.index.get_pointers('a'))

    def testGetPointers(self):
        pointers = self.index.get_pointers('sense')
        fh = open('sense.txt')
        self.assertGreater(len(pointers), 10)
        for pointer in pointers:
            fh.seek(pointer)
            found = fh.read(len('sense'))
            self.assertEquals(self.index.norm_word(found), 'sense')

    def testGetPointersIgnoreCase(self):
        pointers_lower_case = self.index.get_pointers('sense')
        pointers_proper_case = self.index.get_pointers('Sense')
        pointers_all_cap = self.index.get_pointers('SENSE')
        self.assertEquals(len(pointers_lower_case), len(pointers_proper_case))
        self.assertEquals(len(pointers_proper_case), len(pointers_all_cap))

    def testExactSearch(self):
        phrase = 'a sense of duty'
        pointers = self.index.exact_search(phrase)
        self.assertNotEquals(pointers, None)
        fh = open('sense.txt')
        pointer = pointers[0]
        fh.seek(pointer)
        rs = fh.read(len(phrase))
        self.assertEquals(rs, phrase)

    def testExactSearchWithTrailingPunctuation(self):
        phrase = 'Elinor had no sense of fatigue'
        pointers = self.index.exact_search(phrase)
        self.assertNotEquals(pointers, None)
        fh = open('sense.txt')
        pointer = pointers[0]
        fh.seek(pointer)
        rs = fh.read(len(phrase))
        self.assertEquals(rs, phrase)
apache-2.0
3,902,883,459,808,402,400
35.255319
78
0.641432
false
hawk31/pyGPGO
examples/example2d.py
1
3132
#######################################
# pyGPGO examples
# example2d: Shows how the Bayesian Optimization works on a two-dimensional
# rastrigin function, step by step.
#######################################

import os
from collections import OrderedDict

import numpy as np
import matplotlib.pyplot as plt

from pyGPGO.GPGO import GPGO
from pyGPGO.surrogates.GaussianProcess import GaussianProcess
from pyGPGO.acquisition import Acquisition
from pyGPGO.covfunc import squaredExponential


def rastrigin(x, y, A=10):
    return (2 * A + (x ** 2 - A * np.cos(2 * np.pi * x))
            + (y ** 2 - A * np.cos(2 * np.pi * y)))


def plot_f(x_values, y_values, f):
    z = np.zeros((len(x_values), len(y_values)))
    for i in range(len(x_values)):
        for j in range(len(y_values)):
            z[i, j] = f(x_values[i], y_values[j])
    plt.imshow(z.T, origin='lower',
               extent=[np.min(x_values), np.max(x_values),
                       np.min(y_values), np.max(y_values)])
    plt.colorbar()
    plt.show()
    plt.savefig(os.path.join(os.getcwd(),
                'mthesis_text/figures/chapter3/rosen/rosen.pdf'))


def plot2dgpgo(gpgo):
    tested_X = gpgo.GP.X
    n = 100
    r_x, r_y = gpgo.parameter_range[0], gpgo.parameter_range[1]
    x_test = np.linspace(r_x[0], r_x[1], n)
    y_test = np.linspace(r_y[0], r_y[1], n)
    z_hat = np.empty((len(x_test), len(y_test)))
    z_var = np.empty((len(x_test), len(y_test)))
    ac = np.empty((len(x_test), len(y_test)))
    for i in range(len(x_test)):
        for j in range(len(y_test)):
            res = gpgo.GP.predict([x_test[i], y_test[j]])
            z_hat[i, j] = res[0]
            z_var[i, j] = res[1][0]
            ac[i, j] = -gpgo._acqWrapper(np.atleast_1d([x_test[i], y_test[j]]))
    fig = plt.figure()
    a = fig.add_subplot(2, 2, 1)
    a.set_title('Posterior mean')
    plt.imshow(z_hat.T, origin='lower', extent=[r_x[0], r_x[1], r_y[0], r_y[1]])
    plt.colorbar()
    plt.plot(tested_X[:, 0], tested_X[:, 1], 'wx', markersize=10)
    a = fig.add_subplot(2, 2, 2)
    a.set_title('Posterior variance')
    plt.imshow(z_var.T, origin='lower', extent=[r_x[0], r_x[1], r_y[0], r_y[1]])
    plt.plot(tested_X[:, 0], tested_X[:, 1], 'wx', markersize=10)
    plt.colorbar()
    a = fig.add_subplot(2, 2, 3)
    a.set_title('Acquisition function')
    plt.imshow(ac.T, origin='lower', extent=[r_x[0], r_x[1], r_y[0], r_y[1]])
    plt.colorbar()
    gpgo._optimizeAcq(method='L-BFGS-B', n_start=500)
    plt.plot(gpgo.best[0], gpgo.best[1], 'gx', markersize=15)
    plt.tight_layout()
    plt.savefig(os.path.join(os.getcwd(),
                'mthesis_text/figures/chapter3/rosen/{}.pdf'.format(item)))
    plt.show()


if __name__ == '__main__':
    x = np.linspace(-1, 1, 1000)
    y = np.linspace(-1, 1, 1000)
    plot_f(x, y, rastrigin)

    np.random.seed(20)
    sexp = squaredExponential()
    gp = GaussianProcess(sexp)
    acq = Acquisition(mode='ExpectedImprovement')
    param = OrderedDict()
    param['x'] = ('cont', [-1, 1])
    param['y'] = ('cont', [-1, 1])

    gpgo = GPGO(gp, acq, rastrigin, param, n_jobs=-1)
    gpgo._firstRun()

    for item in range(7):
        plot2dgpgo(gpgo)
        gpgo.updateGP()
mit
2,945,179,284,582,613,500
33.417582
116
0.581098
false
ZeitOnline/zeit.content.quiz
src/zeit/content/quiz/updater.py
1
1196
import urllib
import urllib2

import zope.app.appsetup.product
import zope.interface
import zope.component

import zeit.content.quiz.interfaces


class Updater(object):

    zope.component.adapts(zeit.content.quiz.interfaces.IQuiz)
    zope.interface.implements(zeit.content.quiz.interfaces.IQuizUpdater)

    def __init__(self, context):
        self.context = context

    def update(self):
        url = self.get_url()
        if url:
            urllib2.urlopen(url, self.get_data())

    def get_url(self):
        config = zope.app.appsetup.product.getProductConfiguration(
            'zeit.content.quiz')
        if config:
            return config.get('url')

    def get_data(self):
        data = dict(
            quiz_id=self.context.uniqueId.replace('http://xml.zeit.de', '', 1),
            action='preview',
            xml=zeit.cms.content.interfaces.IXMLSource(self.context))
        return urllib.urlencode(sorted(data.items()))


@zope.component.adapter(
    zeit.content.quiz.interfaces.IQuiz,
    zeit.cms.checkout.interfaces.IAfterCheckinEvent)
def update_after_checkin(context, event):
    updater = zeit.content.quiz.interfaces.IQuizUpdater(context)
    updater.update()
bsd-3-clause
7,931,774,756,033,927,000
27.47619
79
0.672241
false
rutsky/letsencrypt
acme/acme/client_test.py
1
24071
"""Tests for acme.client.""" import datetime import json import unittest from six.moves import http_client # pylint: disable=import-error import mock import requests from acme import challenges from acme import errors from acme import jose from acme import jws as acme_jws from acme import messages from acme import messages_test from acme import test_util CERT_DER = test_util.load_vector('cert.der') KEY = jose.JWKRSA.load(test_util.load_vector('rsa512_key.pem')) KEY2 = jose.JWKRSA.load(test_util.load_vector('rsa256_key.pem')) class ClientTest(unittest.TestCase): """Tests for acme.client.Client.""" # pylint: disable=too-many-instance-attributes,too-many-public-methods def setUp(self): self.response = mock.MagicMock( ok=True, status_code=http_client.OK, headers={}, links={}) self.net = mock.MagicMock() self.net.post.return_value = self.response self.net.get.return_value = self.response self.directory = messages.Directory({ messages.NewRegistration: 'https://www.letsencrypt-demo.org/acme/new-reg', messages.Revocation: 'https://www.letsencrypt-demo.org/acme/revoke-cert', }) from acme.client import Client self.client = Client( directory=self.directory, key=KEY, alg=jose.RS256, net=self.net) self.identifier = messages.Identifier( typ=messages.IDENTIFIER_FQDN, value='example.com') # Registration self.contact = ('mailto:[email protected]', 'tel:+12025551212') reg = messages.Registration( contact=self.contact, key=KEY.public_key()) self.new_reg = messages.NewRegistration(**dict(reg)) self.regr = messages.RegistrationResource( body=reg, uri='https://www.letsencrypt-demo.org/acme/reg/1', new_authzr_uri='https://www.letsencrypt-demo.org/acme/new-reg', terms_of_service='https://www.letsencrypt-demo.org/tos') # Authorization authzr_uri = 'https://www.letsencrypt-demo.org/acme/authz/1' challb = messages.ChallengeBody( uri=(authzr_uri + '/1'), status=messages.STATUS_VALID, chall=challenges.DNS(token='foo')) self.challr = messages.ChallengeResource( body=challb, authzr_uri=authzr_uri) self.authz = messages.Authorization( identifier=messages.Identifier( typ=messages.IDENTIFIER_FQDN, value='example.com'), challenges=(challb,), combinations=None) self.authzr = messages.AuthorizationResource( body=self.authz, uri=authzr_uri, new_cert_uri='https://www.letsencrypt-demo.org/acme/new-cert') # Request issuance self.certr = messages.CertificateResource( body=messages_test.CERT, authzrs=(self.authzr,), uri='https://www.letsencrypt-demo.org/acme/cert/1', cert_chain_uri='https://www.letsencrypt-demo.org/ca') def test_init_downloads_directory(self): uri = 'http://www.letsencrypt-demo.org/directory' from acme.client import Client self.client = Client( directory=uri, key=KEY, alg=jose.RS256, net=self.net) self.net.get.assert_called_once_with(uri) def test_register(self): # "Instance of 'Field' has no to_json/update member" bug: # pylint: disable=no-member self.response.status_code = http_client.CREATED self.response.json.return_value = self.regr.body.to_json() self.response.headers['Location'] = self.regr.uri self.response.links.update({ 'next': {'url': self.regr.new_authzr_uri}, 'terms-of-service': {'url': self.regr.terms_of_service}, }) self.assertEqual(self.regr, self.client.register(self.new_reg)) # TODO: test POST call arguments # TODO: split here and separate test reg_wrong_key = self.regr.body.update(key=KEY2.public_key()) self.response.json.return_value = reg_wrong_key.to_json() self.assertRaises( errors.UnexpectedUpdate, self.client.register, self.new_reg) def test_register_missing_next(self): 
self.response.status_code = http_client.CREATED self.assertRaises( errors.ClientError, self.client.register, self.new_reg) def test_update_registration(self): # "Instance of 'Field' has no to_json/update member" bug: # pylint: disable=no-member self.response.headers['Location'] = self.regr.uri self.response.json.return_value = self.regr.body.to_json() self.assertEqual(self.regr, self.client.update_registration(self.regr)) # TODO: test POST call arguments # TODO: split here and separate test self.response.json.return_value = self.regr.body.update( contact=()).to_json() self.assertRaises( errors.UnexpectedUpdate, self.client.update_registration, self.regr) def test_query_registration(self): self.response.json.return_value = self.regr.body.to_json() self.assertEqual(self.regr, self.client.query_registration(self.regr)) def test_agree_to_tos(self): self.client.update_registration = mock.Mock() self.client.agree_to_tos(self.regr) regr = self.client.update_registration.call_args[0][0] self.assertEqual(self.regr.terms_of_service, regr.body.agreement) def test_request_challenges(self): self.response.status_code = http_client.CREATED self.response.headers['Location'] = self.authzr.uri self.response.json.return_value = self.authz.to_json() self.response.links = { 'next': {'url': self.authzr.new_cert_uri}, } self.client.request_challenges(self.identifier, self.authzr.uri) # TODO: test POST call arguments # TODO: split here and separate test self.response.json.return_value = self.authz.update( identifier=self.identifier.update(value='foo')).to_json() self.assertRaises( errors.UnexpectedUpdate, self.client.request_challenges, self.identifier, self.authzr.uri) def test_request_challenges_missing_next(self): self.response.status_code = http_client.CREATED self.assertRaises( errors.ClientError, self.client.request_challenges, self.identifier, self.regr) def test_request_domain_challenges(self): self.client.request_challenges = mock.MagicMock() self.assertEqual( self.client.request_challenges(self.identifier), self.client.request_domain_challenges('example.com', self.regr)) def test_answer_challenge(self): self.response.links['up'] = {'url': self.challr.authzr_uri} self.response.json.return_value = self.challr.body.to_json() chall_response = challenges.DNSResponse() self.client.answer_challenge(self.challr.body, chall_response) # TODO: split here and separate test self.assertRaises(errors.UnexpectedUpdate, self.client.answer_challenge, self.challr.body.update(uri='foo'), chall_response) def test_answer_challenge_missing_next(self): self.assertRaises(errors.ClientError, self.client.answer_challenge, self.challr.body, challenges.DNSResponse()) def test_retry_after_date(self): self.response.headers['Retry-After'] = 'Fri, 31 Dec 1999 23:59:59 GMT' self.assertEqual( datetime.datetime(1999, 12, 31, 23, 59, 59), self.client.retry_after(response=self.response, default=10)) @mock.patch('acme.client.datetime') def test_retry_after_invalid(self, dt_mock): dt_mock.datetime.now.return_value = datetime.datetime(2015, 3, 27) dt_mock.timedelta = datetime.timedelta self.response.headers['Retry-After'] = 'foooo' self.assertEqual( datetime.datetime(2015, 3, 27, 0, 0, 10), self.client.retry_after(response=self.response, default=10)) @mock.patch('acme.client.datetime') def test_retry_after_seconds(self, dt_mock): dt_mock.datetime.now.return_value = datetime.datetime(2015, 3, 27) dt_mock.timedelta = datetime.timedelta self.response.headers['Retry-After'] = '50' self.assertEqual( datetime.datetime(2015, 3, 27, 0, 0, 50), 
self.client.retry_after(response=self.response, default=10)) @mock.patch('acme.client.datetime') def test_retry_after_missing(self, dt_mock): dt_mock.datetime.now.return_value = datetime.datetime(2015, 3, 27) dt_mock.timedelta = datetime.timedelta self.assertEqual( datetime.datetime(2015, 3, 27, 0, 0, 10), self.client.retry_after(response=self.response, default=10)) def test_poll(self): self.response.json.return_value = self.authzr.body.to_json() self.assertEqual((self.authzr, self.response), self.client.poll(self.authzr)) # TODO: split here and separate test self.response.json.return_value = self.authz.update( identifier=self.identifier.update(value='foo')).to_json() self.assertRaises( errors.UnexpectedUpdate, self.client.poll, self.authzr) def test_request_issuance(self): self.response.content = CERT_DER self.response.headers['Location'] = self.certr.uri self.response.links['up'] = {'url': self.certr.cert_chain_uri} self.assertEqual(self.certr, self.client.request_issuance( messages_test.CSR, (self.authzr,))) # TODO: check POST args def test_request_issuance_missing_up(self): self.response.content = CERT_DER self.response.headers['Location'] = self.certr.uri self.assertEqual( self.certr.update(cert_chain_uri=None), self.client.request_issuance(messages_test.CSR, (self.authzr,))) def test_request_issuance_missing_location(self): self.assertRaises( errors.ClientError, self.client.request_issuance, messages_test.CSR, (self.authzr,)) @mock.patch('acme.client.datetime') @mock.patch('acme.client.time') def test_poll_and_request_issuance(self, time_mock, dt_mock): # clock.dt | pylint: disable=no-member clock = mock.MagicMock(dt=datetime.datetime(2015, 3, 27)) def sleep(seconds): """increment clock""" clock.dt += datetime.timedelta(seconds=seconds) time_mock.sleep.side_effect = sleep def now(): """return current clock value""" return clock.dt dt_mock.datetime.now.side_effect = now dt_mock.timedelta = datetime.timedelta def poll(authzr): # pylint: disable=missing-docstring # record poll start time based on the current clock value authzr.times.append(clock.dt) # suppose it takes 2 seconds for server to produce the # result, increment clock clock.dt += datetime.timedelta(seconds=2) if not authzr.retries: # no more retries done = mock.MagicMock(uri=authzr.uri, times=authzr.times) done.body.status = messages.STATUS_VALID return done, [] # response (2nd result tuple element) is reduced to only # Retry-After header contents represented as integer # seconds; authzr.retries is a list of Retry-After # headers, head(retries) is peeled of as a current # Retry-After header, and tail(retries) is persisted for # later poll() calls return (mock.MagicMock(retries=authzr.retries[1:], uri=authzr.uri + '.', times=authzr.times), authzr.retries[0]) self.client.poll = mock.MagicMock(side_effect=poll) mintime = 7 def retry_after(response, default): # pylint: disable=missing-docstring # check that poll_and_request_issuance correctly passes mintime self.assertEqual(default, mintime) return clock.dt + datetime.timedelta(seconds=response) self.client.retry_after = mock.MagicMock(side_effect=retry_after) def request_issuance(csr, authzrs): # pylint: disable=missing-docstring return csr, authzrs self.client.request_issuance = mock.MagicMock( side_effect=request_issuance) csr = mock.MagicMock() authzrs = ( mock.MagicMock(uri='a', times=[], retries=(8, 20, 30)), mock.MagicMock(uri='b', times=[], retries=(5,)), ) cert, updated_authzrs = self.client.poll_and_request_issuance( csr, authzrs, mintime=mintime) 
self.assertTrue(cert[0] is csr) self.assertTrue(cert[1] is updated_authzrs) self.assertEqual(updated_authzrs[0].uri, 'a...') self.assertEqual(updated_authzrs[1].uri, 'b.') self.assertEqual(updated_authzrs[0].times, [ datetime.datetime(2015, 3, 27), # a is scheduled for 10, but b is polling [9..11), so it # will be picked up as soon as b is finished, without # additional sleeping datetime.datetime(2015, 3, 27, 0, 0, 11), datetime.datetime(2015, 3, 27, 0, 0, 33), datetime.datetime(2015, 3, 27, 0, 1, 5), ]) self.assertEqual(updated_authzrs[1].times, [ datetime.datetime(2015, 3, 27, 0, 0, 2), datetime.datetime(2015, 3, 27, 0, 0, 9), ]) self.assertEqual(clock.dt, datetime.datetime(2015, 3, 27, 0, 1, 7)) def test_check_cert(self): self.response.headers['Location'] = self.certr.uri self.response.content = CERT_DER self.assertEqual(self.certr.update(body=messages_test.CERT), self.client.check_cert(self.certr)) # TODO: split here and separate test self.response.headers['Location'] = 'foo' self.assertRaises( errors.UnexpectedUpdate, self.client.check_cert, self.certr) def test_check_cert_missing_location(self): self.response.content = CERT_DER self.assertRaises( errors.ClientError, self.client.check_cert, self.certr) def test_refresh(self): self.client.check_cert = mock.MagicMock() self.assertEqual( self.client.check_cert(self.certr), self.client.refresh(self.certr)) def test_fetch_chain(self): # pylint: disable=protected-access self.client._get_cert = mock.MagicMock() self.client._get_cert.return_value = ("response", "certificate") self.assertEqual(self.client._get_cert(self.certr.cert_chain_uri)[1], self.client.fetch_chain(self.certr)) def test_fetch_chain_no_up_link(self): self.assertTrue(self.client.fetch_chain(self.certr.update( cert_chain_uri=None)) is None) def test_revoke(self): self.client.revoke(self.certr.body) self.net.post.assert_called_once_with( self.directory[messages.Revocation], mock.ANY, content_type=None) def test_revoke_bad_status_raises_error(self): self.response.status_code = http_client.METHOD_NOT_ALLOWED self.assertRaises(errors.ClientError, self.client.revoke, self.certr) class ClientNetworkTest(unittest.TestCase): """Tests for acme.client.ClientNetwork.""" def setUp(self): self.verify_ssl = mock.MagicMock() self.wrap_in_jws = mock.MagicMock(return_value=mock.sentinel.wrapped) from acme.client import ClientNetwork self.net = ClientNetwork( key=KEY, alg=jose.RS256, verify_ssl=self.verify_ssl) self.response = mock.MagicMock(ok=True, status_code=http_client.OK) self.response.headers = {} self.response.links = {} def test_init(self): self.assertTrue(self.net.verify_ssl is self.verify_ssl) def test_wrap_in_jws(self): class MockJSONDeSerializable(jose.JSONDeSerializable): # pylint: disable=missing-docstring def __init__(self, value): self.value = value def to_partial_json(self): return {'foo': self.value} @classmethod def from_json(cls, value): pass # pragma: no cover # pylint: disable=protected-access jws_dump = self.net._wrap_in_jws( MockJSONDeSerializable('foo'), nonce=b'Tg') jws = acme_jws.JWS.json_loads(jws_dump) self.assertEqual(json.loads(jws.payload.decode()), {'foo': 'foo'}) self.assertEqual(jws.signature.combined.nonce, b'Tg') def test_check_response_not_ok_jobj_no_error(self): self.response.ok = False self.response.json.return_value = {} # pylint: disable=protected-access self.assertRaises( errors.ClientError, self.net._check_response, self.response) def test_check_response_not_ok_jobj_error(self): self.response.ok = False self.response.json.return_value = 
messages.Error( detail='foo', typ='serverInternal', title='some title').to_json() # pylint: disable=protected-access self.assertRaises( messages.Error, self.net._check_response, self.response) def test_check_response_not_ok_no_jobj(self): self.response.ok = False self.response.json.side_effect = ValueError # pylint: disable=protected-access self.assertRaises( errors.ClientError, self.net._check_response, self.response) def test_check_response_ok_no_jobj_ct_required(self): self.response.json.side_effect = ValueError for response_ct in [self.net.JSON_CONTENT_TYPE, 'foo']: self.response.headers['Content-Type'] = response_ct # pylint: disable=protected-access self.assertRaises( errors.ClientError, self.net._check_response, self.response, content_type=self.net.JSON_CONTENT_TYPE) def test_check_response_ok_no_jobj_no_ct(self): self.response.json.side_effect = ValueError for response_ct in [self.net.JSON_CONTENT_TYPE, 'foo']: self.response.headers['Content-Type'] = response_ct # pylint: disable=protected-access,no-value-for-parameter self.assertEqual( self.response, self.net._check_response(self.response)) def test_check_response_jobj(self): self.response.json.return_value = {} for response_ct in [self.net.JSON_CONTENT_TYPE, 'foo']: self.response.headers['Content-Type'] = response_ct # pylint: disable=protected-access,no-value-for-parameter self.assertEqual( self.response, self.net._check_response(self.response)) @mock.patch('acme.client.requests') def test_send_request(self, mock_requests): mock_requests.request.return_value = self.response # pylint: disable=protected-access self.assertEqual(self.response, self.net._send_request( 'HEAD', 'url', 'foo', bar='baz')) mock_requests.request.assert_called_once_with( 'HEAD', 'url', 'foo', verify=mock.ANY, bar='baz') @mock.patch('acme.client.requests') def test_send_request_verify_ssl(self, mock_requests): # pylint: disable=protected-access for verify in True, False: mock_requests.request.reset_mock() mock_requests.request.return_value = self.response self.net.verify_ssl = verify # pylint: disable=protected-access self.assertEqual( self.response, self.net._send_request('GET', 'url')) mock_requests.request.assert_called_once_with( 'GET', 'url', verify=verify) @mock.patch('acme.client.requests') def test_requests_error_passthrough(self, mock_requests): mock_requests.exceptions = requests.exceptions mock_requests.request.side_effect = requests.exceptions.RequestException # pylint: disable=protected-access self.assertRaises(requests.exceptions.RequestException, self.net._send_request, 'GET', 'uri') class ClientNetworkWithMockedResponseTest(unittest.TestCase): """Tests for acme.client.ClientNetwork which mock out response.""" # pylint: disable=too-many-instance-attributes def setUp(self): from acme.client import ClientNetwork self.net = ClientNetwork(key=None, alg=None) self.response = mock.MagicMock(ok=True, status_code=http_client.OK) self.response.headers = {} self.response.links = {} self.checked_response = mock.MagicMock() self.obj = mock.MagicMock() self.wrapped_obj = mock.MagicMock() self.content_type = mock.sentinel.content_type self.all_nonces = [jose.b64encode(b'Nonce'), jose.b64encode(b'Nonce2')] self.available_nonces = self.all_nonces[:] def send_request(*args, **kwargs): # pylint: disable=unused-argument,missing-docstring if self.available_nonces: self.response.headers = { self.net.REPLAY_NONCE_HEADER: self.available_nonces.pop().decode()} else: self.response.headers = {} return self.response # pylint: disable=protected-access 
self.net._send_request = self.send_request = mock.MagicMock( side_effect=send_request) self.net._check_response = self.check_response self.net._wrap_in_jws = mock.MagicMock(return_value=self.wrapped_obj) def check_response(self, response, content_type): # pylint: disable=missing-docstring self.assertEqual(self.response, response) self.assertEqual(self.content_type, content_type) return self.checked_response def test_head(self): self.assertEqual(self.response, self.net.head('url', 'foo', bar='baz')) self.send_request.assert_called_once_with( 'HEAD', 'url', 'foo', bar='baz') def test_get(self): self.assertEqual(self.checked_response, self.net.get( 'url', content_type=self.content_type, bar='baz')) self.send_request.assert_called_once_with('GET', 'url', bar='baz') def test_post(self): # pylint: disable=protected-access self.assertEqual(self.checked_response, self.net.post( 'uri', self.obj, content_type=self.content_type)) self.net._wrap_in_jws.assert_called_once_with( self.obj, jose.b64decode(self.all_nonces.pop())) assert not self.available_nonces self.assertRaises(errors.MissingNonce, self.net.post, 'uri', self.obj, content_type=self.content_type) self.net._wrap_in_jws.assert_called_with( self.obj, jose.b64decode(self.all_nonces.pop())) def test_post_wrong_initial_nonce(self): # HEAD self.available_nonces = [b'f', jose.b64encode(b'good')] self.assertRaises(errors.BadNonce, self.net.post, 'uri', self.obj, content_type=self.content_type) def test_post_wrong_post_response_nonce(self): self.available_nonces = [jose.b64encode(b'good'), b'f'] self.assertRaises(errors.BadNonce, self.net.post, 'uri', self.obj, content_type=self.content_type) def test_head_get_post_error_passthrough(self): self.send_request.side_effect = requests.exceptions.RequestException for method in self.net.head, self.net.get: self.assertRaises( requests.exceptions.RequestException, method, 'GET', 'uri') self.assertRaises(requests.exceptions.RequestException, self.net.post, 'uri', obj=self.obj) if __name__ == '__main__': unittest.main() # pragma: no cover
apache-2.0
5,428,723,961,308,798,000
41.229825
86
0.630717
false
khaledhosny/psautohint
python/psautohint/otfFont.py
1
57646
# Copyright 2014 Adobe. All rights reserved. """ Utilities for converting between T2 charstrings and the bez data format. """ import copy import logging import os import re import subprocess import tempfile import itertools from fontTools.misc.psCharStrings import (T2OutlineExtractor, SimpleT2Decompiler) from fontTools.ttLib import TTFont, newTable from fontTools.misc.roundTools import noRound, otRound from fontTools.varLib.varStore import VarStoreInstancer from fontTools.varLib.cff import CFF2CharStringMergePen, MergeOutlineExtractor # import subset.cff is needed to load the implementation for # CFF.desubroutinize: the module adds this class method to the CFF and CFF2 # classes. import fontTools.subset.cff from . import fdTools, FontParseError # keep linting tools quiet about unused import assert fontTools.subset.cff is not None log = logging.getLogger(__name__) kStackLimit = 46 kStemLimit = 96 class SEACError(Exception): pass def _add_method(*clazzes): """Returns a decorator function that adds a new method to one or more classes.""" def wrapper(method): done = [] for clazz in clazzes: if clazz in done: continue # Support multiple names of a clazz done.append(clazz) assert clazz.__name__ != 'DefaultTable', \ 'Oops, table class not found.' assert not hasattr(clazz, method.__name__), \ "Oops, class '%s' has method '%s'." % (clazz.__name__, method.__name__) setattr(clazz, method.__name__, method) return None return wrapper def hintOn(i, hintMaskBytes): # used to add the active hints to the bez string, # when a T2 hintmask operator is encountered. byteIndex = int(i / 8) byteValue = hintMaskBytes[byteIndex] offset = 7 - (i % 8) return ((2**offset) & byteValue) > 0 class T2ToBezExtractor(T2OutlineExtractor): # The T2OutlineExtractor class calls a class method as the handler for each # T2 operator. # I use this to convert the T2 operands and arguments to bez operators. # Note: flex is converted to regular rrcurveto's. # cntrmasks just map to hint replacement blocks with the specified stems. 
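    # Illustrative sketch (not taken from the original sources): a charstring
    # fragment such as "10 20 rmoveto 30 0 rlineto ... endchar" comes out as
    # bez text along the lines of "sc\n10 20 mt\n40 20 dt\n ... cp\ned\n",
    # because T2 operands are relative deltas while the mt/dt coordinates
    # written by rMoveTo/rLineTo below are absolute.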
def __init__(self, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, read_hints=True, round_coords=True): T2OutlineExtractor.__init__(self, None, localSubrs, globalSubrs, nominalWidthX, defaultWidthX) self.vhints = [] self.hhints = [] self.bezProgram = [] self.read_hints = read_hints self.firstMarkingOpSeen = False self.closePathSeen = False self.subrLevel = 0 self.round_coords = round_coords self.hintMaskBytes = None def execute(self, charString): self.subrLevel += 1 SimpleT2Decompiler.execute(self, charString) self.subrLevel -= 1 if (not self.closePathSeen) and (self.subrLevel == 0): self.closePath() def _point(self, point): if self.round_coords: return " ".join("%d" % round(pt) for pt in point) return " ".join("%3f" % pt for pt in point) def rMoveTo(self, point): point = self._nextPoint(point) if not self.firstMarkingOpSeen: self.firstMarkingOpSeen = True self.bezProgram.append("sc\n") log.debug("moveto %s, curpos %s", point, self.currentPoint) self.bezProgram.append("%s mt\n" % self._point(point)) self.sawMoveTo = True def rLineTo(self, point): if not self.sawMoveTo: self.rMoveTo((0, 0)) point = self._nextPoint(point) log.debug("lineto %s, curpos %s", point, self.currentPoint) self.bezProgram.append("%s dt\n" % self._point(point)) def rCurveTo(self, pt1, pt2, pt3): if not self.sawMoveTo: self.rMoveTo((0, 0)) pt1 = list(self._nextPoint(pt1)) pt2 = list(self._nextPoint(pt2)) pt3 = list(self._nextPoint(pt3)) log.debug("curveto %s %s %s, curpos %s", pt1, pt2, pt3, self.currentPoint) self.bezProgram.append("%s ct\n" % self._point(pt1 + pt2 + pt3)) def op_endchar(self, index): self.endPath() args = self.popallWidth() if args: # It is a 'seac' composite character. Don't process raise SEACError def endPath(self): # In T2 there are no open paths, so always do a closePath when # finishing a sub path. 
if self.sawMoveTo: log.debug("endPath") self.bezProgram.append("cp\n") self.sawMoveTo = False def closePath(self): self.closePathSeen = True log.debug("closePath") if self.bezProgram and self.bezProgram[-1] != "cp\n": self.bezProgram.append("cp\n") self.bezProgram.append("ed\n") def updateHints(self, args, hint_list, bezCommand): self.countHints(args) # first hint value is absolute hint coordinate, second is hint width if not self.read_hints: return lastval = args[0] arg = str(lastval) hint_list.append(arg) self.bezProgram.append(arg + " ") for i in range(len(args))[1:]: val = args[i] lastval += val if i % 2: arg = str(val) hint_list.append(arg) self.bezProgram.append("%s %s\n" % (arg, bezCommand)) else: arg = str(lastval) hint_list.append(arg) self.bezProgram.append(arg + " ") def op_hstem(self, index): args = self.popallWidth() self.hhints = [] self.updateHints(args, self.hhints, "rb") log.debug("hstem %s", self.hhints) def op_vstem(self, index): args = self.popallWidth() self.vhints = [] self.updateHints(args, self.vhints, "ry") log.debug("vstem %s", self.vhints) def op_hstemhm(self, index): args = self.popallWidth() self.hhints = [] self.updateHints(args, self.hhints, "rb") log.debug("stemhm %s %s", self.hhints, args) def op_vstemhm(self, index): args = self.popallWidth() self.vhints = [] self.updateHints(args, self.vhints, "ry") log.debug("vstemhm %s %s", self.vhints, args) def getCurHints(self, hintMaskBytes): curhhints = [] curvhints = [] numhhints = len(self.hhints) for i in range(int(numhhints / 2)): if hintOn(i, hintMaskBytes): curhhints.extend(self.hhints[2 * i:2 * i + 2]) numvhints = len(self.vhints) for i in range(int(numvhints / 2)): if hintOn(i + int(numhhints / 2), hintMaskBytes): curvhints.extend(self.vhints[2 * i:2 * i + 2]) return curhhints, curvhints def doMask(self, index, bezCommand): args = [] if not self.hintMaskBytes: args = self.popallWidth() if args: self.vhints = [] self.updateHints(args, self.vhints, "ry") self.hintMaskBytes = int((self.hintCount + 7) / 8) self.hintMaskString, index = self.callingStack[-1].getBytes( index, self.hintMaskBytes) if self.read_hints: curhhints, curvhints = self.getCurHints(self.hintMaskString) strout = "" mask = [strout + hex(ch) for ch in self.hintMaskString] log.debug("%s %s %s %s %s", bezCommand, mask, curhhints, curvhints, args) self.bezProgram.append("beginsubr snc\n") for i, hint in enumerate(curhhints): self.bezProgram.append("%s " % hint) if i % 2: self.bezProgram.append("rb\n") for i, hint in enumerate(curvhints): self.bezProgram.append("%s " % hint) if i % 2: self.bezProgram.append("ry\n") self.bezProgram.extend(["endsubr enc\n", "newcolors\n"]) return self.hintMaskString, index def op_hintmask(self, index): hintMaskString, index = self.doMask(index, "hintmask") return hintMaskString, index def op_cntrmask(self, index): hintMaskString, index = self.doMask(index, "cntrmask") return hintMaskString, index def countHints(self, args): self.hintCount = self.hintCount + int(len(args) / 2) def convertT2GlyphToBez(t2CharString, read_hints=True, round_coords=True): # wrapper for T2ToBezExtractor which # applies it to the supplied T2 charstring subrs = getattr(t2CharString.private, "Subrs", []) extractor = T2ToBezExtractor(subrs, t2CharString.globalSubrs, t2CharString.private.nominalWidthX, t2CharString.private.defaultWidthX, read_hints, round_coords) extractor.execute(t2CharString) t2_width_arg = None if extractor.gotWidth and (extractor.width is not None): t2_width_arg = extractor.width - t2CharString.private.nominalWidthX return 
"".join(extractor.bezProgram), t2_width_arg class HintMask: # class used to collect hints for the current # hint mask when converting bez to T2. def __init__(self, listPos): # The index into the t2list is kept so we can quickly find them later. # Note that t2list has one item per operator, and does not include the # initial hint operators - first op is always [rhv]moveto or endchar. self.listPos = listPos # These contain the actual hint values. self.h_list = [] self.v_list = [] self.mask = None def maskByte(self, hHints, vHints): # return hintmask bytes for known hints. num_hhints = len(hHints) num_vhints = len(vHints) self.byteLength = byteLength = int((7 + num_hhints + num_vhints) / 8) maskVal = 0 byteIndex = 0 mask = b"" if self.h_list: mask, maskVal, byteIndex = self.addMaskBits( hHints, self.h_list, 0, mask, maskVal, byteIndex) if self.v_list: mask, maskVal, byteIndex = self.addMaskBits( vHints, self.v_list, num_hhints, mask, maskVal, byteIndex) if maskVal: mask += bytes([maskVal]) if len(mask) < byteLength: mask += b"\0" * (byteLength - len(mask)) self.mask = mask return mask @staticmethod def addMaskBits(allHints, maskHints, numPriorHints, mask, maskVal, byteIndex): # sort in allhints order. sort_list = [[allHints.index(hint) + numPriorHints, hint] for hint in maskHints if hint in allHints] if not sort_list: # we get here if some hints have been dropped # because of # the stack limit, so that none of the items in maskHints are # not in allHints return mask, maskVal, byteIndex sort_list.sort() (idx_list, maskHints) = zip(*sort_list) for i in idx_list: newbyteIndex = int(i / 8) if newbyteIndex != byteIndex: mask += bytes([maskVal]) byteIndex += 1 while byteIndex < newbyteIndex: mask += b"\0" byteIndex += 1 maskVal = 0 maskVal += 2**(7 - (i % 8)) return mask, maskVal, byteIndex @property def num_bits(self): count = sum( [bin(mask_byte).count('1') for mask_byte in bytearray(self.mask)]) return count def make_hint_list(hints, need_hint_masks, is_h): # Add the list of T2 tokens that make up the initial hint operators hint_list = [] lastPos = 0 # In bez terms, the first coordinate in each pair is absolute, # second is relative. # In T2, each term is relative to the previous one. for hint in hints: if not hint: continue pos1 = hint[0] pos = pos1 - lastPos if pos % 1 == 0: pos = int(pos) hint_list.append(pos) pos2 = hint[1] if pos2 % 1 == 0: pos2 = int(pos2) lastPos = pos1 + pos2 hint_list.append(pos2) if need_hint_masks: if is_h: op = "hstemhm" hint_list.append(op) # never need to append vstemhm: if we are using it, it is followed # by a mask command and vstemhm is inferred. 
else: if is_h: op = "hstem" else: op = "vstem" hint_list.append(op) return hint_list bezToT2 = { "mt": 'rmoveto', "rmt": 'rmoveto', "dt": 'rlineto', "ct": 'rrcurveto', "cp": '', "ed": 'endchar' } kHintArgsNoOverlap = 0 kHintArgsOverLap = 1 kHintArgsMatch = 2 def checkStem3ArgsOverlap(arg_list, hint_list): status = kHintArgsNoOverlap for x0, x1 in arg_list: x1 = x0 + x1 for y0, y1 in hint_list: y1 = y0 + y1 if x0 == y0: if x1 == y1: status = kHintArgsMatch else: return kHintArgsOverLap elif x1 == y1: return kHintArgsOverLap else: if (x0 > y0) and (x0 < y1): return kHintArgsOverLap if (x1 > y0) and (x1 < y1): return kHintArgsOverLap return status def _add_cntr_maskHints(counter_mask_list, src_hints, is_h): for arg_list in src_hints: for mask in counter_mask_list: dst_hints = mask.h_list if is_h else mask.v_list if not dst_hints: dst_hints.extend(arg_list) overlap_status = kHintArgsMatch break overlap_status = checkStem3ArgsOverlap(arg_list, dst_hints) # The args match args in this control mask. if overlap_status == kHintArgsMatch: break if overlap_status != kHintArgsMatch: mask = HintMask(0) counter_mask_list.append(mask) dst_hints.extend(arg_list) def build_counter_mask_list(h_stem3_list, v_stem3_list): v_counter_mask = HintMask(0) h_counter_mask = v_counter_mask counter_mask_list = [h_counter_mask] _add_cntr_maskHints(counter_mask_list, h_stem3_list, is_h=True) _add_cntr_maskHints(counter_mask_list, v_stem3_list, is_h=False) return counter_mask_list def makeRelativeCTArgs(arg_list, curX, curY): newCurX = arg_list[4] newCurY = arg_list[5] arg_list[5] -= arg_list[3] arg_list[4] -= arg_list[2] arg_list[3] -= arg_list[1] arg_list[2] -= arg_list[0] arg_list[0] -= curX arg_list[1] -= curY return arg_list, newCurX, newCurY def build_hint_order(hints): # MM hints have duplicate hints. We want to return a list of indices into # the original unsorted and unfiltered list. The list should be sorted, and # should filter out duplicates num_hints = len(hints) index_list = list(range(num_hints)) hint_list = list(zip(hints, index_list)) hint_list.sort() new_hints = [hint_list[i] for i in range(1, num_hints) if hint_list[i][0] != hint_list[i - 1][0]] new_hints = [hint_list[0]] + new_hints hints, hint_order = list(zip(*new_hints)) # hints is now a list of hint pairs, sorted by increasing bottom edge. # hint_order is now a list of the hint indices from the bez file, but # sorted in the order of the hint pairs. return hints, hint_order def make_abs(hint_pair): bottom_edge, delta = hint_pair new_hint_pair = [bottom_edge, delta] if delta in [-20, -21]: # It is a ghost hint! # We use this only in comparing overlap and order: # pretend the delta is 0, as it isn't a real value. new_hint_pair[1] = bottom_edge else: new_hint_pair[1] = bottom_edge + delta return new_hint_pair def check_hint_overlap(hint_list, last_idx, bad_hint_idxs): # return True if there is an overlap. prev = hint_list[0] for i, hint_pair in enumerate(hint_list[1:], 1): if prev[1] >= hint_pair[0]: bad_hint_idxs.add(i + last_idx - 1) prev = hint_pair def check_hint_pairs(hint_pairs, mm_hint_info, last_idx=0): # pairs must be in ascending order by bottom (or left) edge, # and pairs in a hint group must not overlap. # check order first bad_hint_idxs = set() prev = hint_pairs[0] for i, hint_pair in enumerate(hint_pairs[1:], 1): if prev[0] > hint_pair[0]: # If there is a conflict, we drop the previous hint bad_hint_idxs.add(i + last_idx - 1) prev = hint_pair # check for overlap in hint groups. 
if mm_hint_info.hint_masks: for hint_mask in mm_hint_info.hint_masks: if last_idx == 0: hint_list = hint_mask.h_list else: hint_list = hint_mask.v_list hint_list = [make_abs(hint_pair) for hint_pair in hint_list] check_hint_overlap(hint_list, last_idx, bad_hint_idxs) else: hint_list = [make_abs(hint_pair) for hint_pair in hint_pairs] check_hint_overlap(hint_list, last_idx, bad_hint_idxs) if bad_hint_idxs: mm_hint_info.bad_hint_idxs |= bad_hint_idxs def update_hints(in_mm_hints, arg_list, hints, hint_mask, is_v=False): if in_mm_hints: hints.append(arg_list) i = len(hints) - 1 else: try: i = hints.index(arg_list) except ValueError: i = len(hints) hints.append(arg_list) if hint_mask: hint_list = hint_mask.v_list if is_v else hint_mask.h_list if hints[i] not in hint_list: hint_list.append(hints[i]) return i def convertBezToT2(bezString, mm_hint_info=None): # convert bez data to a T2 outline program, a list of operator tokens. # # Convert all bez ops to simplest T2 equivalent. # Add all hints to vertical and horizontal hint lists as encountered. # Insert a HintMask class whenever a new set of hints is encountered. # Add all hints as prefix to t2Program # After all operators have been processed, convert HintMask items into # hintmask ops and hintmask bytes. # Review operator list to optimize T2 operators. # # If doing MM-hinting, extra work is needed to maintain merge # compatibility between the reference font and the region fonts. # Although hints are generated for exactly the same outline features # in all fonts, they will have different values. Consequently, the # hints in a region font may not sort to the same order as in the # reference font. In addition, they may be filtered differently. Only # unique hints are added from the bez file to the hint list. Two hint # pairs may differ in one font, but not in another. # We work around these problems by first not filtering the hint # pairs for uniqueness when accumulating the hint lists. For the # reference font, once we have collected all the hints, we remove any # duplicate pairs, but keep a list of the retained hint pair indices # into the unfiltered hint pair list. For the region fonts, we # select hints from the unfiltered hint pair lists by using the selected # index list from the reference font. # Note that this breaks the CFF spec for snapshotted instances of the # CFF2 VF variable font, as hints may not be in ascending order, and the # hint list may contain duplicate hints. in_mm_hints = mm_hint_info is not None bezString = re.sub(r"%.+?\n", "", bezString) # suppress comments bezList = re.findall(r"(\S+)", bezString) if not bezList: return "" hhints = [] vhints = [] # Always assume a hint mask exists until proven # otherwise - make an initial HintMask. hint_mask = HintMask(0) hintMaskList = [hint_mask] vStem3Args = [] hStem3Args = [] v_stem3_list = [] h_stem3_list = [] arg_list = [] t2List = [] lastPathOp = None curX = 0 curY = 0 for token in bezList: try: val1 = round(float(token), 2) try: val2 = int(token) if int(val1) == val2: arg_list.append(val2) else: arg_list.append("%s 100 div" % int(val1 * 100)) except ValueError: arg_list.append(val1) continue except ValueError: pass if token == "newcolors": lastPathOp = token elif token in ["beginsubr", "endsubr"]: lastPathOp = token elif token == "snc": lastPathOp = token # The index into the t2list is kept # so we can quickly find them later. 
hint_mask = HintMask(len(t2List)) t2List.append([hint_mask]) hintMaskList.append(hint_mask) elif token == "enc": lastPathOp = token elif token == "rb": update_hints(in_mm_hints, arg_list, hhints, hint_mask, False) arg_list = [] lastPathOp = token elif token == "ry": update_hints(in_mm_hints, arg_list, vhints, hint_mask, True) arg_list = [] lastPathOp = token elif token == "rm": # vstem3 hints are vhints update_hints(in_mm_hints, arg_list, vhints, hint_mask, True) if (lastPathOp != token) and vStem3Args: # first rm, must be start of a new vstem3 # if we already have a set of vstems in vStem3Args, save them, # and then clear the vStem3Args so we can add the new set. v_stem3_list.append(vStem3Args) vStem3Args = [] vStem3Args.append(arg_list) arg_list = [] lastPathOp = token elif token == "rv": # hstem3 are hhints update_hints(in_mm_hints, arg_list, hhints, hint_mask, False) if (lastPathOp != token) and hStem3Args: # first rv, must be start of a new h countermask h_stem3_list.append(hStem3Args) hStem3Args = [] hStem3Args.append(arg_list) arg_list = [] lastPathOp = token elif token == "preflx1": # The preflx1/preflx2a sequence provides the same 'i' as the flex # sequence. The difference is that the preflx1/preflx2a sequence # provides the argument values needed for building a Type1 string # while the flex sequence is simply the 6 rrcurveto points. # Both sequences are always provided. lastPathOp = token arg_list = [] elif token == "preflx2a": lastPathOp = token del t2List[-1] arg_list = [] elif token == "flxa": lastPathOp = token argList1, curX, curY = makeRelativeCTArgs(arg_list[:6], curX, curY) argList2, curX, curY = makeRelativeCTArgs(arg_list[6:], curX, curY) arg_list = argList1 + argList2 t2List.append([arg_list[:12] + [50], "flex"]) arg_list = [] elif token == "sc": lastPathOp = token else: if token in ["rmt", "mt", "dt", "ct"]: lastPathOp = token t2Op = bezToT2.get(token, None) if token in ["mt", "dt"]: newList = [arg_list[0] - curX, arg_list[1] - curY] curX = arg_list[0] curY = arg_list[1] arg_list = newList elif token == "ct": arg_list, curX, curY = makeRelativeCTArgs(arg_list, curX, curY) if t2Op: t2List.append([arg_list, t2Op]) elif t2Op is None: raise KeyError("Unhandled operation %s %s" % (arg_list, token)) arg_list = [] # Add hints, if any. Must be done at the end of op processing to make sure # we have seen all the hints in the bez string. Note that the hintmask are # identified in the t2List by an index into the list; be careful NOT to # change the t2List length until the hintmasks have been converted. 
need_hint_masks = len(hintMaskList) > 1 if vStem3Args: v_stem3_list.append(vStem3Args) if hStem3Args: h_stem3_list.append(hStem3Args) t2Program = [] if hhints or vhints: if mm_hint_info is None: hhints.sort() vhints.sort() elif mm_hint_info.defined: # Apply hint order from reference font in MM hinting hhints = [hhints[j] for j in mm_hint_info.h_order] vhints = [vhints[j] for j in mm_hint_info.v_order] else: # Define hint order from reference font in MM hinting hhints, mm_hint_info.h_order = build_hint_order(hhints) vhints, mm_hint_info.v_order = build_hint_order(vhints) num_hhints = len(hhints) num_vhints = len(vhints) hint_limit = int((kStackLimit - 2) / 2) if num_hhints >= hint_limit: hhints = hhints[:hint_limit] if num_vhints >= hint_limit: vhints = vhints[:hint_limit] if mm_hint_info and mm_hint_info.defined: check_hint_pairs(hhints, mm_hint_info) last_idx = len(hhints) check_hint_pairs(vhints, mm_hint_info, last_idx) if hhints: t2Program = make_hint_list(hhints, need_hint_masks, is_h=True) if vhints: t2Program += make_hint_list(vhints, need_hint_masks, is_h=False) cntrmask_progam = None if mm_hint_info is None: if v_stem3_list or h_stem3_list: counter_mask_list = build_counter_mask_list(h_stem3_list, v_stem3_list) cntrmask_progam = [['cntrmask', cMask.maskByte(hhints, vhints)] for cMask in counter_mask_list] elif (not mm_hint_info.defined): if v_stem3_list or h_stem3_list: # this is the reference font - we need to build the list. counter_mask_list = build_counter_mask_list(h_stem3_list, v_stem3_list) cntrmask_progam = [['cntrmask', cMask.maskByte(hhints, vhints)] for cMask in counter_mask_list] mm_hint_info.cntr_masks = counter_mask_list else: # This is a region font - we need to used the reference font list. counter_mask_list = mm_hint_info.cntr_masks cntrmask_progam = [['cntrmask', cMask.mask] for cMask in counter_mask_list] if cntrmask_progam: cntrmask_progam = itertools.chain(*cntrmask_progam) t2Program.extend(cntrmask_progam) if need_hint_masks: # If there is not a hintsub before any drawing operators, then # add an initial first hint mask to the t2Program. if (mm_hint_info is None) or (not mm_hint_info.defined): # a single font and a reference font for mm hinting are # processed the same way if hintMaskList[1].listPos != 0: hBytes = hintMaskList[0].maskByte(hhints, vhints) t2Program.extend(["hintmask", hBytes]) if in_mm_hints: mm_hint_info.hint_masks.append(hintMaskList[0]) # Convert the rest of the hint masks # to a hintmask op and hintmask bytes. for hint_mask in hintMaskList[1:]: pos = hint_mask.listPos hBytes = hint_mask.maskByte(hhints, vhints) t2List[pos] = [["hintmask"], hBytes] if in_mm_hints: mm_hint_info.hint_masks.append(hint_mask) elif (mm_hint_info is not None): # This is a MM region font: # apply hint masks from reference font. 
try: hm0_mask = mm_hint_info.hint_masks[0].mask except IndexError: import pdb pdb.set_trace() if isinstance(t2List[0][0], HintMask): t2List[0] = [["hintmask"], hm0_mask] else: t2Program.extend(["hintmask", hm0_mask]) for hm in mm_hint_info.hint_masks[1:]: t2List[hm.listPos] = [["hintmask"], hm.mask] for entry in t2List: try: t2Program.extend(entry[0]) t2Program.append(entry[1]) except Exception: raise KeyError("Failed to extend t2Program with entry %s" % entry) if in_mm_hints: mm_hint_info.defined = True return t2Program def _run_tx(args): try: subprocess.check_call(["tx"] + args, stderr=subprocess.DEVNULL) except (subprocess.CalledProcessError, OSError) as e: raise FontParseError(e) class FixHintWidthDecompiler(SimpleT2Decompiler): # If we are using this class, we know the charstring has hints. def __init__(self, localSubrs, globalSubrs, private=None): self.hintMaskBytes = 0 # to silence false Codacy error. SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs, private) self.has_explicit_width = None self.h_hint_args = self.v_hint_args = None self.last_stem_index = None def op_hstem(self, index): self.countHints(is_vert=False) self.last_stem_index = index op_hstemhm = op_hstem def op_vstem(self, index): self.countHints(is_vert=True) self.last_stem_index = index op_vstemhm = op_vstem def op_hintmask(self, index): if not self.hintMaskBytes: # Note that I am assuming that there is never an op_vstemhm # followed by an op_hintmask. Since this is applied after saving # the font with fontTools, this is safe. self.countHints(is_vert=True) self.hintMaskBytes = (self.hintCount + 7) // 8 cs = self.callingStack[-1] hintMaskBytes, index = cs.getBytes(index, self.hintMaskBytes) return hintMaskBytes, index op_cntrmask = op_hintmask def countHints(self, is_vert): args = self.popall() if self.has_explicit_width is None: if (len(args) % 2) == 0: self.has_explicit_width = False else: self.has_explicit_width = True self.width_arg = args[0] args = args[1:] self.hintCount = self.hintCount + len(args) // 2 if is_vert: self.v_hint_args = args else: self.h_hint_args = args class CFFFontData: def __init__(self, path, font_format): self.inputPath = path self.font_format = font_format self.mm_hint_info_dict = {} self.t2_widths = {} self.is_cff2 = False self.is_vf = False self.vs_data_models = None if font_format == "OTF": # It is an OTF font, we can process it directly. font = TTFont(path) if "CFF " in font: cff_format = "CFF " elif "CFF2" in font: cff_format = "CFF2" self.is_cff2 = True else: raise FontParseError("OTF font has no CFF table <%s>." % path) else: # Else, package it in an OTF font. cff_format = "CFF " if font_format == "CFF": with open(path, "rb") as fp: data = fp.read() else: fd, temp_path = tempfile.mkstemp() os.close(fd) try: _run_tx(["-cff", "+b", "-std", path, temp_path]) with open(temp_path, "rb") as fp: data = fp.read() finally: os.remove(temp_path) font = TTFont() font['CFF '] = newTable('CFF ') font['CFF '].decompile(data, font) self.ttFont = font self.cffTable = font[cff_format] # for identifier in glyph-list: # Get charstring. self.topDict = self.cffTable.cff.topDictIndex[0] self.charStrings = self.topDict.CharStrings if 'fvar' in self.ttFont: # have not yet collected VF global data. self.is_vf = True fvar = self.ttFont['fvar'] CFF2 = self.cffTable CFF2.desubroutinize() topDict = CFF2.cff.topDictIndex[0] # We need a new charstring object into which we can save the # hinted CFF2 program data. 
Copying an existing charstring is a # little easier than creating a new one and making sure that all # properties are set correctly. self.temp_cs = copy.deepcopy(self.charStrings['.notdef']) self.vs_data_models = self.get_vs_data_models(topDict, fvar) def getGlyphList(self): return self.ttFont.getGlyphOrder() def getPSName(self): if self.is_cff2 and 'name' in self.ttFont: psName = next((name_rec.string for name_rec in self.ttFont[ 'name'].names if (name_rec.nameID == 6) and ( name_rec.platformID == 3))) psName = psName.decode('utf-16be') else: psName = self.cffTable.cff.fontNames[0] return psName def get_min_max(self, pTopDict, upm): if self.is_cff2 and 'hhea' in self.ttFont: font_max = self.ttFont['hhea'].ascent font_min = self.ttFont['hhea'].descent elif hasattr(pTopDict, 'FontBBox'): font_max = pTopDict.FontBBox[3] font_min = pTopDict.FontBBox[1] else: font_max = upm * 1.25 font_min = -upm * 0.25 alignment_min = min(-upm * 0.25, font_min) alignment_max = max(upm * 1.25, font_max) return alignment_min, alignment_max def convertToBez(self, glyphName, read_hints, round_coords, doAll=False): t2Wdth = None t2CharString = self.charStrings[glyphName] try: bezString, t2Wdth = convertT2GlyphToBez(t2CharString, read_hints, round_coords) # Note: the glyph name is important, as it is used by the C-code # for various heuristics, including [hv]stem3 derivation. bezString = "% " + glyphName + "\n" + bezString except SEACError: log.warning("Skipping %s: can't process SEAC composite glyphs.", glyphName) bezString = None self.t2_widths[glyphName] = t2Wdth return bezString def updateFromBez(self, bezData, glyphName, mm_hint_info=None): t2Program = convertBezToT2(bezData, mm_hint_info) if not self.is_cff2: t2_width_arg = self.t2_widths[glyphName] if t2_width_arg is not None: t2Program = [t2_width_arg] + t2Program if self.vs_data_models is not None: # It is a variable font. Accumulate the charstrings. self.glyph_programs.append(t2Program) else: # This is an MM source font. Update the font's charstring directly. t2CharString = self.charStrings[glyphName] t2CharString.program = t2Program def save(self, path): if path is None: path = self.inputPath if self.font_format == "OTF": self.ttFont.save(path) self.ttFont.close() else: data = self.ttFont["CFF "].compile(self.ttFont) if self.font_format == "CFF": with open(path, "wb") as fp: fp.write(data) else: fd, temp_path = tempfile.mkstemp() os.write(fd, data) os.close(fd) try: args = ["-t1", "-std"] if self.font_format == "PFB": args.append("-pfb") _run_tx(args + [temp_path, path]) finally: os.remove(temp_path) def close(self): self.ttFont.close() def isCID(self): return hasattr(self.topDict, "FDSelect") def hasFDArray(self): return self.is_cff2 or hasattr(self.topDict, "FDSelect") def flattenBlends(self, blendList): if type(blendList[0]) is list: flatList = [blendList[i][0] for i in range(len(blendList))] else: flatList = blendList return flatList def getFontInfo(self, allow_no_blues, noFlex, vCounterGlyphs, hCounterGlyphs, fdIndex=0): # The psautohint library needs the global font hint zones # and standard stem widths. # Format them into a single text string. # The text format is arbitrary, inherited from very old software, # but there is no real need to change it. 
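        # Illustrative example (not taken from the original sources): a
        # private dict whose StemSnapV is [80, 90] ends up as the string
        # fdDict.DominantV == "[80 90]"; the BlueValues entries are likewise
        # stored as string attributes on the FDDict further below.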
pTopDict = self.topDict if hasattr(pTopDict, "FDArray"): pDict = pTopDict.FDArray[fdIndex] else: pDict = pTopDict privateDict = pDict.Private fdDict = fdTools.FDDict() fdDict.LanguageGroup = getattr(privateDict, "LanguageGroup", "0") if hasattr(pDict, "FontMatrix"): fdDict.FontMatrix = pDict.FontMatrix else: fdDict.FontMatrix = pTopDict.FontMatrix upm = int(1 / fdDict.FontMatrix[0]) fdDict.OrigEmSqUnits = str(upm) fdDict.FontName = getattr(pTopDict, "FontName", self.getPSName()) blueValues = getattr(privateDict, "BlueValues", [])[:] numBlueValues = len(blueValues) if numBlueValues < 4: low, high = self.get_min_max(pTopDict, upm) # Make a set of inactive alignment zones: zones outside of the # font BBox so as not to affect hinting. Used when source font has # no BlueValues or has invalid BlueValues. Some fonts have bad BBox # values, so I don't let this be smaller than -upm*0.25, upm*1.25. inactiveAlignmentValues = [low, low, high, high] if allow_no_blues: blueValues = inactiveAlignmentValues numBlueValues = len(blueValues) else: raise FontParseError("Font must have at least four values in " "its BlueValues array for PSAutoHint to " "work!") blueValues.sort() # The first pair only is a bottom zone, where the first value is the # overshoot position. The rest are top zones, and second value of the # pair is the overshoot position. blueValues = self.flattenBlends(blueValues) blueValues[0] = blueValues[0] - blueValues[1] for i in range(3, numBlueValues, 2): blueValues[i] = blueValues[i] - blueValues[i - 1] blueValues = [str(v) for v in blueValues] numBlueValues = min(numBlueValues, len(fdTools.kBlueValueKeys)) for i in range(numBlueValues): key = fdTools.kBlueValueKeys[i] value = blueValues[i] setattr(fdDict, key, value) if hasattr(privateDict, "OtherBlues"): # For all OtherBlues, the pairs are bottom zones, and # the first value of each pair is the overshoot position. i = 0 numBlueValues = len(privateDict.OtherBlues) blueValues = privateDict.OtherBlues[:] blueValues.sort() blueValues = self.flattenBlends(blueValues) for i in range(0, numBlueValues, 2): blueValues[i] = blueValues[i] - blueValues[i + 1] blueValues = [str(v) for v in blueValues] numBlueValues = min(numBlueValues, len(fdTools.kOtherBlueValueKeys)) for i in range(numBlueValues): key = fdTools.kOtherBlueValueKeys[i] value = blueValues[i] setattr(fdDict, key, value) if hasattr(privateDict, "StemSnapV"): vstems = privateDict.StemSnapV elif hasattr(privateDict, "StdVW"): vstems = [privateDict.StdVW] else: if allow_no_blues: # dummy value. Needs to be larger than any hint will likely be, # as the autohint program strips out any hint wider than twice # the largest global stem width. vstems = [upm] else: raise FontParseError("Font has neither StemSnapV nor StdVW!") vstems.sort() vstems = self.flattenBlends(vstems) if (len(vstems) == 0) or ((len(vstems) == 1) and (vstems[0] < 1)): vstems = [upm] # dummy value that will allow PyAC to run log.warning("There is no value or 0 value for DominantV.") fdDict.DominantV = "[" + " ".join([str(v) for v in vstems]) + "]" if hasattr(privateDict, "StemSnapH"): hstems = privateDict.StemSnapH elif hasattr(privateDict, "StdHW"): hstems = [privateDict.StdHW] else: if allow_no_blues: # dummy value. Needs to be larger than any hint will likely be, # as the autohint program strips out any hint wider than twice # the largest global stem width. 
hstems = [upm] else: raise FontParseError("Font has neither StemSnapH nor StdHW!") hstems.sort() hstems = self.flattenBlends(hstems) if (len(hstems) == 0) or ((len(hstems) == 1) and (hstems[0] < 1)): hstems = [upm] # dummy value that will allow PyAC to run log.warning("There is no value or 0 value for DominantH.") fdDict.DominantH = "[" + " ".join([str(v) for v in hstems]) + "]" if noFlex: fdDict.FlexOK = "false" else: fdDict.FlexOK = "true" # Add candidate lists for counter hints, if any. if vCounterGlyphs: temp = " ".join(vCounterGlyphs) fdDict.VCounterChars = "( %s )" % (temp) if hCounterGlyphs: temp = " ".join(hCounterGlyphs) fdDict.HCounterChars = "( %s )" % (temp) fdDict.BlueFuzz = getattr(privateDict, "BlueFuzz", 1) return fdDict def getfdIndex(self, name): gid = self.ttFont.getGlyphID(name) if hasattr(self.topDict, "FDSelect"): fdIndex = self.topDict.FDSelect[gid] else: fdIndex = 0 return fdIndex def getfdInfo(self, allow_no_blues, noFlex, vCounterGlyphs, hCounterGlyphs, glyphList, fdIndex=0): topDict = self.topDict fdGlyphDict = None # Get the default fontinfo from the font's top dict. fdDict = self.getFontInfo( allow_no_blues, noFlex, vCounterGlyphs, hCounterGlyphs, fdIndex) fontDictList = [fdDict] # Check the fontinfo file, and add any other font dicts srcFontInfo = os.path.dirname(self.inputPath) srcFontInfo = os.path.join(srcFontInfo, "fontinfo") if os.path.exists(srcFontInfo): with open(srcFontInfo, "r", encoding="utf-8") as fi: fontInfoData = fi.read() fontInfoData = re.sub(r"#[^\r\n]+", "", fontInfoData) else: return fdGlyphDict, fontDictList if "FDDict" in fontInfoData: maxY = topDict.FontBBox[3] minY = topDict.FontBBox[1] fdGlyphDict, fontDictList, finalFDict = fdTools.parseFontInfoFile( fontDictList, fontInfoData, glyphList, maxY, minY, self.getPSName()) if hasattr(topDict, "FDArray"): private = topDict.FDArray[fdIndex].Private else: private = topDict.Private if finalFDict is None: # If a font dict was not explicitly specified for the # output font, use the first user-specified font dict. fdTools.mergeFDDicts(fontDictList[1:], private) else: fdTools.mergeFDDicts([finalFDict], private) return fdGlyphDict, fontDictList @staticmethod def args_to_hints(hint_args): hints = [hint_args[0:2]] prev = hints[0] for i in range(2, len(hint_args), 2): bottom = hint_args[i] + prev[0] + prev[1] hints.append([bottom, hint_args[i + 1]]) prev = hints[-1] return hints @staticmethod def extract_hint_args(program): width = None h_hint_args = [] v_hint_args = [] for i, token in enumerate(program): if type(token) is str: if i % 2 != 0: width = program[0] del program[0] idx = i - 1 else: idx = i if (token[:4] == 'vstem') or token[-3:] == 'mask': h_hint_args = [] v_hint_args = program[:idx] elif token[:5] == 'hstem': h_hint_args = program[:idx] v_program = program[idx + 1:] for j, vtoken in enumerate(v_program): if type(vtoken) is str: if (vtoken[:5] == 'vstem') or vtoken[-4:] == \ 'mask': v_hint_args = v_program[:j] break break return width, h_hint_args, v_hint_args def fix_t2_program_hints(self, program, mm_hint_info, is_reference_font): width_arg, h_hint_args, v_hint_args = self.extract_hint_args(program) # 1. Build list of good [vh]hints. 
bad_hint_idxs = list(mm_hint_info.bad_hint_idxs) bad_hint_idxs.sort() num_hhint_pairs = len(h_hint_args) // 2 for idx in reversed(bad_hint_idxs): if idx < num_hhint_pairs: hint_args = h_hint_args bottom_idx = idx * 2 else: hint_args = v_hint_args bottom_idx = (idx - num_hhint_pairs) * 2 delta = hint_args[bottom_idx] + hint_args[bottom_idx + 1] del hint_args[bottom_idx:bottom_idx + 2] if len(hint_args) > bottom_idx: hint_args[bottom_idx] += delta # delete old hints from program if mm_hint_info.cntr_masks: last_hint_idx = program.index('cntrmask') elif mm_hint_info.hint_masks: last_hint_idx = program.index('hintmask') else: for op in ['vstem', 'hstem']: try: last_hint_idx = program.index(op) break except IndexError: last_hint_idx = None if last_hint_idx is not None: del program[:last_hint_idx] # If there were v_hint_args, but they have now all been # deleted, the first token will still be 'vstem[hm]'. Delete it. if ((not v_hint_args) and program[0].startswith('vstem')): del program[0] # Add width and updated hints back. if width_arg is not None: hint_program = [width_arg] else: hint_program = [] if h_hint_args: op_hstem = 'hstemhm' if mm_hint_info.hint_masks else 'hstem' hint_program.extend(h_hint_args) hint_program.append(op_hstem) if v_hint_args: hint_program.extend(v_hint_args) # Don't need to append op_vstem, as this is still in hint_program. program = hint_program + program # Re-calculate the hint masks. if is_reference_font: hhints = self.args_to_hints(h_hint_args) vhints = self.args_to_hints(v_hint_args) for hm in mm_hint_info.hint_masks: hm.maskByte(hhints, vhints) # Apply fixed hint masks if mm_hint_info.hint_masks: hm_pos_list = [i for i, token in enumerate(program) if token == 'hintmask'] for i, hm in enumerate(mm_hint_info.hint_masks): pos = hm_pos_list[i] program[pos + 1] = hm.mask # Now fix the control masks. We will weed out a control mask # if it ends up with fewer than 3 hints. cntr_masks = mm_hint_info.cntr_masks if is_reference_font and cntr_masks: # Update mask bytes, # and remove control masks with fewer than 3 bits. mask_byte_list = [cm.mask for cm in cntr_masks] for cm in cntr_masks: cm.maskByte(hhints, vhints) new_cm_list = [cm for cm in cntr_masks if cm.num_bits >= 3] new_mask_byte_list = [cm.mask for cm in new_cm_list] if new_mask_byte_list != mask_byte_list: mm_hint_info.new_cntr_masks = new_cm_list if mm_hint_info.new_cntr_masks: # Remove all the old cntrmask ops num_old_cm = len(cntr_masks) idx = program.index('cntrmask') del program[idx:idx + num_old_cm * 2] cm_progam = [['cntrmask', cm.mask] for cm in mm_hint_info.new_cntr_masks] cm_progam = list(itertools.chain(*cm_progam)) program[idx:idx] = cm_progam return program def fix_glyph_hints(self, glyph_name, mm_hint_info, is_reference_font=None): # 1. Delete any bad hints. # 2. If reference font, recalculate the hint mask byte strings # 3. Replace hint masks. # 3. Fix cntr masks. if self.is_vf: # We get called once, and fix all the charstring programs. for i, t2_program in enumerate(self.glyph_programs): self.glyph_programs[i] = self.fix_t2_program_hints( t2_program, mm_hint_info, is_reference_font=(i == 0)) else: # we are called for each font in turn try: t2CharString = self.charStrings[glyph_name] except KeyError: return # Happens with sparse sources - just skip the glyph. 
program = self.fix_t2_program_hints(t2CharString.program, mm_hint_info, is_reference_font) t2CharString.program = program def get_vf_bez_glyphs(self, glyph_name): charstring = self.charStrings[glyph_name] if 'vsindex' in charstring.program: op_index = charstring.program.index('vsindex') vsindex = charstring.program[op_index - 1] else: vsindex = 0 self.vsindex = vsindex self.glyph_programs = [] vs_data_model = self.vs_data_model = self.vs_data_models[vsindex] bez_list = [] for vsi in vs_data_model.master_vsi_list: t2_program = interpolate_cff2_charstring(charstring, glyph_name, vsi.interpolateFromDeltas, vsindex) self.temp_cs.program = t2_program bezString, _ = convertT2GlyphToBez(self.temp_cs, True, True) # DBG Adding glyph name is useful only for debugging. bezString = "% {}\n".format(glyph_name) + bezString bez_list.append(bezString) return bez_list @staticmethod def get_vs_data_models(topDict, fvar): otvs = topDict.VarStore.otVarStore region_list = otvs.VarRegionList.Region axis_tags = [axis_entry.axisTag for axis_entry in fvar.axes] vs_data_models = [] for vsindex, var_data in enumerate(otvs.VarData): vsi = VarStoreInstancer(topDict.VarStore.otVarStore, fvar.axes, {}) master_vsi_list = [vsi] for region_idx in var_data.VarRegionIndex: region = region_list[region_idx] loc = {} for i, axis in enumerate(region.VarRegionAxis): loc[axis_tags[i]] = axis.PeakCoord vsi = VarStoreInstancer(topDict.VarStore.otVarStore, fvar.axes, loc) master_vsi_list.append(vsi) vdm = VarDataModel(var_data, vsindex, master_vsi_list) vs_data_models.append(vdm) return vs_data_models def merge_hinted_glyphs(self, name): new_t2cs = merge_hinted_programs(self.temp_cs, self.glyph_programs, name, self.vs_data_model) if self.vsindex: new_t2cs.program = [self.vsindex, 'vsindex'] + new_t2cs.program self.charStrings[name] = new_t2cs def interpolate_cff2_charstring(charstring, gname, interpolateFromDeltas, vsindex): # Interpolate charstring # e.g replace blend op args with regular args, # and discard vsindex op. 
new_program = [] last_i = 0 program = charstring.program for i, token in enumerate(program): if token == 'vsindex': if last_i != 0: new_program.extend(program[last_i:i - 1]) last_i = i + 1 elif token == 'blend': num_regions = charstring.getNumRegions(vsindex) numMasters = 1 + num_regions num_args = program[i - 1] # The program list starting at program[i] is now: # ..args for following operations # num_args values from the default font # num_args tuples, each with numMasters-1 delta values # num_blend_args # 'blend' argi = i - (num_args * numMasters + 1) if last_i != argi: new_program.extend(program[last_i:argi]) end_args = tuplei = argi + num_args master_args = [] while argi < end_args: next_ti = tuplei + num_regions deltas = program[tuplei:next_ti] val = interpolateFromDeltas(vsindex, deltas) master_val = program[argi] master_val += otRound(val) master_args.append(master_val) tuplei = next_ti argi += 1 new_program.extend(master_args) last_i = i + 1 if last_i != 0: new_program.extend(program[last_i:]) return new_program def merge_hinted_programs(charstring, t2_programs, gname, vs_data_model): num_masters = vs_data_model.num_masters var_pen = CFF2CharStringMergePen([], gname, num_masters, 0) charstring.outlineExtractor = MergeOutlineExtractor for i, t2_program in enumerate(t2_programs): var_pen.restart(i) charstring.program = t2_program charstring.draw(var_pen) new_charstring = var_pen.getCharString( private=charstring.private, globalSubrs=charstring.globalSubrs, var_model=vs_data_model, optimize=True) return new_charstring @_add_method(VarStoreInstancer) def get_scalars(self, vsindex, region_idx): varData = self._varData # The index key needs to be the master value index, which includes # the default font value. VarRegionIndex provides the region indices. scalars = {0: 1.0} # The default font always has a weight of 1.0 region_index = varData[vsindex].VarRegionIndex for idx in range(region_idx): # omit the scalar for the region. scalar = self._getScalar(region_index[idx]) if scalar: scalars[idx + 1] = scalar return scalars class VarDataModel(object): def __init__(self, var_data, vsindex, master_vsi_list): self.master_vsi_list = master_vsi_list self.var_data = var_data self._num_masters = len(master_vsi_list) self.delta_weights = [{}] # for default font value for region_idx, vsi in enumerate(master_vsi_list[1:]): scalars = vsi.get_scalars(vsindex, region_idx) self.delta_weights.append(scalars) @property def num_masters(self): return self._num_masters def getDeltas(self, master_values, *, round=noRound): assert len(master_values) == len(self.delta_weights) out = [] for i, scalars in enumerate(self.delta_weights): delta = master_values[i] for j, scalar in scalars.items(): if scalar: delta -= out[j] * scalar out.append(round(delta)) return out
apache-2.0
-5,200,578,984,855,039,000
37.201458
79
0.557419
false
mapzen/vector-datasource
integration-test/593-early-step.py
2
1194
from . import FixtureTest class EarlyStep(FixtureTest): def test_steps_with_regional_route(self): self.load_fixtures([ 'https://www.openstreetmap.org/way/24655593', 'https://www.openstreetmap.org/relation/2260059', ], clip=self.tile_bbox(12, 653, 1582)) self.assert_has_feature( 12, 653, 1582, 'roads', {'kind_detail': 'steps'}) def test_steps_without_route(self): self.load_fixtures([ 'https://www.openstreetmap.org/way/38060491', ]) self.assert_has_feature( 13, 1309, 3166, 'roads', {'kind_detail': 'steps'}) def test_min_zoom(self): # way 25292070 highway=steps, no route, but has name (Esmeralda, # Bernal, SF) self.load_fixtures(['https://www.openstreetmap.org/way/25292070']) self.assert_no_matching_feature( 13, 1310, 3167, 'roads', {'kind': 'path', 'kind_detail': 'steps', 'name': 'Esmeralda Ave.'}) self.assert_has_feature( 14, 2620, 6334, 'roads', {'kind': 'path', 'kind_detail': 'steps', 'name': 'Esmeralda Ave.'})
mit
-3,792,889,577,285,473,300
31.27027
74
0.546901
false
trinaldi/dotfiles
weechat/python/lnotify.py
1
4806
# Project: lnotify # Description: A libnotify script for weechat. Uses # subprocess.call to execute notify-send with arguments. # Author: kevr <[email protected]> # License: GPL3 # # 0.1.2 # added option to display weechat's icon by tomboy64 # # 0.1.3 # changed the way that icon to WeeChat notification is specified. # (No absolute path is needed) # /usr/bin/notify-send isn't needed anymore. # (pynotify is handling notifications now) # changed the way that lnotify works. When using gnome 3, every new # notification was creating a new notification instance. The way that # it is now, all WeeChat notifications are in a group (that have the # WeeChat icon and have WeeChat name). # Got report that it has better look for KDE users too. # # 0.1.4 # change hook_print callback argument type of displayed/highlight # (WeeChat >= 1.0) # # 0.2.0 # - changed entire system to hook_process_hashtable calls to notify-send # - also changed the configuration option names and methods # Note: If you want pynotify, refer to the 'notify.py' weechat script # # 0.3.0 # - added check to see whether the window has x focus so that notify will # still fire if the conversation tab is open, but the x window is not. # Note: This will check whether X is running first and whether xdotool # is installed. If anybody knows a better way to do this, please let me know. # # 0.3.1 # Fix https://github.com/weechat/scripts/issues/114 - where we would get # notifications for messages that we sent import weechat as weechat import subprocess from os import environ, path lnotify_name = "lnotify" lnotify_version = "0.3.1" lnotify_license = "GPL3" # convenient table checking for bools true = { "on": True, "off": False } # declare this here, will be global config() object # but is initialized in __main__ cfg = None class config(object): def __init__(self): # default options for lnotify self.opts = { "highlight": "on", "query": "on", "notify_away": "off", "icon": "weechat", } self.init_config() self.check_config() def init_config(self): for opt, value in self.opts.items(): temp = weechat.config_get_plugin(opt) if not len(temp): weechat.config_set_plugin(opt, value) def check_config(self): for opt in self.opts: self.opts[opt] = weechat.config_get_plugin(opt) def __getitem__(self, key): return self.opts[key] def printc(msg): weechat.prnt("", msg) def handle_msg(data, pbuffer, date, tags, displayed, highlight, prefix, message): highlight = bool(highlight) and cfg["highlight"] query = true[cfg["query"]] notify_away = true[cfg["notify_away"]] buffer_type = weechat.buffer_get_string(pbuffer, "localvar_type") away = weechat.buffer_get_string(pbuffer, "localvar_away") x_focus = False window_name = "" my_nickname = "nick_" + weechat.buffer_get_string(pbuffer, "localvar_nick") # Check to make sure we're in X and xdotool exists. # This is kinda crude, but I'm no X master. 
if (environ.get('DISPLAY') != None) and path.isfile("/bin/xdotool"): window_name = subprocess.check_output(["xdotool", "getwindowfocus", "getwindowname"]) if "WeeChat" in window_name: x_focus = True if pbuffer == weechat.current_buffer() and x_focus: return weechat.WEECHAT_RC_OK if away and not notify_away: return weechat.WEECHAT_RC_OK if my_nickname in tags: return weechat.WEECHAT_RC_OK buffer_name = weechat.buffer_get_string(pbuffer, "short_name") if buffer_type == "private" and query: notify_user(buffer_name, message) elif buffer_type == "channel" and highlight: notify_user("{} @ {}".format(prefix, buffer_name), message) return weechat.WEECHAT_RC_OK def process_cb(data, command, return_code, out, err): if return_code == weechat.WEECHAT_HOOK_PROCESS_ERROR: weechat.prnt("", "Error with command '%s'" % command) elif return_code != 0: weechat.prnt("", "return_code = %d" % return_code) weechat.prnt("", "notify-send has an error") return weechat.WEECHAT_RC_OK def notify_user(origin, message): hook = weechat.hook_process_hashtable("notify-send", { "arg1": "-i", "arg2": cfg["icon"], "arg3": "-a", "arg4": "WeeChat", "arg5": origin, "arg6": message }, 20000, "process_cb", "") return weechat.WEECHAT_RC_OK # execute initializations in order if __name__ == "__main__": weechat.register(lnotify_name, "kevr", lnotify_version, lnotify_license, "{} - A libnotify script for weechat".format(lnotify_name), "", "") cfg = config() print_hook = weechat.hook_print("", "", "", 1, "handle_msg", "")
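# --- Usage note (illustrative, not part of the original script) ---
# The options declared in config.opts above are read through WeeChat's plugin
# option system. Assuming the usual naming scheme for python scripts, they
# would typically be adjusted from within WeeChat like this:
#
#   /set plugins.var.python.lnotify.highlight on
#   /set plugins.var.python.lnotify.query on
#   /set plugins.var.python.lnotify.notify_away off
#   /set plugins.var.python.lnotify.icon weechat
#
# The exact option path is an assumption based on WeeChat conventions; check
# `/set plugins.var.python.lnotify.*` in your client to confirm.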
gpl-2.0
6,757,079,249,026,896,000
32.144828
93
0.653766
false
jpn--/larch
larch/data_services/h5/h5pod/generic.py
1
31510
import os from pathlib import Path import tables as tb import numpy import pandas import logging from ....util import Dict from ....util.aster import asterize from ....util.text_manip import truncate_path_for_display from ... import _reserved_names_ from ...pod import Pod from ...general import _sqz_same, selector_len_for from .... import warning class IncompatibleShape(ValueError): pass class NoKnownShape(ValueError): pass class CArray(tb.CArray): @property def DICTIONARY(self): return Dict( self._v_attrs.DICTIONARY ) @DICTIONARY.setter def DICTIONARY(self, x): self._v_attrs.DICTIONARY = dict(x) @property def DESCRIPTION(self): return self._v_attrs.TITLE @DESCRIPTION.setter def DESCRIPTION(self, x): self._v_attrs.TITLE = str(x) @property def TITLE(self): return self._v_attrs.TITLE @TITLE.setter def TITLE(self, x): self._v_attrs.TITLE = str(x) def __repr__(self): r = super().__repr__() try: d = self.DICTIONARY except: return r r += "\n dictionary := {\n " r += repr(d).replace("\n","\n ") r += "\n }" return r def uniques(self, slicer=None, counts=False): if isinstance(slicer, (bool, int)) and counts is False: counts = bool(slicer) slicer = None if slicer is None: slicer = slice(None) action = self[slicer] len_action = len(action) try: action = action[~numpy.isnan(action)] except TypeError: num_nan = 0 else: num_nan = len_action - len(action) if counts: x = numpy.unique(action, return_counts=counts) try: d = self.DICTIONARY except AttributeError: y = pandas.Series(x[1], x[0]) else: y = pandas.Series(x[1], [(d[j] if j in d else j) for j in x[0]]) if num_nan: y[numpy.nan] = num_nan return y if num_nan: numpy.append(action, numpy.nan) return numpy.unique(action) class H5Pod(Pod): def __init__(self, filename=None, mode='a', groupnode=None, *, h5f=None, inmemory=False, temp=False, complevel=1, complib='zlib', do_nothing=False, ident=None, shape=None, ): super().__init__(ident=ident) if do_nothing: return if isinstance(filename, H5Pod): # Copy / Re-Class contructor x = filename self._groupnode = x._groupnode self._h5f_own = False return if isinstance(filename, tb.group.Group) and groupnode is None: # Called with just a group node, use it groupnode = filename filename = None if isinstance(filename, (str,Path)): filename = os.fspath(filename) if groupnode is None: groupnode = "/" if filename is None and mode=='a' and groupnode is None: groupnode = '/' # default constructor for temp obj if isinstance(groupnode, tb.group.Group): # Use the existing info from this group node, ignore all other inputs self._groupnode = groupnode filename = groupnode._v_file.filename mode = groupnode._v_file.mode self._h5f_own = False elif isinstance(groupnode, str): # apply expanduser to filename to allow for home-folder based filenames if isinstance(filename,str): filename = os.path.expanduser(filename) if filename is None: temp = True from ....util.temporaryfile import TemporaryFile self._TemporaryFile = TemporaryFile(suffix='.h5d') filename = self._TemporaryFile.name if h5f is not None: self._h5f_own = False self._groupnode = self._h5f.get_node(groupnode) else: kwd = {} if inmemory or temp: kwd['driver']="H5FD_CORE" if temp: kwd['driver_core_backing_store']=0 if complevel is not None: kwd['filters']=tb.Filters(complib=complib, complevel=complevel) self._h5f_obj = tb.open_file(filename, mode, **kwd) self._h5f_own = True try: self._groupnode = self._h5f_obj.get_node(groupnode) except tb.NoSuchNodeError: if isinstance(groupnode, str): self._groupnode = self._h5f_obj._get_or_create_path(groupnode, True) else: raise 
ValueError('must give groupnode as `str` or `tables.group.Group`') self._recoverable = (filename, self._groupnode._v_pathname) if shape is not None: self.shape = shape @property def _h5f(self): return self._groupnode._v_file def __repr__(self): from ....util.text_manip import max_len s = f"<larch.{self.__class__.__name__}>" try: s += f"\n | file: {truncate_path_for_display(self.filename)}" if self._groupnode._v_pathname != "/": s += f"\n | node: {self._groupnode._v_pathname}" try: shape = self.shape except NoKnownShape: shape = None else: s += f"\n | shape: {shape}" try: metashape = self.metashape except (NoKnownShape, AttributeError): pass else: if metashape != shape: s += f"\n | metashape: {metashape}" if len(self._groupnode._v_children): s += "\n | data:" just = max_len(self._groupnode._v_children.keys()) for i in sorted(self._groupnode._v_children.keys()): try: node_dtype = self._groupnode._v_children[i].dtype except tb.NoSuchNodeError: node_dtype = "<no dtype>" s += "\n | {0:{2}s} ({1})".format(i, node_dtype, just) else: s += "\n | data: <empty>" except (tb.ClosedNodeError, tb.ClosedFileError): s += f"\n | <file is closed>" s += f"\n | file: {truncate_path_for_display(self._recoverable[0])}" s += f"\n | node: {self._recoverable[1]}" return s def __xml__(self, no_data=False, descriptions=True, dictionaries=False): from xmle import Elem x = Elem('div') t = x.elem('table', style="margin-top:1px;") t.elem('caption', text=f"<larch.{self.__class__.__name__}>", style="caption-side:top;text-align:left;font-family:Roboto;font-weight:700;font-style:normal;font-size:100%;padding:0px;") # try: ident = self.ident except AttributeError: pass else: tr = t.elem('tr') tr.elem('th', text='ident') tr.elem('td', text=ident) # try: filename = self.filename except AttributeError: pass else: tr = t.elem('tr') tr.elem('th', text='file') tr.elem('td', text=truncate_path_for_display(filename)) # try: filemode = self.filemode except AttributeError: pass else: tr = t.elem('tr') tr.elem('th', text='mode') tr.elem('td', text=truncate_path_for_display(self.filemode)) # try: v_pathname = self._groupnode._v_pathname except AttributeError: pass else: if self._groupnode._v_pathname != "/": tr = t.elem('tr') tr.elem('th', text='node') tr.elem('td', text=self._groupnode._v_pathname) # try: str_shape = str(self.shape) except NoKnownShape: pass else: tr = t.elem('tr') tr.elem('th', text='shape') tr.elem('td', text=str_shape) # try: str_shape = str(self.metashape) except (NoKnownShape, AttributeError): pass else: tr = t.elem('tr') tr.elem('th', text='metashape') tr.elem('td', text=str_shape) # try: str_durable_mask = f"0x{self.durable_mask:X}" except (AttributeError): pass else: if str_durable_mask!='0x0': tr = t.elem('tr') tr.elem('th', text='durable_mask') tr.elem('td', text=str_durable_mask) # if not no_data: if len(self._groupnode._v_children): tr = t.elem('tr') tr.elem('th', text='data', style='vertical-align:top;') td = tr.elem('td') t1 = td.elem('table', cls='dictionary') t1head = t1.elem('thead') t1headr = t1head.elem('tr') t1headr.elem('th', text='name') t1headr.elem('th', text='dtype') if descriptions: t1headr.elem('th', text='description') any_sources = 0 for i in sorted(self._groupnode._v_children.keys()): try: node_dtype = self._groupnode._v_children[i].dtype except (tb.NoSuchNodeError, AttributeError): node_dtype = "<no dtype>" if i not in _reserved_names_: tr1 = t1.elem('tr') tr1.elem('td', text=i) tr1.elem('td', text=node_dtype) if descriptions: try: title = self._groupnode._v_children[i]._v_attrs['TITLE'] 
except: title = "" else: tr1.elem('td', text=title) try: orig_source = self._groupnode._v_children[i]._v_attrs['ORIGINAL_SOURCE'] except: pass else: tr1.elem('td', text=orig_source) any_sources += 1 if any_sources: t1headr.elem('th', text='source') else: tr = t.elem('tr') tr.elem('th', text='data', style='vertical-align:top;') tr.elem('td', text='<empty>') return x def _repr_html_(self): return self.__xml__().tostring() def change_mode(self, mode, **kwarg): """Change the file mode of the underlying HDF5 file. Can be used to change from read-only to read-write. """ if mode == self.filemode: return if mode == 'w': raise TypeError("cannot change_mode to w, close the file and delete it") filename = self.filename groupnode_path = self._groupnode._v_pathname self.close() self.__init__(filename, mode, groupnode=groupnode_path, **kwarg) return self def reopen(self, mode='r', **kwarg): """Reopen the underlying HDF5 file. Can be used to change from read-only to read-write or to reopen a file that was closed. """ if mode == self.filemode: return if mode == 'w': raise TypeError("cannot change_mode to w, close the file and delete it") filename = self.filename groupnode_path = self.groupnode_path try: self.close() except tb.ClosedNodeError: pass self.__init__(filename, mode, groupnode=groupnode_path, **kwarg) return self def names(self): return [i for i in self._groupnode._v_children.keys() if i not in _reserved_names_] def rename_vars(self, *rename_vars): """ Rename variables according to patterns. Parameters ---------- rename_vars : 2-tuples A sequence of 2-tuples, giving (pattern, replacement) that will be fed to re.sub. For example, give ('^','prefix_') to add prefix to all variable names, or ('^from_this_name$','to_this_name') to change an exact name from one thing to another. 
""" import re for pattern, replacement in rename_vars: q = [(re.sub(pattern, replacement, k),k) for k in self.names()] for _to,_from in q: if _to != _from: self._groupnode._v_children[_from]._f_rename(_to) def reshape(self, *shape): if len(shape)==0: raise ValueError('no shape given') if len(shape)==1 and isinstance(shape[0], tuple): shape = shape[0] if isinstance(shape, int): shape = (shape,) if not isinstance(shape, tuple): raise TypeError('reshape must be int or tuple') if len(shape)==1 and shape[0] == -1: shape = (int(numpy.product(self.shape)), ) elif len(shape)==2: if shape[0] == -1 and shape[1] > 0: shape = (int(numpy.product(self.shape) / shape[1]), shape[1]) if shape[1] == -1 and shape[0] > 0: shape = (shape[0], int(numpy.product(self.shape) / shape[0])) if numpy.product(shape) != numpy.product(self.shape): raise ValueError(f'incompatible reshape {shape} for current shape {self.shape}') #print("reshape to", shape) for k in self._groupnode._v_children.keys(): #print("reshape",k,shape) self._groupnode._v_children[k].shape = shape self.shape = shape def __dir__(self): x = super().__dir__() x.extend(self.names()) return x @property def filename(self): try: return self._groupnode._v_file.filename except (tb.ClosedNodeError, tb.ClosedFileError) as err: try: return self._last_closed_filename except AttributeError: raise err @property def filemode(self): try: return self._groupnode._v_file.mode except (tb.ClosedNodeError, tb.ClosedFileError) as err: return None @property def groupnode_path(self): try: return self._groupnode._v_pathname except (tb.ClosedNodeError, tb.ClosedFileError) as err: try: return self._last_closed_groupnode_path except AttributeError: raise err @property def n_cases(self): return self.shape[0] @property def shape(self): """The shape of the pod. """ if 'SHAPE' in self._groupnode._v_attrs: return tuple(self._groupnode._v_attrs['SHAPE'][:]) if len(self.names()): for v in self._groupnode._v_children.values(): try: found_shape = v.shape except: pass else: try: self.shape = found_shape except: pass return tuple(found_shape) raise NoKnownShape() @shape.setter def shape(self, x): # if self._groupnode._v_nchildren > 0: # raise ValueError('this pod has shape {!s} but you want to set {!s}'.format(self.shape, x)) # if self._groupnode._v_nchildren == 0: self._groupnode._v_attrs.SHAPE = numpy.asarray(x, dtype=int) @property def metashape(self): """The actual shape of the data underlying the pod, often same as shape.""" return self.shape def add_expression(self, name, expression, *, overwrite=False, title=None, dictionary=None, dtype=None): arr = self[expression] if dtype is not None: arr = arr.astype(dtype) try: dtype_str = "("+dtype.__name__+")" except: dtype_str = "" original_source = f'={dtype_str} {expression}' else: original_source = f'= {expression}' if overwrite=='ignore': overwrite = False if_exists = 'ignore' else: if_exists = 'raise' try: self.add_array(name, arr, overwrite=overwrite, title=title, dictionary=dictionary, original_source=original_source, rel_original_source=False) except tb.exceptions.NodeError: if if_exists=='ignore': pass else: raise def add_array(self, name, arr, *, overwrite=False, original_source=None, rel_original_source=True, title=None, dictionary=None, fix_name_problems=True): """Create a new variable in the H5Pod. Creating a new variable in the data might be convenient in some instances. If you create an array externally, you can add it to the file easily with this command. Parameters ---------- name : str The name of the new variable. 
arr : ndarray An array to add as the new variable. Must have the correct shape. overwrite : bool Should the variable be overwritten if it already exists, default to False. original_source : str Optionally, give the file name or other description of the source of the data in this array. rel_original_source : bool If true, change the absolute path of the original_source to a relative path viz this file. title : str, optional A descriptive title for the variable, typically a short phrase but an arbitrary length description is allowed. dictionary : dict, optional A data dictionary explaining some or all of the values in this field. Even for otherwise self-explanatory numerical values, the dictionary may give useful information about particular out of range values. Raises ------ tables.exceptions.NodeError If a variable of the same name already exists. """ if name in _reserved_names_: raise ValueError(f'{name} is a reserved name') if '/' in name and fix_name_problems: import warnings warnings.warn(f'the ``/`` character is not allowed in variable names ({name})\n' f'changing it to ``|``') name = name.replace('/','|') try: existing_shape = tuple(self.metashape) except NoKnownShape: pass else: if existing_shape != arr.shape: # maybe just has extra size-1 dims, check for that... arr = arr.squeeze() if self.podtype == 'idcs': if existing_shape[:-1] != arr.shape: raise IncompatibleShape( "new array must have shape {!s} but the array given has shape {!s}".format(self.shape, arr.shape)) else: if existing_shape != arr.shape: raise IncompatibleShape( "new array must have shape {!s} but the array given has shape {!s}".format(self.shape, arr.shape)) if overwrite: self.delete_array(name) try: h5var = self._h5f.create_carray(self._groupnode, name, obj=arr) except ValueError as valerr: if "unknown type" in str(valerr) or "unknown kind" in str(valerr): # changed for pytables 3.3 try: tb_atom = tb.Atom.from_dtype(arr.dtype) except ValueError: log = logging.getLogger('H5') try: maxlen = int(len(max(arr.astype(str), key=len))) except ValueError: import datetime if 0: # isinstance(arr[0], datetime.time): log.warning(f" column {name} is datetime.time, converting to Time32") tb_atom = tb.atom.Time32Atom() # convert_datetime_time_to_epoch_seconds = lambda tm: tm.hour*3600+ tm.minute*60 + tm.second def convert_datetime_time_to_epoch_seconds(tm): try: return tm.hour * 3600 + tm.minute * 60 + tm.second except: if numpy.isnan(tm): return 0 else: raise arr = arr.apply(convert_datetime_time_to_epoch_seconds).astype(numpy.int32).values else: # import __main__ # __main__.err_df = df raise else: maxlen = max(maxlen, 8) if arr.dtype != object: log.warning(f"cannot create column {name} as dtype {arr.dtype}, converting to S{maxlen:d}") arr = arr.astype('S{}'.format(maxlen)) tb_atom = tb.Atom.from_dtype(arr.dtype) h5var = self._h5f.create_carray(self._groupnode, name, tb_atom, shape=arr.shape) h5var[:] = arr else: raise if rel_original_source and original_source and original_source[0] != '=': basedir = os.path.dirname(self.filename) original_source = os.path.relpath(original_source, start=basedir) if original_source is not None: h5var._v_attrs.ORIGINAL_SOURCE = original_source if title is not None: h5var._v_attrs.TITLE = title if dictionary is not None: h5var._v_attrs.DICTIONARY = dictionary def add_blank(self, name, shape=None, dtype=numpy.float64, **kwargs): """Create a new variable in the H5Pod. Creating a new variable in the data might be convenient in some instances. 
If you create an array externally, you can add it to the file easily with this command. Parameters ---------- name : str The name of the new variable. dtype : dtype The dtype of the empty array to add as the new variable. shape : tuple The shape of the empty array to add. Must be compatible with existing shape, if any. Other keyword parameters are passed through to `add_array`. Raises ------ tables.exceptions.NodeError If a variable of the same name already exists. NoKnownShape If shape is not given and not already known from the file. """ if name in _reserved_names_: raise ValueError(f'{name} is a reserved name') try: existing_shape = tuple(self.metashape) except NoKnownShape: if shape is None: raise else: if shape is None: shape = existing_shape if existing_shape != tuple(shape): raise IncompatibleShape( "new array must have shape {!s} but the array given has shape {!s}".format(self.shape, shape)) return self.add_array(name, numpy.zeros(shape, dtype=dtype), **kwargs) def delete_array(self, name, recursive=True): """Delete an existing variable. Parameters ---------- name : str The name of the data node to remove. recursive : bool If the data node is a group, recursively remove all sub-nodes. """ if name in _reserved_names_: raise ValueError(f'{name} is a reserved name') try: self._h5f.remove_node(self._groupnode, name, recursive) except tb.exceptions.NoSuchNodeError: pass def flush(self, *arg, **kwargs): return self._h5f.flush(*arg, **kwargs) def close(self, *arg, **kwargs): try: self._last_closed_filename = self.filename self._last_closed_groupnode_path = self.groupnode_path except: pass return self._h5f.close(*arg, **kwargs) @property def podtype(self): return '' def uri(self, mode=None): from urllib.parse import urlunparse q_dict = {} if self.podtype: q_dict['type'] = self.podtype if mode: q_dict['mode'] = mode q = "&".join(f'{k}={v}' for k,v in q_dict.items()) return urlunparse(['file', '', self.filename, '', q, self._groupnode._v_pathname]) def _remake_command(self, cmd, selector=None, receiver=None): from tokenize import tokenize, untokenize, NAME, OP, STRING DOT = (OP, '.') COLON = (OP, ':') COMMA = (OP, ',') OBRAC = (OP, '[') CBRAC = (OP, ']') OPAR = (OP, '(') CPAR = (OP, ')') from io import BytesIO recommand = [] if receiver: recommand += [(NAME, receiver), OBRAC, COLON, CBRAC, (OP, '='), ] try: cmd_encode = cmd.encode('utf-8') except AttributeError: cmd_encode = str(cmd).encode('utf-8') dims = len(self.shape) g = tokenize(BytesIO(cmd_encode).readline) if selector is None: screen_tokens = [COLON,] else: # try: # slicer_encode = selector.encode('utf-8') # except AttributeError: # slicer_encode = str(selector).encode('utf-8') # screen_tokens = [(toknum, tokval) for toknum, tokval, _, _, _ in tokenize(BytesIO(slicer_encode).readline)] screen_tokens = [(NAME, 'selector'), ] for toknum, tokval, _, _, _ in g: if toknum == NAME and tokval in self._groupnode: # replace NAME tokens partial = [(NAME, 'self'), DOT, (NAME, '_groupnode'), DOT, (NAME, tokval), OBRAC, ] partial += screen_tokens if len(self._groupnode._v_children[tokval].shape)>1: partial += [COMMA, COLON, ] if len(self._groupnode._v_children[tokval].shape)>2: partial += [COMMA, COLON, ] if len(self._groupnode._v_children[tokval].shape)>3: partial += [COMMA, COLON, ] partial += [CBRAC,] recommand.extend(partial) else: recommand.append((toknum, tokval)) # print("<recommand>") # print(recommand) # print("</recommand>") ret = untokenize(recommand).decode('utf-8') return asterize(ret, mode="exec" if receiver is not None else 
"eval"), ret def _evaluate_single_item(self, cmd, selector=None, receiver=None): j, j_plain = self._remake_command(cmd, selector=selector, receiver='receiver' if receiver is not None else None) # important globals from ....util.aster import inXd from numpy import log, exp, log1p, absolute, fabs, sqrt, isnan, isfinite, logaddexp, fmin, fmax, nan_to_num, sin, cos, pi from ....util.common_functions import piece, normalize, boolean try: if receiver is not None: exec(j) else: return eval(j) except Exception as exc: args = exc.args if not args: arg0 = '' else: arg0 = args[0] arg0 = arg0 + '\nwithin parsed command: "{!s}"'.format(cmd) arg0 = arg0 + '\nwithin re-parsed command: "{!s}"'.format(j_plain) if selector is not None: arg0 = arg0 + '\nwith selector: "{!s}"'.format(selector) if "max" in cmd: arg0 = arg0 + '\n(note to get the maximum of arrays use "fmax" not "max")'.format(cmd) if "min" in cmd: arg0 = arg0 + '\n(note to get the minimum of arrays use "fmin" not "min")'.format(cmd) if isinstance(exc, NameError): badname = str(exc).split("'")[1] goodnames = dir() from ....util.text_manip import case_insensitive_close_matches did_you_mean_list = case_insensitive_close_matches(badname, goodnames, n=3, cutoff=0.1, excpt=None) if len(did_you_mean_list) > 0: arg0 = arg0 + '\n' + "did you mean {}?".format( " or ".join("'{}'".format(s) for s in did_you_mean_list)) exc.args = (arg0,) + args[1:] raise def __contains__(self, item): if item in self._groupnode: return True return False def dtype_of(self, name): """dtype of raw data for a particular named data item.""" if name in self._groupnode._v_children: return self._groupnode._v_children[name].dtype raise KeyError(f"{name} not found") def load_data_item(self, name, result, selector=None): """Load a slice of the pod arrays into an array in memory""" # convert a single name string to a one item list _sqz_same(result.shape, [selector_len_for(selector, self.shape[0]), *self.shape[1:]]) try: result[:] = self._evaluate_single_item(name, selector) except IndexError: # https://github.com/PyTables/PyTables/issues/310 _temp = self._evaluate_single_item(name, None) try: result[:] = _temp[selector] except Exception as err: raise ValueError(f'_temp.shape={_temp.shape} selector.shape={selector.shape}') from err return result def load_meta_data_item(self, name, result, selector=None): """Load a slice of the pod arrays into an array in memory""" if selector is not None: import warnings warnings.warn('selector not compatible for load_meta_data_item') # convert a single name string to a one item list _sqz_same(result.shape, self.metashape) try: result[:] = self._evaluate_single_item(name, selector) except IndexError: # https://github.com/PyTables/PyTables/issues/310 result[:] = self._evaluate_single_item(name, None)[selector] return result def get_data_dictionary(self, name): """dictionary of raw data for a particular named data item.""" if name in self._groupnode._v_children: return self._groupnode._v_children[name].DICTIONARY raise KeyError(f"{name} not found") def __getitem__(self, item): if isinstance(item, tuple) and len(item)>=2 and isinstance(item[-1], slice): names, slice_ = item[:-1], item[-1] else: names = item slice_ = None # convert a single name string to a one item list if isinstance(names, str): names = [names,] dtype = numpy.float64 result = numpy.zeros( [selector_len_for(slice_, self.shape[0]), *self.shape[1:], len(names)], dtype=dtype) for i, cmd in enumerate(names): result[...,i] = self._evaluate_single_item(cmd, slice_) return result def 
_load_into(self, names, slc, result): """Load a slice of the pod arrays into an array in memory""" # convert a single name string to a one item list if isinstance(names, str): names = [names, ] _sqz_same(result.shape,[selector_len_for(slc, self.shape[0]), *self.shape[1:], len(names)]) for i, cmd in enumerate(names): result[..., i] = self._evaluate_single_item(cmd, slc) return result def load_into(self, names, selector, result): """Load a slice of the pod arrays into an array in memory""" # convert a single name string to a one item list if isinstance(names, str): names = [names, ] _sqz_same(result.shape, [selector_len_for(selector, self.shape[0]), *self.shape[1:], len(names)]) for i, cmd in enumerate(names): result[..., i] = self._evaluate_single_item(cmd, selector) return result def __getattr__(self, item): if item in self._groupnode._v_children: ret = self._groupnode._v_children[item] if isinstance(ret, tb.CArray): ret.__class__ = CArray return ret raise AttributeError(item) class _dataframe_factory(): def __init__(self, obj): self.obj = obj def __getattr__(self, item): return getattr(self.obj,item) def __getitem__(self, item): if len(self.obj.shape) > 1: try: metashape = self.obj.metashape except AttributeError: raise TypeError('dataframe access currently only compatible with 1d, use regular arrays for higher dimensions') else: if len(metashape) > 1: raise TypeError('dataframe access currently only compatible with 1d, use regular arrays for higher dimensions') if isinstance(item, tuple) and len(item)>=2 and isinstance(item[-1], slice): names, slice_ = item[:-1], item[-1] else: names = item slice_ = None # convert a single name string to a one item list if isinstance(names, str): names = [names,] result = pandas.DataFrame() for i, cmd in enumerate(names): j = self.obj._evaluate_single_item(cmd, selector=slice_) try: #result.loc[:,cmd] = j result = result.assign(**{cmd:j}) except: print() print(f"An error in tacking {cmd} to result") print(f"j.dtype is {j.dtype}") print(f"j.shape is {j.shape}") print(f"result.shape is {result.shape}") print() raise return result @property def dataframe(self): return self._dataframe_factory(self) def astype(self, t:str): from . 
import _pod_types cls = _pod_types[t.lower()] return cls(self) def statistics_for(self, var, histogram=True, selector=None, **kwargs): a = self.get_data_item(var) if isinstance(selector, str): selector = self.get_data_item(selector, None, dtype=bool) if selector is not None: a = a[selector] from ....util.statistics import statistics_for_array try: dictionary = self._groupnode._v_children[var]._v_attrs.DICTIONARY except: dictionary = None try: descrip = self._groupnode._v_children[var]._v_attrs.TITLE except: descrip = None result = statistics_for_array(a, histogram=histogram, varname=var, dictionary=dictionary, **kwargs) if descrip is not None and descrip!="": result.description = descrip if dictionary is not None: result.dictionary = Dict(dictionary) return result def statistics(self, vars=None, histogram=False, selector=None): if vars is None: vars = self.names() from ....util import Dict from ....util.arraytools import scalarize import numpy.ma as ma stats = pandas.DataFrame( columns=[ 'n', 'minimum', 'maximum', 'median', 'mean', 'stdev', 'nonzero_minimum', 'nonzero_maximum', 'nonzero_mean', 'nonzero_stdev', 'zeros', 'positives', 'negatives', ] + (['histogram'] if histogram else []), index = vars, ) for var in vars: if selector is not None: if isinstance(selector, slice): a = self[var, selector] else: a = self[var][selector] else: a = self[var] stats.loc[var,'n'] = scalarize(a.shape[0]) stats.loc[var,'minimum'] = scalarize(numpy.nanmin(a, axis=0)) stats.loc[var,'maximum'] = scalarize(numpy.nanmax(a, axis=0)) stats.loc[var,'median'] = scalarize(numpy.nanmedian(a, axis=0)) if histogram: from ....util.histograms import sizable_histogram_figure, seems_like_discrete_data try: dictionary = self.get_data_dictionary(var) except: dictionary = None stats.loc[var,'histogram'] = sizable_histogram_figure( a, title=None, xlabel=var, ylabel='Frequency', discrete=seems_like_discrete_data(a, dictionary) ) ax = ma.masked_array(a, mask=~numpy.isfinite(a)) stats.loc[var,'mean'] = scalarize(numpy.mean(ax, axis=0)) stats.loc[var,'stdev'] = scalarize(numpy.std(ax, axis=0)) stats.loc[var, 'zeros'] = scalarize(numpy.sum(numpy.logical_not(ax), axis=0)) stats.loc[var, 'positives'] = scalarize(numpy.sum(ax>0, axis=0)) stats.loc[var, 'negatives'] = scalarize(numpy.sum(ax<0, axis=0)) ax.mask |= (ax==0) stats.loc[var,'nonzero_minimum'] = scalarize(numpy.min(ax, axis=0)) stats.loc[var,'nonzero_maximum'] = scalarize(numpy.max(ax, axis=0)) stats.loc[var,'nonzero_mean'] = scalarize(numpy.mean(ax, axis=0)) stats.loc[var,'nonzero_stdev'] = scalarize(numpy.std(ax, axis=0)) if histogram: from ....util.dataframe import DataFrameViewer return DataFrameViewer(stats) return stats def get_row(self, rownum, lookup=True): result = Dict() for i in self.names(): result[i] = self._groupnode._v_children[i][rownum] if lookup: try: d = self._groupnode._v_children[i]._v_attrs.DICTIONARY except (KeyError, AttributeError): pass else: if result[i] in d: result[i] = f"{result[i]} ({d[result[i]]})" return result @property def vault(self): try: return self.__vault except: from ..h5util import get_or_create_group from ..h5vault import H5Vault v = get_or_create_group( self._h5f, self._groupnode, name='_VAULT_', title='', filters=None, createparents=False, skip_on_readonly=False ) self.__vault = H5Vault(v) return self.__vault
gpl-3.0
3,610,165,919,703,294,500
29.181992
185
0.650428
false
kyrsjo/AcdOpti
src/acdOpti/AcdOptiMeshTemplate.py
1
8600
# -*- coding: utf8 -*- # # Copyright 2011 Kyrre Ness Sjøbæk # This file is part of AcdOpti. # # AcdOpti is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # AcdOpti is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with AcdOpti. If not, see <http://www.gnu.org/licenses/>. from AcdOptiExceptions import AcdOptiException_meshTemplate_createFail,\ AcdOptiException_meshTemplate_loadFail,\ AcdOptiException_meshTemplate_lockdownError,\ AcdOptiException_dataDict_getValsSingle from AcdOptiFileParser import AcdOptiFileParser_simple, DataDict from AcdOptiCubitTemplateFile import AcdOptiCubitTemplateFile import os class AcdOptiMeshTemplate: """ Class that represents a specific mesh template script, which can be applied in different configurations to different geometry instances. One mesh template = meshing script with variables + default values for those variables. """ def __init__(self, folder): self.folder = folder #Construct the instance name from folder instname = folder if instname[-1] == "/": instname = instname[0:-1] self.instName = instname = os.path.split(instname)[1] #Load the param file try: self.__paramFile = AcdOptiFileParser_simple(\ os.path.join(self.folder, "paramFile.set"), 'rw') except IOError: raise AcdOptiException_meshTemplate_loadFail("File paramFile.set not found") if self.__paramFile.dataDict.getValSingle("fileID")\ != "meshTemplateParamFile": raise AcdOptiException_meshTemplate_loadFail\ ("Wrong fileID, got \""\ + self.__paramFile.dataDict.getValSingle("fileID")\ + "\" while loading paramFile") if self.__paramFile.dataDict.getValSingle("instName")\ != instname: raise AcdOptiException_meshTemplate_loadFail("templateName doesn't match folder name") lock = self.__paramFile.dataDict.getValSingle("lockdown") if lock == "True": self.lockdown = True elif lock == "False": self.lockdown = False else: raise AcdOptiException_meshTemplate_loadFail(\ "Invalid content in field \"lockdown\" of file paramFile.set, got'"+lock+"'") #Load the default parameters self.__paramDefaults = {} try: paramDefaults_data = self.__paramFile.dataDict.getValSingle("paramDefaults") except AcdOptiException_dataDict_getValsSingle: raise AcdOptiException_meshTemplate_loadFail\ ("Couldn't load paramDefaults from file paramFile.set") if not isinstance(paramDefaults_data,DataDict): raise AcdOptiException_meshTemplate_loadFail\ ("paramDefaults from paramFile is not a DataDict!") for (k,v) in zip(paramDefaults_data.keys, paramDefaults_data.vals): if k in self.__paramDefaults: raise AcdOptiException_meshTemplate_loadFail\ ("Double occurrence of key \"" + k + "\" in paramFile") self.__paramDefaults[k] = v #Load the template file self.meshTemplateFile = AcdOptiCubitTemplateFile(os.path.join(folder,self.meshTemplateFile_name)) #Initialize __meshInstances self.__meshInstances = [] def mayDelete(self,key): #TODO!! """ Check if key can be deleted - if it has it been overridden in a MeshInstance or (TODO) is used in the cubit template script, return the problematic AcdOptiMeshInstance or AcdOptiCubitTemplateFile. Normally return None, meaning that the key may be deleted. 
""" print "AcdOptiMeshTemplate::mayDelete()" for mi in self.__meshInstances: if key in mi.templateOverrides_getKeys(): return mi return None def write(self): """ Write the current contents of this class to paramFile, which is written to file. """ print "AcdOptiMeshTemplate::write()" self.__paramFile.dataDict.setValSingle("lockdown", str(self.lockdown)) paramDefaults_data = self.__paramFile.dataDict.getValSingle("paramDefaults") paramDefaults_data.clear() for (k,v) in self.__paramDefaults.iteritems(): paramDefaults_data.pushBack(k,v) self.__paramFile.write() def registerInstance(self, instance): """ Register a mesh instance with this mesh template. """ self.__meshInstances.append(instance) def setLockdown(self): """ Set lockdown = True, indicating that a mesh instance has been generated, and that the template should not be touched. Also writes data to file. """ print "AcdOptMeshTemplate::setLockdown()" self.lockdown = True self.write() def clearLockdown(self): """ Clears lockdown of the mesh template and any mesh instances, deleting generated cubit objects. Also writes instance to file. """ print "AcdOptiMeshTemplate::clearLockdown()" for meshInstance in self.__meshInstances: meshInstance.clearLockdown() self.lockdown = False self.write() def paramDefaults_insert(self,key,val): if self.lockdown: raise AcdOptiException_meshTemplate_lockdownError self.__paramDefaults[key] = val return val def paramDefaults_get(self,key): return self.__paramDefaults[key] def paramDefaults_getKeys(self): return self.__paramDefaults.keys() def paramDefaults_copy(self): return self.__paramDefaults.copy() def paramDefaults_len(self): return len(self.__paramDefaults) def paramDefaults_del(self,key): if self.lockdown: raise AcdOptiException_meshTemplate_lockdownError val = self.__paramDefaults[key] del self.__paramDefaults[key] return val def paramDefaults_clear(self): if self.lockdown: raise AcdOptiException_meshTemplate_lockdownError self.__paramDefaults.clear() @staticmethod def createNew(folder): """ Creates a new empty meshTemplate in a not previously existing folder. Folder name should be the same as geometry instance name. Raises AcdOptiException_meshTemplate_createFail is something goes wrong (such as "Folder already exists") """ #Construct the instance name from folder instname = folder if instname[-1] == "/": instname = instname[0:-1] instname = os.path.split(instname)[1] if os.path.isdir(folder): raise AcdOptiException_meshTemplate_createFail ("Folder \"" + folder + "\" already exists") os.mkdir(folder) #Create the paramFile paramFile = AcdOptiFileParser_simple(\ os.path.join(folder, "paramFile.set"), 'w') paramFile.dataDict.pushBack("fileID", "meshTemplateParamFile") paramFile.dataDict.pushBack("instName", instname) paramFile.dataDict.pushBack("lockdown", "False") paramFile.dataDict.pushBack("paramDefaults", DataDict()) paramFile.write() #Default empty template file AcdOptiCubitTemplateFile.createEmpty(os.path.join(folder,AcdOptiMeshTemplate.meshTemplateFile_name)) #Object variables folder = None #Folder where this template lives instName = None #Name of this template meshTemplateFile_name = "meshTemplateFile.jou.template" meshTemplateFile = None __meshInstances = None #List of mesh instances implementing this template lockdown = False __paramFile = None __paramDefaults = None
gpl-3.0
-3,208,652,159,724,971,500
37.044248
108
0.625843
false
jumpinjackie/fdotoolbox
Thirdparty/IronPython/Tutorial/wfdemo.py
1
1157
##################################################################################### # # Copyright (c) Microsoft Corporation. All rights reserved. # # This source code is subject to terms and conditions of the Microsoft Public License. A # copy of the license can be found in the License.html file at the root of this distribution. If # you cannot locate the Microsoft Public License, please send an email to # [email protected]. By using this source code in any fashion, you are agreeing to be bound # by the terms of the Microsoft Public License. # # You must not remove this notice, or any other, from this software. # # ##################################################################################### import clr clr.AddReferenceByPartialName("System.Windows.Forms") clr.AddReferenceByPartialName("System.Drawing") from System.Windows.Forms import * from System.Drawing import * f = Form() font = Font("Verdana", 15) f.Text = "My First Interactive Application" def click(f, a): l = Label(Text = "Hello") l.AutoSize = True l.Location = a.Location l.Font = font f.Controls.Add(l) f.Click += click Application.Run(f)
lgpl-2.1
764,337,017,331,379,100
32.057143
97
0.624028
false
kperun/nestml
pynestml/meta_model/ast_return_stmt.py
1
2517
#
# ast_return_stmt.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
from pynestml.meta_model.ast_expression import ASTExpression
from pynestml.meta_model.ast_node import ASTNode
from pynestml.meta_model.ast_simple_expression import ASTSimpleExpression


class ASTReturnStmt(ASTNode):
    """
    This class is used to store a return statement.
    A ReturnStmt models the return statement in a function.
    @attribute minus An optional sign
    @attribute definingVariable Name of the variable
    Grammar:
        returnStmt : 'return' expr?;
    Attributes:
        expression (ASTSimpleExpression or ASTExpression): An rhs representing the returned value.
    """

    def __init__(self, expression=None, source_position=None):
        """
        Standard constructor.
        :param expression: an rhs.
        :type expression: ASTExpression
        :param source_position: the position of this element in the source file.
        :type source_position: ASTSourceLocation.
        """
        super(ASTReturnStmt, self).__init__(source_position)
        self.expression = expression

    def has_expression(self):
        """
        Returns whether the return statement has an rhs or not.
        :return: True if has rhs, otherwise False.
        :rtype: bool
        """
        return self.expression is not None

    def get_expression(self):
        """
        Returns the rhs.
        :return: an rhs.
        :rtype: ASTExpression
        """
        return self.expression

    def equals(self, other):
        """
        The equals method.
        :param other: a different object.
        :type other: object
        :return: True if equal, otherwise False.
        :rtype: bool
        """
        if not isinstance(other, ASTReturnStmt):
            return False
        return self.get_expression().equals(other.get_expression())
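# --- Illustrative usage sketch (not part of the original module) ---
# A hypothetical construction of a return-statement node; the constructor
# arguments used for ASTSimpleExpression and the omission of a source position
# are assumptions and may differ from the real NESTML factory/builder code.
#
#   expr = ASTSimpleExpression(numeric_literal=42)
#   ret = ASTReturnStmt(expression=expr)
#   if ret.has_expression():
#       value = ret.get_expression()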
gpl-2.0
-5,287,949,223,947,370,000
32.56
100
0.663886
false
platformio/platformio-core
platformio/project/helpers.py
1
5897
# Copyright (c) 2014-present PlatformIO <[email protected]> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from hashlib import sha1 from os import walk from os.path import dirname, isdir, isfile, join from click.testing import CliRunner from platformio import __version__, exception, fs from platformio.compat import IS_WINDOWS, hashlib_encode_data from platformio.project.config import ProjectConfig def get_project_dir(): return os.getcwd() def is_platformio_project(project_dir=None): if not project_dir: project_dir = get_project_dir() return isfile(join(project_dir, "platformio.ini")) def find_project_dir_above(path): if isfile(path): path = dirname(path) if is_platformio_project(path): return path if isdir(dirname(path)): return find_project_dir_above(dirname(path)) return None def get_project_core_dir(): """Deprecated, use ProjectConfig.get_optional_dir("core") instead""" return ProjectConfig.get_instance( join(get_project_dir(), "platformio.ini") ).get_optional_dir("core", exists=True) def get_project_cache_dir(): """Deprecated, use ProjectConfig.get_optional_dir("cache") instead""" return ProjectConfig.get_instance( join(get_project_dir(), "platformio.ini") ).get_optional_dir("cache") def get_project_global_lib_dir(): """ Deprecated, use ProjectConfig.get_optional_dir("globallib") instead "platformio-node-helpers" depends on it """ return ProjectConfig.get_instance( join(get_project_dir(), "platformio.ini") ).get_optional_dir("globallib") def get_project_lib_dir(): """ Deprecated, use ProjectConfig.get_optional_dir("lib") instead "platformio-node-helpers" depends on it """ return ProjectConfig.get_instance( join(get_project_dir(), "platformio.ini") ).get_optional_dir("lib") def get_project_libdeps_dir(): """ Deprecated, use ProjectConfig.get_optional_dir("libdeps") instead "platformio-node-helpers" depends on it """ return ProjectConfig.get_instance( join(get_project_dir(), "platformio.ini") ).get_optional_dir("libdeps") def get_default_projects_dir(): docs_dir = join(fs.expanduser("~"), "Documents") try: assert IS_WINDOWS import ctypes.wintypes # pylint: disable=import-outside-toplevel buf = ctypes.create_unicode_buffer(ctypes.wintypes.MAX_PATH) ctypes.windll.shell32.SHGetFolderPathW(None, 5, None, 0, buf) docs_dir = buf.value except: # pylint: disable=bare-except pass return join(docs_dir, "PlatformIO", "Projects") def compute_project_checksum(config): # rebuild when PIO Core version changes checksum = sha1(hashlib_encode_data(__version__)) # configuration file state checksum.update(hashlib_encode_data(config.to_json())) # project file structure check_suffixes = (".c", ".cc", ".cpp", ".h", ".hpp", ".s", ".S") for d in ( config.get_optional_dir("include"), config.get_optional_dir("src"), config.get_optional_dir("lib"), ): if not isdir(d): continue chunks = [] for root, _, files in walk(d): for f in files: path = join(root, f) if path.endswith(check_suffixes): chunks.append(path) if not chunks: continue chunks_to_str = ",".join(sorted(chunks)) if IS_WINDOWS: # case insensitive OS 
chunks_to_str = chunks_to_str.lower() checksum.update(hashlib_encode_data(chunks_to_str)) return checksum.hexdigest() def load_project_ide_data(project_dir, env_or_envs, cache=False): assert env_or_envs env_names = env_or_envs if not isinstance(env_names, list): env_names = [env_names] with fs.cd(project_dir): result = _load_cached_project_ide_data(project_dir, env_names) if cache else {} missed_env_names = set(env_names) - set(result.keys()) if missed_env_names: result.update(_load_project_ide_data(project_dir, missed_env_names)) if not isinstance(env_or_envs, list) and env_or_envs in result: return result[env_or_envs] return result or None def _load_project_ide_data(project_dir, env_names): # pylint: disable=import-outside-toplevel from platformio.commands.run.command import cli as cmd_run args = ["--project-dir", project_dir, "--target", "_idedata"] for name in env_names: args.extend(["-e", name]) result = CliRunner().invoke(cmd_run, args) if result.exit_code != 0 and not isinstance( result.exception, exception.ReturnErrorCode ): raise result.exception if '"includes":' not in result.output: raise exception.PlatformioException(result.output) return _load_cached_project_ide_data(project_dir, env_names) def _load_cached_project_ide_data(project_dir, env_names): build_dir = ProjectConfig.get_instance( join(project_dir, "platformio.ini") ).get_optional_dir("build") result = {} for name in env_names: if not os.path.isfile(os.path.join(build_dir, name, "idedata.json")): continue with open(os.path.join(build_dir, name, "idedata.json")) as fp: result[name] = json.load(fp) return result
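# --- Illustrative usage sketch (not part of the original module) ---
# Minimal example of querying IDE data for one environment; "uno" is a
# hypothetical environment name, not something defined in this module.
#
#   project_dir = get_project_dir()
#   if is_platformio_project(project_dir):
#       idedata = load_project_ide_data(project_dir, "uno", cache=True)
#       # idedata is None if the environment could not be built; otherwise it is
#       # the parsed idedata.json dict, which includes an "includes" entry.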
apache-2.0
7,943,287,062,079,929,000
31.401099
87
0.658301
false
cloudwalkio/docker-aws-cli
aws_cli.py
1
1450
#!/usr/bin/python
"""
Tool to set up the AWS CLI.
"""
import os
import sys
from subprocess import check_output


def setup_s3(s3_access_key, s3_secret_key):
    """Create S3 configuration file."""
    home = os.path.expanduser("~")
    aws_dir = os.path.join(home, '.aws')
    if not os.path.exists(aws_dir):
        os.makedirs(aws_dir)
    # Write config file
    with open(os.path.join(aws_dir, 'config'), 'w') as f:
        f.write('[default]\n')
    # Write the AWS credentials file to disk
    with open(os.path.join(aws_dir, 'credentials'), 'w') as f:
        credentials = '[default]\naws_access_key_id = %s\naws_secret_access_key = %s\n' % (s3_access_key, s3_secret_key)
        f.write(credentials)


def execute(command):
    """Execute an external host command and print its output."""
    output = check_output(command)
    print output.rstrip()


def print_usage():
    print "Usage: docker run -e S3_ACCESS_KEY=[PUT KEY HERE] -e S3_SECRET_KEY=[PUT KEY HERE] cloudwalk/aws [PUT COMMAND HERE]"


if __name__ == '__main__':
    # Get expected environment variables
    access_key = os.getenv('S3_ACCESS_KEY')
    secret_key = os.getenv('S3_SECRET_KEY')
    if access_key is None or secret_key is None:
        print_usage()
        sys.exit(1)
    # Create AWS config file
    setup_s3(access_key, secret_key)
    # Execute the aws command with whatever arguments are passed to this script
    command = ['aws'] + sys.argv[1:]
    execute(command)
mit
-4,588,440,829,602,186,000
31.222222
126
0.641379
false
jangeador/django-datatable-view
datatableview/columns.py
1
22152
# -*- encoding: utf-8 -*- import re import operator from datetime import datetime try: from functools import reduce except ImportError: pass import django from django.db import models from django.db.models import Model, Manager, Q from django.db.models.fields import FieldDoesNotExist from django.core.exceptions import ObjectDoesNotExist from django.utils.encoding import smart_text from django.utils.safestring import mark_safe try: from django.forms.util import flatatt except ImportError: from django.forms.utils import flatatt from django.template.defaultfilters import slugify try: from django.utils.encoding import python_2_unicode_compatible except ImportError: from .compat import python_2_unicode_compatible import six import dateutil.parser from .utils import resolve_orm_path, DEFAULT_EMPTY_VALUE, DEFAULT_MULTIPLE_SEPARATOR # Registry of Column subclasses to their declared corresponding ModelFields. # The registery is an ordered priority list, containing 2-tuples of a Column subclass and a list of # classes that the column will service. COLUMN_CLASSES = [] STRPTIME_PLACEHOLDERS = { 'year': ('%y', '%Y'), 'month': ('%m', '%b', '%B'), 'day': ('%d',),# '%a', '%A'), # day names are hard because they depend on other date info 'hour': ('%H', '%I'), 'minute': ('%M',), 'second': ('%S',), 'week_day': ('%w',), } def register_simple_modelfield(model_field): column_class = get_column_for_modelfield(model_field) COLUMN_CLASSES.insert(0, (column_class, [model_field])) def get_column_for_modelfield(model_field): """ Return the built-in Column class for a model field class. """ # If the field points to another model, we want to get the pk field of that other model and use # that as the real field. It is possible that a ForeignKey points to a model with table # inheritance, however, so we need to traverse the internal OneToOneField as well, so this will # climb the 'pk' field chain until we have something real. while model_field.rel: model_field = model_field.rel.to._meta.pk for ColumnClass, modelfield_classes in COLUMN_CLASSES: if isinstance(model_field, tuple(modelfield_classes)): return ColumnClass def get_attribute_value(obj, bit): try: value = getattr(obj, bit) except (AttributeError, ObjectDoesNotExist): value = None else: if callable(value) and not isinstance(value, Manager): if not hasattr(value, 'alters_data') or value.alters_data is not True: value = value() return value class ColumnMetaclass(type): """ Column type for automatic registration of column types as ModelField handlers. """ def __new__(cls, name, bases, attrs): new_class = super(ColumnMetaclass, cls).__new__(cls, name, bases, attrs) if new_class.model_field_class: COLUMN_CLASSES.insert(0, (new_class, [new_class.model_field_class])) if new_class.handles_field_classes: COLUMN_CLASSES.insert(0, (new_class, new_class.handles_field_classes)) return new_class # Corollary to django.forms.fields.Field @python_2_unicode_compatible class Column(six.with_metaclass(ColumnMetaclass)): """ Generic table column using CharField for rendering. """ model_field_class = None handles_field_classes = [] lookup_types = () # Tracks each time a Field instance is created. Used to retain order. 
creation_counter = 0 def __init__(self, label=None, sources=None, processor=None, source=None, separator=DEFAULT_MULTIPLE_SEPARATOR, empty_value=DEFAULT_EMPTY_VALUE, model_field_class=None, sortable=True, visible=True, localize=False, allow_regex=False, allow_full_text_search=False): if model_field_class: self.model_field_class = model_field_class if source and sources: raise ValueError("Cannot provide 'source' and 'sources' at the same time.") if source: sources = source self.name = None # Set outside, once the Datatable can put it there if label is not None: label = smart_text(label) self.sources = sources or [] # TODO: Process for real/virtual if not isinstance(self.sources, (tuple, list)): self.sources = [self.sources] self.separator = separator self.label = label self.empty_value = smart_text(empty_value) self.localize = localize self.sortable = sortable self.visible = visible self.processor = processor self.allow_regex = allow_regex self.allow_full_text_search = allow_full_text_search if not self.sources: self.sortable = False # To be filled in externally once the datatable has ordering figured out. self.sort_priority = None self.sort_direction = None self.index = None # Increase the creation counter, and save our local copy. self.creation_counter = Column.creation_counter Column.creation_counter += 1 def __repr__(self): return '<%s.%s "%s">' % (self.__class__.__module__, self.__class__.__name__, self.label) def value(self, obj, **kwargs): """ Calls :py:meth:`.get_initial_value` to obtain the value from ``obj`` that this column's :py:attr:`.sources` list describes. Any supplied ``kwargs`` are forwarded to :py:meth:`.get_initial_value`. Returns the 2-tuple of ``(plain_value, rich_value)`` for the inspection and serialization phases of serialization. """ values = self.get_initial_value(obj, **kwargs) if not isinstance(values, (tuple, list)): values = (values, values) return values def get_initial_value(self, obj, **kwargs): """ Builds a list of values provided by :py:attr:`.sources` looked up on the target ``obj``. Each source may provide a value as a 2-tuple of ``(plain_value, rich_value)``, where ``plain_value`` is the sortable raw value, and ``rich_value`` is possibly something else that can be coerced to a string for display purposes. The ``rich_value`` could also be a string with HTML in it. If no 2-tuple is given, then ``plain_value`` and ``rich_value`` are taken to be the same. Columns with multiple :py:attr:`.sources` will have their ``rich_value`` coerced to a string and joined with :py:attr:`.separator`, and this new concatenated string becomes the final ``rich_value`` for the whole column. If all :py:attr:`.sources` are ``None``, :py:attr:`.empty_value` will be used as the ``rich_value``. """ values = [] for source in self.sources: result = self.get_source_value(obj, source, **kwargs) for value in result: if isinstance(value, Model): value = (value.pk, value) if value is not None: if not isinstance(value, (tuple, list)): value = (value, value) values.append(value) if len(values) == 1: value = values[0] if value is None and self.empty_value is not None: value = self.empty_value elif len(values) > 0: plain_value = [v[0] for v in values] rich_value = self.separator.join(map(six.text_type, [v[1] for v in values])) value = (plain_value, rich_value) else: value = self.empty_value return value def get_source_value(self, obj, source, **kwargs): """ Retrieves the value from ``obj`` associated with some ``source`` description. 
Should return a list whose length is determined by the number of sources consulted. In the default case, this is always just 1, but compound columns that declare their components with nested ``Column`` instances will have sources of their own and need to return a value per nested source. """ if hasattr(source, "__call__"): value = source(obj) elif isinstance(obj, Model): value = reduce(get_attribute_value, [obj] + source.split('__')) elif isinstance(obj, dict): # ValuesQuerySet item value = obj[source] else: raise ValueError("Unknown object type %r" % (repr(obj),)) return [value] def get_processor_kwargs(self, **extra_kwargs): """ Returns a dictionary of kwargs that should be sent to this column's :py:attr:`processor` callback. """ kwargs = { 'localize': self.localize, } kwargs.update(extra_kwargs) return kwargs def get_db_sources(self, model): """ Returns the list of sources that match fields on the given ``model`` class. """ sources = [] for source in self.sources: target_field = self.resolve_source(model, source) if target_field: sources.append(source) return sources def get_virtual_sources(self, model): """ Returns the list of sources that do not match fields on the given ``model`` class. """ sources = [] for source in self.sources: target_field = self.resolve_source(model, source) if target_field is None: sources.append(source) return sources def get_sort_fields(self, model): """ Returns the list of sources that match fields on the given ``model`` class. This will be the list of database-backed fields. """ return self.get_db_sources(model) def expand_source(self, source): return (source,) def resolve_source(self, model, source): # Try to fetch the leaf attribute. If this fails, the attribute is not database-backed and # the search for the first non-database field should end. if hasattr(source, "__call__"): return None try: return resolve_orm_path(model, source) except FieldDoesNotExist: return None def get_source_handler(self, model, source): """ Return handler instance for lookup types and term coercion. """ return self # Interactivity features def prep_search_value(self, term, lookup_type): """ Coerce the input term to work for the given lookup_type. Returns the coerced term, or ``None`` if the term and lookup_type are incompatible together. """ # We avoid making changes that the Django ORM can already do for us multi_terms = None if isinstance(term, six.text_type): if lookup_type == "in": in_bits = re.split(r',\s*', term) if len(in_bits) > 1: multi_terms = in_bits else: term = None if lookup_type == "range": range_bits = re.split(r'\s*-\s*', term) if len(range_bits) == 2: multi_terms = range_bits else: term = None if multi_terms: return filter(None, (self.prep_search_value(multi_term, lookup_type) for multi_term in multi_terms)) model_field = self.model_field_class() try: term = model_field.get_prep_value(term) except: term = None return term def get_lookup_types(self, handler=None): """ Generates the list of valid ORM lookup operators, taking into account runtime options for the ``allow_regex`` and ``allow_full_text_search`` options. 
""" lookup_types = self.lookup_types if handler: lookup_types = handler.lookup_types # Add regex and MySQL 'search' operators if requested for the original column definition if self.allow_regex and 'iregex' not in lookup_types: lookup_types += ('iregex',) if self.allow_full_text_search and 'search' not in lookup_types: lookup_types += ('search',) return lookup_types def search(self, model, term, lookup_types=None): """ Returns the ``Q`` object representing queries to make against this column for the given term. It is the responsibility of this method to decide which of this column's sources are database-backed and which are names of instance attributes, properties, or methods. The ``model`` is provided to identify Django ORM ``ModelField`` s and related models. The default implementation resolves each contributing ``source`` from :py:attr:`sources`, and uses :py:meth:`.prep_search_value` to coerce the input search ``term`` to something usable for each of the query :py:attr:`lookup_types` supported by the column. Any failed coercions will be skipped. The default implementation will also discover terms that match the source field's ``choices`` labels, flipping the term to automatically query for the internal choice value. """ sources = self.get_db_sources(model) column_queries = [] for source in sources: handler = self.get_source_handler(model, source) for sub_source in self.expand_source(source): modelfield = resolve_orm_path(model, sub_source) if modelfield.choices: if hasattr(modelfield, 'get_choices'): choices = modelfield.get_choices() else: choices = modelfield.get_flatchoices() for db_value, label in choices: if term.lower() in label.lower(): k = '%s__exact' % (sub_source,) column_queries.append(Q(**{k: str(db_value)})) if not lookup_types: lookup_types = handler.get_lookup_types() for lookup_type in lookup_types: coerced_term = handler.prep_search_value(term, lookup_type) if coerced_term is None: # Skip terms that don't work with the lookup_type continue elif lookup_type in ('in', 'range') and not isinstance(coerced_term, tuple): # Skip attempts to build multi-component searches if we only have one term continue k = '%s__%s' % (sub_source, lookup_type) column_queries.append(Q(**{k: coerced_term})) if column_queries: q = reduce(operator.or_, column_queries) else: q = None return q # Template rendering def __str__(self): """ Renders a simple ``<th>`` element with ``data-name`` attribute. All items found in the ``self.attributes`` dict are also added as dom attributes. """ return mark_safe(u"""<th data-name="{name_slug}"{attrs}>{label}</th>""".format(**{ 'name_slug': slugify(self.label), 'attrs': self.attributes, 'label': self.label, })) @property def attributes(self): """ Returns a dictionary of initial state data for sorting, sort direction, and visibility. The default attributes include ``data-config-sortable``, ``data-config-visible``, and (if applicable) ``data-config-sorting`` to hold information about the initial sorting state. 
""" attributes = { 'data-config-sortable': 'true' if self.sortable else 'false', 'data-config-visible': 'true' if self.visible else 'false', } if self.sort_priority is not None: attributes['data-config-sorting'] = ','.join(map(six.text_type, [ self.sort_priority, self.index, self.sort_direction, ])) return flatatt(attributes) class TextColumn(Column): model_field_class = models.CharField handles_field_classes = [models.CharField, models.TextField, models.FileField] lookup_types = ('icontains', 'in') class DateColumn(Column): model_field_class = models.DateField handles_field_classes = [models.DateField] lookup_types = ('exact', 'in', 'range', 'year', 'month', 'day', 'week_day') def prep_search_value(self, term, lookup_type): if lookup_type in ('exact', 'in', 'range'): try: date_obj = dateutil.parser.parse(term) except ValueError: # This exception is theoretical, but it doesn't seem to raise. pass except TypeError: # Failed conversions can lead to the parser adding ints to None. pass else: return date_obj if lookup_type not in ('exact', 'in', 'range'): test_term = term if lookup_type == 'week_day': try: test_term = int(test_term) - 1 # Django ORM uses 1-7, python strptime uses 0-6 except: return None else: test_term = str(test_term) for test_format in STRPTIME_PLACEHOLDERS[lookup_type]: # Try to validate the term against the given date lookup type try: date_obj = datetime.strptime(test_term, test_format) except ValueError: pass else: if lookup_type == 'week_day': term = date_obj.weekday() + 1 # Django ORM uses 1-7, python strptime uses 0-6 else: term = getattr(date_obj, lookup_type) return str(term) return super(DateColumn, self).prep_search_value(term, lookup_type) class DateTimeColumn(DateColumn): model_field_class = models.DateTimeField handles_field_classes = [models.DateTimeField] lookups_types = ('exact', 'in', 'range', 'year', 'month', 'day', 'week_day') if django.VERSION >= (1, 6): DateTimeColumn.lookup_types += ('hour', 'minute', 'second') class BooleanColumn(Column): model_field_class = models.BooleanField handles_field_classes = [models.BooleanField, models.NullBooleanField] lookup_types = ('exact', 'in') def prep_search_value(self, term, lookup_type): term = term.lower() # Allow column's own label to represent a true value if term == 'true' or term.lower() in self.label.lower(): term = True elif term == 'false': term = False else: return None return super(BooleanColumn, self).prep_search_value(term, lookup_type) class IntegerColumn(Column): model_field_class = models.IntegerField handles_field_classes = [models.IntegerField, models.AutoField] lookup_types = ('exact', 'in') class FloatColumn(Column): model_field_class = models.FloatField handles_field_classes = [models.FloatField, models.DecimalField] lookup_types = ('exact', 'in') class CompoundColumn(Column): """ Special column type for holding multiple sources that have different model field types. The separation of sources by type is important because of the different query lookup types that are allowed against different model fields. Each source will dynamically find its associated model field and choose an appropriate column class from the registry. To more finely control which column class is used, an actual column instance can be given instead of a string name which declares its own ``source`` or ``sources``. Because they are not important to the client-side representation of the column, no ``label`` is necessary for nested column instances. 
""" model_field_class = None handles_field_classes = [] lookup_types = () def expand_source(self, source): if isinstance(source, Column): return source.sources return super(CompoundColumn, self).expand_source(source) def get_source_value(self, obj, source, **kwargs): """ Treat ``field`` as a nested sub-Column instance, which explicitly stands in as the object to which term coercions and the query type lookup are delegated. """ result = [] for sub_source in self.expand_source(source): # Call super() to get default logic, but send it the 'sub_source' sub_result = super(CompoundColumn, self).get_source_value(obj, sub_source, **kwargs) result.extend(sub_result) return result def get_db_sources(self, model): return self.sources def get_sort_fields(self, model): return self._get_flat_db_sources(model) def _get_flat_db_sources(self, model): """ Return a flattened representation of the individual ``sources`` lists. """ sources = [] for source in self.sources: for sub_source in self.expand_source(source): target_field = self.resolve_source(model, sub_source) if target_field: sources.append(sub_source) return sources def get_source_handler(self, model, source): """ Allow the nested Column source to be its own handler. """ if isinstance(source, Column): return source # Generate a generic handler for the source modelfield = resolve_orm_path(model, source) column_class = get_column_for_modelfield(modelfield) return column_class() class DisplayColumn(Column): """ Convenience column type for unsearchable, unsortable columns, which rely solely on a processor function to generate content. """ model_field_class = None lookup_types = ()
apache-2.0
-6,293,452,412,420,013,000
37.193103
112
0.603061
false
SymbiFlow/symbiflow-arch-defs
utils/vpr_pbtype_arch_wrapper.py
1
11275
#!/usr/bin/env python3 """ Tool for generate an arch.xml file which includes pb_type.xml and model.xml files for testing with Verilog to Routing. Primarily used by the vpr_test_pbtype cmake function (which is automatically part of v2x_test_both cmake function). """ import argparse import math import os import os.path import subprocess import sys import tempfile from typing import List, Dict, Tuple import lxml.etree as ET from lib import xmlinc from lib.flatten import flatten from lib.pb_type import ports, find_leaf FILEDIR_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__))) TEMPLATE_PATH = os.path.abspath( os.path.join(FILEDIR_PATH, "template.arch.xml") ) XPos = int YPos = int GridDict = Dict[Tuple[XPos, YPos], str] def grid_new(width: int, height: int) -> GridDict: """Generate an empty grid dictionary.""" tiles = {} for x in range(width): for y in range(0, height): tiles[(x, y)] = '.' return tiles def grid_size(tiles: GridDict) -> Tuple[XPos, YPos]: """Get width and height for a grid dictionary.""" width = max(x for x, _ in tiles.keys()) + 1 height = max(y for _, y in tiles.keys()) + 1 return (width, height) def grid_format(tiles: GridDict) -> str: """Print a nicely formatted string from grid dictionary. >>> print(grid_format({ ... (0, 0): "A", (1, 0): "B", (2, 0): "C", ... (0, 1): "X", (1, 1): "Y", (2, 1): "Z", ... })) 012 0 ABC 1 XYZ >>> print(grid_format({ ... (0, 0): "A", (1, 0): "B", (2, 0): "C", (3, 0): "D", (4, 0): "E", ... (0, 1): "M", (1, 1): "N", (2, 1): "O", (3, 1): "P", (4, 1): "Q", ... (0, 2): "Z", (1, 2): "Y", (2, 2): "X", (3, 2): "W", (4, 2): "V", ... })) 01234 0 ABCDE 1 MNOPQ 2 ZYXWV """ width, height = grid_size(tiles) s = [] # X header s.append(" ") assert width < 10, width for x in range(0, width): s.append(str(x)) s.append("\n") for y in range(0, height): # Y header s.append("{: 2} ".format(y)) for x in range(0, width): s.append(str(tiles[(x, y)])[0]) s.append('\n') return "".join(s[:-1]) def grid_place_in_column(tiles: GridDict, x: XPos, values: List[str]): """Place a list of values into grid centered vertical in a given column. Modifies the grid dictionary in place. >>> t = grid_new(5, 4) >>> grid_place_in_column(t, 1, ['I']) >>> print(grid_format(t)) 01234 0 ..... 1 .I... 2 ..... 3 ..... >>> t = grid_new(5, 4) >>> grid_place_in_column(t, 1, ['I', 'I']) >>> grid_place_in_column(t, 3, ['O']) >>> print(grid_format(t)) 01234 0 ..... 1 .I.O. 2 .I... 3 ..... """ width, height = grid_size(tiles) start = math.floor((height - len(values)) / 2) for i in range(0, len(values)): tiles[(x, start + i)] = values[i] def grid_generate(input_pins: List[str], output_pins: List[str]) -> GridDict: """Generate a grid dict to fit a set of input_pins and output_pins. Generates a 5 width grid with following columns; Column 0 - One input tile per input pins. Column 1 - padding Column 2 - A single tile. Column 3 - padding Column 4 - One output tile per output pins. Returns ------- GridDict Generate a grid dict to fit a set of input_pins and output_pins. """ height = max(len(input_pins), len(output_pins)) + 2 width = len(['I', '.', 'T', '.', 'O']) tiles = grid_new(width, height) grid_place_in_column(tiles, 0, ['I'] * len(input_pins)) grid_place_in_column(tiles, 2, ['T'] * (height - 2)) grid_place_in_column(tiles, 4, ['O'] * len(output_pins)) return tiles def layout_xml(arch_xml: ET.Element, pbtype_xml: ET.Element) -> int: """Generate a `<layout>` with IBUF and OBUF to match given pb_type. Modifies the giving architecture XML in place. 
Returns ------- int The height of the new layout. """ pbtype_name, clocks, inputs, outputs, carry = ports(pbtype_xml) finputs = [d for s, d in flatten(clocks + inputs)] foutputs = [d for s, d in flatten(outputs)] tiles = grid_generate(finputs, foutputs) width, height = grid_size(tiles) layouts = arch_xml.find("layout") layout = ET.SubElement( layouts, "fixed_layout", { "name": "device", # FIXME: See https://github.com/verilog-to-routing/vtr-verilog-to-routing/issues/277 # "width": str(width), # "height": str(height), "width": str(max(width, height)), "height": str(max(width, height)), }, ) layout.append(ET.Comment('\n' + grid_format(tiles) + '\n')) for x, y in tiles.keys(): v = tiles[(x, y)] if v == '.': continue elif v == 'I': t = 'IBUF' elif v == 'O': t = 'OBUF' elif v == 'T': # FIXME: Is this needed? if y > 1: continue t = 'TILE' else: raise Exception("Unknown tile type {}".format(v)) ET.SubElement( layout, "single", { "type": t, "priority": "1", "x": str(x), "y": str(y) } ) return max(len(finputs), len(foutputs)) def tile_xml( arch_xml: ET.Element, pbtype_xml: ET.Element, outfile: str, tile_height: int ): """Generate a top level pb_type containing given pb_type. Modifies the giving architecture XML in place. Returns ------- str The name of the top level pb_type (the tile). """ name, clocks, inputs, outputs, carry = ports(pbtype_xml) assert name != "TILE", "name ({}) must not be TILE".format(name) cbl = arch_xml.find("complexblocklist") tile = ET.SubElement( cbl, "pb_type", { "name": "TILE", "width": "1", "height": str(tile_height) }, ) dirpath = os.path.dirname(outfile) xmlinc.include_xml( tile, os.path.join(dirpath, "{}.pb_type.xml".format(name.lower())), outfile, ) # Pin locations ploc = ET.SubElement( tile, "pinlocations", {"pattern": "custom"}, ) ilocs = [] olocs = [] for i in range(0, tile_height): ilocs.append( ET.SubElement( ploc, "loc", { "side": "left", "xoffset": "0", "yoffset": str(i) }, ) ) olocs.append( ET.SubElement( ploc, "loc", { "side": "right", "xoffset": "0", "yoffset": str(i) }, ) ) # Interconnect connect = ET.SubElement( tile, "interconnect", ) # Clock pins for d, s in flatten(clocks): ET.SubElement( tile, "clock", { "name": s, "num_pins": "1", "equivalent": "none" }, ) ET.SubElement( connect, "direct", { "input": "TILE.{}".format(s), "name": "TILE.{}-{}.{}".format(s, name, d), "output": "{}.{}".format(name, d), }, ) for i in range(0, tile_height): ET.SubElement(ilocs[i], 'port', {'name': s}) # Input Pins for d, s in flatten(inputs): ET.SubElement( tile, "input", { "name": s, "num_pins": "1", "equivalent": "none" }, ) ET.SubElement( connect, "direct", { "input": "TILE.{}".format(s), "name": "TILE.{}-{}.{}".format(s, name, d), "output": "{}.{}".format(name, d), }, ) for i in range(0, tile_height): ET.SubElement(ilocs[i], 'port', {'name': s}) # Output Pins for s, d in flatten(outputs): ET.SubElement( tile, "output", { "name": d, "num_pins": "1", "equivalent": "none" }, ) ET.SubElement( connect, "direct", { "input": "{}.{}".format(name, s), "name": "TILE.{}-{}.{}".format(s, name, d), "output": "TILE.{}".format(d), }, ) for i in range(0, tile_height): ET.SubElement(olocs[i], 'port', {'name': d}) return name def pretty_xml(xml: ET.Element, xmllint: str = "/usr/bin/xmllint") -> str: """Use xmllint to prettify the XML output. Parameters ---------- xml XML to be prettified xmllint Path to the xmllint binary to use for doing the prettifying. 
Returns ------- str Returns the prettified XML as a string """ with tempfile.NamedTemporaryFile(suffix=".xml", mode="wb") as f: xml.write(f, pretty_print=False) f.flush() output = subprocess.check_output([xmllint, "--pretty", "1", f.name]) return output.decode('utf-8') parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('--pb_type', '-p', help="""\ pb_type.xml file """) parser.add_argument( '--xmllint', default='xmllint', help="""\ Location of the xmllint binary to use. Defaults to finding in users path. """ ) parser.add_argument( '--output', '-o', help="""\ Output filename, default '<name>.arch.xml' """ ) def main(args): args = parser.parse_args(args) iname = os.path.basename(args.pb_type) outfile = "{}.arch.xml".format(iname) if args.output is not None: outfile = args.output outfile = os.path.abspath(outfile) pbtype_xml = ET.parse(args.pb_type) pbtype_xml.xinclude() assert os.path.exists(TEMPLATE_PATH), TEMPLATE_PATH arch_tree = ET.parse(TEMPLATE_PATH) arch_root = arch_tree.getroot() pbtype_root = pbtype_xml.getroot() pbtype_leaf = find_leaf(pbtype_xml.getroot()) assert pbtype_leaf is not None, "Unable to find leaf <pb_type> tag in {}".format( args.pb_type ) tile_height = layout_xml(arch_root, pbtype_root) tname = tile_xml(arch_root, pbtype_root, outfile, tile_height) dirpath = os.path.dirname(outfile) models = arch_root.find("models") xmlinc.include_xml( models, os.path.join(dirpath, "{}.model.xml".format(tname.lower())), outfile, xptr="xpointer(models/child::node())", ) with open(outfile, 'w') as f: f.write(pretty_xml(arch_tree, xmllint=args.xmllint)) return 0 if __name__ == "__main__": import doctest failure_count, test_count = doctest.testmod() assert test_count > 0 assert failure_count == 0, "Doctests failed!" sys.exit(main(sys.argv[1:]))
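A short usage sketch of the grid helpers above, complementing their doctests; it assumes the utils directory and its dependencies (lxml, the local lib package) are importable, and the pin names are invented.

from vpr_pbtype_arch_wrapper import grid_format, grid_generate, grid_size

clocks_and_inputs = ["clk", "a", "b"]   # hypothetical clock/input pins
outputs = ["q"]                         # hypothetical output pin
tiles = grid_generate(clocks_and_inputs, outputs)

print(grid_size(tiles))     # -> (5, 5): 5 columns wide, max(3, 1) + 2 rows tall
print(grid_format(tiles))   # 'I' column on the left, 'T' tiles in the middle, 'O' on the right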
isc
-6,423,540,598,657,562,000
24.280269
96
0.502705
false
obitoo/PebbleTidesUK
www/scrape_easytide.py
1
1977
#!/usr/bin/python # # # Owen Bullock - UK Tides - UKHO Easytide webscrape. # - Parse config.html for list of ports and create a json file for each. # # 25Sep2014 - Created # TODO - timezones - done # TODO - cope with only 3 tides a day - done # remove . from height string, for little pebbles sake # 03Dec14 - multiple ports # 15Mar15 - 3 digit heights # 16Mar15 - tidedata obj # import urllib2 import sys import pprint import json from bs4 import BeautifulSoup import tidedata def scrape_and_create_json_file (port): if port == "0": return tides = tidedata.public(port) tides.scrape() tides.dump_to_file(g_outdir) def run_scrape_for_all_ports(): # parse config file for list of possible ports htmldoc= urllib2.urlopen(g_configfile).read() soup = BeautifulSoup(htmldoc) portlist1 = [str(x['value']) for x in soup.find(id=["port1"]).find_all('option')] portlist2 = [str(x['value']) for x in soup.find(id=["port2"]).find_all('option')] print portlist1 print portlist2 #loop for each primary port for port in portlist1: scrape_and_create_json_file(port) #loop for each secondary port for port in portlist2: scrape_and_create_json_file(port) # # MAIN # # # # # # # # # # # # # # ## # print "----------- starting" g_outdir="/var/www/tides/" g_configfile="file:///var/www/tides/config.html" # # options: -p <port no> : testrun for one port # import optparse parser = optparse.OptionParser("usage: %prog [options] ") parser.add_option("-p", dest="port", type="string", help = "port number, run for single ") (options, args) = parser.parse_args() # # Run for a single port, or look at config file to get list of all ports to scrape. # if options.port: print "Single port TESTMODE:", options.port scrape_and_create_json_file(options.port) else: run_scrape_for_all_ports()
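A minimal standalone sketch of the port-list extraction used above, run against an inline HTML snippet instead of the live config.html; the port values are made up.

from bs4 import BeautifulSoup

html = """
<select id="port1">
  <option value="0">None</option>
  <option value="0113">Example Primary Port</option>
</select>
<select id="port2">
  <option value="0113A">Example Secondary Port</option>
</select>
"""

soup = BeautifulSoup(html)
portlist1 = [str(x['value']) for x in soup.find(id=["port1"]).find_all('option')]
portlist2 = [str(x['value']) for x in soup.find(id=["port2"]).find_all('option')]
print(portlist1)   # ['0', '0113']  -- the '0' entry is skipped by the scraper above
print(portlist2)   # ['0113A']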
gpl-3.0
-5,955,508,285,917,364,000
21.465909
97
0.632271
false
josuebrunel/myql-cli
myql-cli.py
1
6924
#!/usr/bin/env python import os, sys import cmd import argparse import importlib from utils import pretty_xml, pretty_json from utils import create_init_file, create_tables_file, create_directory, get_module from utils import create_config_file, read_config_file, config_file_exists from myql import MYQL from myql.contrib.table import TableMeta from myql.contrib.auth import YOAuth __author__ = 'josue kouka' __email__ = '[email protected]' __version__ = '0.2.4' ######################################################## # # CONFIG FILE HANDLER # ######################################################## class ConfigAction(argparse.Action): '''Action performed for Init command ''' def __call__(self, parser, namespace, value, option_string=None): if not config_file_exists(): create_config_file() sys.exit(0) print("Config file already exists") sys.exit(1) ######################################################## # # COMMAND LINE QUERIES HANDLER # ######################################################## class ExecuteAction(argparse.Action): '''Action performed for Execute command ''' def __call__(self, parser, namespace, value, option_string=None): config = read_config_file() format = namespace.format if namespace.format else config.get('DEFAULT','format') oauth = config.getboolean('DEFAULT','oauth') # Checking ig OAuth params are defined if oauth : oauth = YOAuth(None, None, from_file=config.get('auth','from_file')) attr = { 'community': True, 'format': format, #'jsonCompact': namespace.jsonCompact if namespace.jsonCompact else config.getboolean(format, 'jsonCompact'), 'debug': namespace.debug if namespace.debug else config.getboolean(format, 'debug'), 'oauth': oauth } yql = MYQL(**attr) yql.diagnostics = namespace.diagnostics if namespace.diagnostics else config.getboolean(format, 'diagnostics') for v in value: response = yql.rawQuery(v) if not response.status_code == 200: print(response.content) sys.exit(1) if format == 'json': print(pretty_json(response.content)) else: print(pretty_xml(response.content)) sys.exit(0) ############################################################ # # SHELL QUERIES HANDLER # ############################################################ class ShellAction(argparse.Action): '''Action performed for shell command ''' def __call__(self, parser, namespace, value, option_string=None): pass class ShellCmd(cmd.Cmd): pass ############################################################ # # YQL TABLE HANDLER # ########################################################### class TableAction(argparse.Action): '''Action performed for Table command ''' def __call__(self, parser, namespace, value, option_string=None): if namespace.init and namespace.create: print("Optional arguments --init and --create can't be used together") sys.exit(1) # Case where non argument is given if not namespace.init and not namespace.create: namespace.create = True if namespace.create : if not os.path.isdir(os.path.realpath(value)): print("{0} table project doesn't exist yet. 
\n \tpython myql-cli table -i {0} ".format(value)) sys.exit(1) module_path = os.path.realpath(value) module = get_module(module_path) tables = [ v for k,v in module.__dict__.items() if isinstance(v, TableMeta) and k != 'TableModel'] for table in tables : table_name = table.table.name path= os.path.realpath(value) table.table.save(name=table_name, path=path) sys.exit(0) if namespace.init : folder = value if not create_directory(folder): print("This project already exists !!!") sys.exit(0) create_init_file(folder) create_tables_file(folder) sys.exit(0) sys.exit(1) ############################################################ # # MAIN # ############################################################ if __name__ == '__main__': parser = argparse.ArgumentParser("YQL-cli tools", version=__version__) subparsers = parser.add_subparsers(help='commands') # CONFIG config_parser = subparsers.add_parser('init-config', help='Init a config file .myql-cli.ini in your home directory') config_parser.add_argument('init-config', action=ConfigAction, default=True, nargs='*', help='Config File Management') # EXECUTE QUERY execute_parser = subparsers.add_parser('run', help='Run YQL queries') execute_parser.add_argument( 'run', action=ExecuteAction, nargs='*', help="Run YQL Queries" ) execute_parser.add_argument( '--format', action='store', choices=('json','xml'), help="Response returned format" ) execute_parser.add_argument( '--pretty', action='store_true', default=False, help="Response returned format prettyfied" ) execute_parser.add_argument( '--jsonCompact', action='store_true', default=False, help="Json response compacted" ) execute_parser.add_argument( '--diagnostics', action='store_true', default=False, help="Response with diagnostics" ) execute_parser.add_argument( '--debug', action='store_true', default=False, help="Response with diagnostics" ) execute_parser.add_argument( '--oauth', action='store', help="OAuth credentials" ) # LAUNCH SHELL shell_parser = subparsers.add_parser('shell', help='Prompts a YQL shell command') shell_parser.add_argument( 'shell', action=ShellAction, help="SQL like shell" ) # CREATE YQL TABLE table_parser = subparsers.add_parser('table', help='Creates a YQL table') table_parser.add_argument( 'table', action=TableAction, help="Create a YQL Table from python file" ) table_parser.add_argument( '-i', '--init', action='store_true', help="Creates a project with an tables.py file in it" ) table_parser.add_argument( '-c', '--create', action='store_true', help="Creates tables in the tables.py file of your project" ) args = vars(parser.parse_args())
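A stripped-down sketch of the custom argparse.Action pattern the CLI above relies on; it performs no YQL calls, and the command name, flag, and help strings are illustrative only.

import argparse
import sys


class EchoAction(argparse.Action):
    """Fires as soon as argparse finishes parsing the positional values."""
    def __call__(self, parser, namespace, values, option_string=None):
        for query in values:
            print("would run: %s" % query)
        sys.exit(0)


parser = argparse.ArgumentParser("demo-cli")
subparsers = parser.add_subparsers(help='commands')
run_parser = subparsers.add_parser('run', help='Run queries')
run_parser.add_argument('run', action=EchoAction, nargs='*', help="Queries to run")

if __name__ == '__main__':
    # e.g.:  python demo_cli.py run "select * from geo.places limit 1"
    parser.parse_args()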
mit
1,900,856,855,279,807,700
28.589744
122
0.536973
false
openstack/os-win
os_win/utils/io/ioutils.py
1
10627
# Copyright 2014 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ctypes import struct from eventlet import patcher from oslo_log import log as logging from oslo_utils import units import six from os_win import _utils from os_win import constants from os_win import exceptions from os_win.utils import win32utils from os_win.utils.winapi import constants as w_const from os_win.utils.winapi import libs as w_lib from os_win.utils.winapi import wintypes kernel32 = w_lib.get_shared_lib_handle(w_lib.KERNEL32) LOG = logging.getLogger(__name__) Queue = patcher.original('queue') WAIT_PIPE_DEFAULT_TIMEOUT = 5 # seconds WAIT_IO_COMPLETION_TIMEOUT = 2 * units.k WAIT_INFINITE_TIMEOUT = 0xFFFFFFFF IO_QUEUE_TIMEOUT = 2 IO_QUEUE_BURST_TIMEOUT = 0.05 class IOUtils(object): """Asyncronous IO helper class.""" def __init__(self): self._win32_utils = win32utils.Win32Utils() def _run_and_check_output(self, *args, **kwargs): eventlet_blocking_mode = kwargs.get('eventlet_nonblocking_mode', False) kwargs.update(kernel32_lib_func=True, failure_exc=exceptions.Win32IOException, eventlet_nonblocking_mode=eventlet_blocking_mode) return self._win32_utils.run_and_check_output(*args, **kwargs) def create_pipe(self, security_attributes=None, size=0, inherit_handle=False): """Create an anonymous pipe. The main advantage of this method over os.pipe is that it allows creating inheritable pipe handles (which is flawed on most Python versions). """ r = wintypes.HANDLE() w = wintypes.HANDLE() if inherit_handle and not security_attributes: security_attributes = wintypes.SECURITY_ATTRIBUTES() security_attributes.bInheritHandle = inherit_handle security_attributes.nLength = ctypes.sizeof(security_attributes) self._run_and_check_output( kernel32.CreatePipe, ctypes.byref(r), ctypes.byref(w), ctypes.byref(security_attributes) if security_attributes else None, size) return r.value, w.value @_utils.retry_decorator(exceptions=exceptions.Win32IOException, max_sleep_time=2) def wait_named_pipe(self, pipe_name, timeout=WAIT_PIPE_DEFAULT_TIMEOUT): """Wait a given amount of time for a pipe to become available.""" self._run_and_check_output(kernel32.WaitNamedPipeW, ctypes.c_wchar_p(pipe_name), timeout * units.k) def open(self, path, desired_access=0, share_mode=0, creation_disposition=0, flags_and_attributes=0): error_ret_vals = [w_const.INVALID_HANDLE_VALUE] handle = self._run_and_check_output(kernel32.CreateFileW, ctypes.c_wchar_p(path), desired_access, share_mode, None, creation_disposition, flags_and_attributes, None, error_ret_vals=error_ret_vals) return handle def close_handle(self, handle): self._run_and_check_output(kernel32.CloseHandle, handle) def cancel_io(self, handle, overlapped_structure=None, ignore_invalid_handle=False): """Cancels pending IO on specified handle. If an overlapped structure is passed, only the IO requests that were issued with the specified overlapped structure are canceled. """ # Ignore errors thrown when there are no requests # to be canceled. 
ignored_error_codes = [w_const.ERROR_NOT_FOUND] if ignore_invalid_handle: ignored_error_codes.append(w_const.ERROR_INVALID_HANDLE) lp_overlapped = (ctypes.byref(overlapped_structure) if overlapped_structure else None) self._run_and_check_output(kernel32.CancelIoEx, handle, lp_overlapped, ignored_error_codes=ignored_error_codes) def _wait_io_completion(self, event): # In order to cancel this, we simply set the event. self._run_and_check_output(kernel32.WaitForSingleObjectEx, event, WAIT_INFINITE_TIMEOUT, True, error_ret_vals=[w_const.WAIT_FAILED]) def set_event(self, event): self._run_and_check_output(kernel32.SetEvent, event) def _reset_event(self, event): self._run_and_check_output(kernel32.ResetEvent, event) def _create_event(self, event_attributes=None, manual_reset=True, initial_state=False, name=None): return self._run_and_check_output(kernel32.CreateEventW, event_attributes, manual_reset, initial_state, name, error_ret_vals=[None]) def get_completion_routine(self, callback=None): def _completion_routine(error_code, num_bytes, lpOverLapped): """Sets the completion event and executes callback, if passed.""" overlapped = ctypes.cast(lpOverLapped, wintypes.LPOVERLAPPED).contents self.set_event(overlapped.hEvent) if callback: callback(num_bytes) return wintypes.LPOVERLAPPED_COMPLETION_ROUTINE(_completion_routine) def get_new_overlapped_structure(self): """Structure used for asynchronous IO operations.""" # Event used for signaling IO completion hEvent = self._create_event() overlapped_structure = wintypes.OVERLAPPED() overlapped_structure.hEvent = hEvent return overlapped_structure def read(self, handle, buff, num_bytes, overlapped_structure, completion_routine): self._reset_event(overlapped_structure.hEvent) self._run_and_check_output(kernel32.ReadFileEx, handle, buff, num_bytes, ctypes.byref(overlapped_structure), completion_routine) self._wait_io_completion(overlapped_structure.hEvent) def read_file(self, handle, buff, num_bytes, overlapped_structure=None): # Similar to IOUtils.read, but intended for synchronous operations. num_bytes_read = wintypes.DWORD(0) overlapped_structure_ref = ( ctypes.byref(overlapped_structure) if overlapped_structure else None) self._run_and_check_output(kernel32.ReadFile, handle, buff, num_bytes, ctypes.byref(num_bytes_read), overlapped_structure_ref) return num_bytes_read.value def write(self, handle, buff, num_bytes, overlapped_structure, completion_routine): self._reset_event(overlapped_structure.hEvent) self._run_and_check_output(kernel32.WriteFileEx, handle, buff, num_bytes, ctypes.byref(overlapped_structure), completion_routine) self._wait_io_completion(overlapped_structure.hEvent) def write_file(self, handle, buff, num_bytes, overlapped_structure=None): # Similar to IOUtils.write, but intended for synchronous operations. 
num_bytes_written = wintypes.DWORD(0) overlapped_structure_ref = ( ctypes.byref(overlapped_structure) if overlapped_structure else None) self._run_and_check_output(kernel32.WriteFile, handle, buff, num_bytes, ctypes.byref(num_bytes_written), overlapped_structure_ref) return num_bytes_written.value @classmethod def get_buffer(cls, buff_size, data=None): buff = (ctypes.c_ubyte * buff_size)() if data: cls.write_buffer_data(buff, data) return buff @staticmethod def get_buffer_data(buff, num_bytes): return bytes(bytearray(buff[:num_bytes])) @staticmethod def write_buffer_data(buff, data): for i, c in enumerate(data): buff[i] = struct.unpack('B', six.b(c))[0] class IOQueue(Queue.Queue, object): def __init__(self, client_connected): Queue.Queue.__init__(self) self._client_connected = client_connected def get(self, timeout=IO_QUEUE_TIMEOUT, continue_on_timeout=True): while self._client_connected.isSet(): try: return Queue.Queue.get(self, timeout=timeout) except Queue.Empty: if continue_on_timeout: continue else: break def put(self, item, timeout=IO_QUEUE_TIMEOUT): while self._client_connected.isSet(): try: return Queue.Queue.put(self, item, timeout=timeout) except Queue.Full: continue def get_burst(self, timeout=IO_QUEUE_TIMEOUT, burst_timeout=IO_QUEUE_BURST_TIMEOUT, max_size=constants.SERIAL_CONSOLE_BUFFER_SIZE): # Get as much data as possible from the queue # to avoid sending small chunks. data = self.get(timeout=timeout) while data and len(data) <= max_size: chunk = self.get(timeout=burst_timeout, continue_on_timeout=False) if chunk: data += chunk else: break return data
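A small sketch of the pure-ctypes buffer handling used above; unlike the Win32-bound methods, this part can run on any platform, and the sample payload is arbitrary. The six dependency mirrors the module's own usage.

import ctypes
import struct

import six


def make_buffer(buff_size, data=None):
    # Same approach as IOUtils.get_buffer / write_buffer_data above.
    buff = (ctypes.c_ubyte * buff_size)()
    if data:
        for i, c in enumerate(data):
            buff[i] = struct.unpack('B', six.b(c))[0]
    return buff


buff = make_buffer(8, "abc")
print(bytes(bytearray(buff[:3])))   # b'abc', matching IOUtils.get_buffer_data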
apache-2.0
3,435,588,681,736,873,500
39.253788
79
0.584455
false