Dataset columns (name, dtype, observed range):

  repo_name       stringlengths   5 to 92
  path            stringlengths   4 to 232
  copies          stringclasses   19 values
  size            stringlengths   4 to 7
  content         stringlengths   721 to 1.04M
  license         stringclasses   15 values
  hash            int64           -9,223,277,421,539,062,000 to 9,223,102,107B
  line_mean       float64         6.51 to 99.9
  line_max        int64           15 to 997
  alpha_frac      float64         0.25 to 0.97
  autogenerated   bool            1 class
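The rows that follow pair each source file's raw text with per-file statistics. As a rough illustration only, the sketch below recomputes plausible versions of the three derived columns for a single record and applies a simple quality filter over the documented fields. The definitions used here (line_mean as mean line length, line_max as longest line, alpha_frac as the alphabetic-character fraction) are assumptions inferred from the column names, not documented by the dataset, and the filter thresholds are purely illustrative.

# Sketch only; column semantics and thresholds are assumptions, not dataset documentation.
def derived_stats(content):
    """Recompute per-file statistics from a raw 'content' string."""
    lines = content.splitlines() or [""]
    lengths = [len(line) for line in lines]
    return {
        "line_mean": sum(lengths) / float(len(lengths)),   # mean line length (assumed meaning)
        "line_max": max(lengths),                          # longest line (assumed meaning)
        "alpha_frac": sum(c.isalpha() for c in content) / float(max(len(content), 1)),
    }

def keep_record(record, min_alpha=0.25, max_line=1000):
    """Illustrative quality filter over the documented columns."""
    return (not record["autogenerated"]
            and record["alpha_frac"] >= min_alpha
            and record["line_max"] <= max_line)

# Usage with a hypothetical record shaped like one row of this dataset:
row = {"content": "print('hello')\n", "autogenerated": False,
       "alpha_frac": 0.6, "line_max": 15}
print(derived_stats(row["content"]))
print(keep_record(row))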
repo_name: zvolsky/codex2020
path: controllers/sysadmin.py
copies: 1
size: 1299
content:
# -*- coding: utf-8 -*-

'''
def __xxx():   # alexandermendes/marc2excel

def __xxx2():
    # Flask-Z3950
    #   github.com/alexandermendes/Flask-Z3950
    #   pythonhosted.org/Flask-Z3950/
    # apt install libxml2-dev libxslt-dev python-dev lib32z1-dev
    # pip install Flask-Z3950

    from flask_z3950 import Z3950Manager

    class pseudoFlask(object):
        pass

    app = pseudoFlask()
    app.config = {}
    app.extensions = {}

    db_config = {"db": "Voyager", "host": "z3950.loc.gov", "port": 7090}
    app.config["Z3950_DATABASES"] = {"loc": db_config}
    z3950_manager = Z3950Manager(app)
    z3950_db = z3950_manager.databases['loc']

    dataset = z3950_db.search('ti=1066 and all that')
    print dataset.to_str()
    print dataset.to_json()
'''

from dal_idx import idx_restart, idx_schedule


@auth.requires_membership('admin')
def restart_idx():
    idx_restart()
    redirect(URL('start_idx'))


@auth.requires_membership('admin')
def start_idx():
    # hint: use restart_idx()
    if DEBUG_SCHEDULER:
        idx()
        return 'Indexing finished.'
    else:
        if idx_schedule():
            return 'Task idx was added.'
        else:
            return 'Task idx already queued. Remove it from scheduler_task table (sysadmin/restart_idx/) if you want re-create it.'
license: agpl-3.0
hash: -4,096,058,870,002,447,400
line_mean: 27.23913
line_max: 131
alpha_frac: 0.631255
autogenerated: false

repo_name: sserrot/champion_relationships
path: venv/Lib/site-packages/prompt_toolkit/input/posix_pipe.py
copies: 1
size: 1989
content:
import os
from typing import ContextManager, TextIO, cast

from ..utils import DummyContext
from .base import PipeInput
from .vt100 import Vt100Input

__all__ = [
    "PosixPipeInput",
]


class PosixPipeInput(Vt100Input, PipeInput):
    """
    Input that is send through a pipe.
    This is useful if we want to send the input programmatically into the
    application. Mostly useful for unit testing.

    Usage::

        input = PosixPipeInput()
        input.send_text('inputdata')
    """

    _id = 0

    def __init__(self, text: str = "", responds_to_cpr: bool = True) -> None:
        self._responds_to_cpr = True
        self._r, self._w = os.pipe()

        class Stdin:
            encoding = "utf-8"

            def isatty(stdin) -> bool:
                return True

            def fileno(stdin) -> int:
                return self._r

        super().__init__(cast(TextIO, Stdin()))
        self.send_text(text)

        # Identifier for every PipeInput for the hash.
        self.__class__._id += 1
        self._id = self.__class__._id

    @property
    def responds_to_cpr(self) -> bool:
        return self._responds_to_cpr

    def send_bytes(self, data: bytes) -> None:
        os.write(self._w, data)

    def send_text(self, data: str) -> None:
        " Send text to the input. "
        os.write(self._w, data.encode("utf-8"))

    def raw_mode(self) -> ContextManager[None]:
        return DummyContext()

    def cooked_mode(self) -> ContextManager[None]:
        return DummyContext()

    def close(self) -> None:
        " Close pipe fds. "
        os.close(self._r)
        os.close(self._w)

        # We should assign `None` to 'self._r` and 'self._w',
        # The event loop still needs to know the the fileno for this input in order
        # to properly remove it from the selectors.

    def typeahead_hash(self) -> str:
        """
        This needs to be unique for every `PipeInput`.
        """
        return "pipe-input-%s" % (self._id,)
license: mit
hash: 1,297,950,809,711,393,500
line_mean: 24.831169
line_max: 83
alpha_frac: 0.575163
autogenerated: false

repo_name: TheAlgorithms/Python
path: maths/series/p_series.py
copies: 1
size: 1255
""" This is a pure Python implementation of the P-Series algorithm https://en.wikipedia.org/wiki/Harmonic_series_(mathematics)#P-series For doctests run following command: python -m doctest -v p_series.py or python3 -m doctest -v p_series.py For manual testing run: python3 p_series.py """ def p_series(nth_term: int, power: int) -> list: """Pure Python implementation of P-Series algorithm :return: The P-Series starting from 1 to last (nth) term Examples: >>> p_series(5, 2) [1, '1/4', '1/9', '1/16', '1/25'] >>> p_series(-5, 2) [] >>> p_series(5, -2) [1, '1/0.25', '1/0.1111111111111111', '1/0.0625', '1/0.04'] >>> p_series("", 1000) '' >>> p_series(0, 0) [] >>> p_series(1, 1) [1] """ if nth_term == "": return nth_term nth_term = int(nth_term) power = int(power) series = [] for temp in range(int(nth_term)): series.append(f"1/{pow(temp + 1, int(power))}" if series else 1) return series if __name__ == "__main__": nth_term = input("Enter the last number (nth term) of the P-Series") power = input("Enter the power for P-Series") print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p") print(p_series(nth_term, power))
license: mit
hash: 918,317,342,096,124,200
line_mean: 25.145833
line_max: 72
alpha_frac: 0.583267
autogenerated: false

repo_name: hacktyler/hacktyler_crime
path: config/settings.py
copies: 1
size: 4568
content:
#!/usr/bin/env python import os import django # Base paths DJANGO_ROOT = os.path.dirname(os.path.realpath(django.__file__)) SITE_ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) # Debugging DEBUG = True TEMPLATE_DEBUG = DEBUG ADMINS = ( # ('Your Name', '[email protected]'), ) MANAGERS = ADMINS DATABASES = { 'default': { 'ENGINE': 'django.contrib.gis.db.backends.postgis', 'NAME': 'hacktyler_crime', 'USER': 'hacktyler_crime', 'PASSWORD': 'qw8ndyHprt', } } # Localization TIME_ZONE = 'America/Chicago' LANGUAGE_CODE = 'en-us' USE_I18N = True USE_L10N = True # Media STATIC_ROOT = os.path.join(SITE_ROOT, 'media') STATIC_URL = '/site_media/' ADMIN_MEDIA_PREFIX = '/site_media/admin/' STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', 'compressor.finders.CompressorFinder', ) # Uploads MEDIA_ROOT = '/tmp/sirens' # Make this unique, and don't share it with anybody. SECRET_KEY = '+ei7-2)76sh$$dy^5h4zmkglw#ey1d3f0cj^$r+3zo!wq9j+_*' # List of callables that know how to import templates from various sources. TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', 'django.template.loaders.eggs.Loader', ) TEMPLATE_CONTEXT_PROCESSORS = ( 'django.core.context_processors.media', ) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', ) ROOT_URLCONF = 'config.urls' TEMPLATE_DIRS = ( os.path.join(SITE_ROOT, 'templates') ) INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.humanize', 'django.contrib.staticfiles', 'django.contrib.gis', 'compressor', 'activecalls', 'sirens' ) # Email # run "python -m smtpd -n -c DebuggingServer localhost:1025" to see outgoing # messages dumped to the terminal EMAIL_HOST = 'localhost' EMAIL_PORT = 1025 DEFAULT_FROM_EMAIL = '[email protected]' # Django-compressor COMPRESS_ENABLED = False # Caching CACHE_MIDDLEWARE_KEY_PREFIX='hacktyler_crime' CACHE_MIDDLEWARE_SECONDS=90 * 60 # 90 minutes CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.dummy.DummyCache', } } # Logging LOGGING = { 'version': 1, 'disable_existing_loggers': True, 'formatters': { 'standard': { 'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s' }, }, 'handlers': { 'console': { 'level':'DEBUG', 'class':'logging.StreamHandler', 'formatter': 'standard' }, 'default': { 'level':'INFO', 'class':'logging.handlers.RotatingFileHandler', 'filename': '/var/log/sites/hacktyler_crime/hacktyler_crime.log', 'maxBytes': 1024*1024*5, # 5 MB 'backupCount': 5, 'formatter':'standard', }, 'request_handler': { 'level':'INFO', 'class':'logging.handlers.RotatingFileHandler', 'filename': '/var/log/sites/hacktyler_crime/requests.log', 'maxBytes': 1024*1024*5, # 5 MB 'backupCount': 5, 'formatter':'standard', }, 'backend_handler': { 'level':'DEBUG', 'class':'django.utils.log.NullHandler', }, }, 'loggers': { '': { 'handlers': ['default', 'console'], 'level': 'DEBUG', 'propagate': True }, 'django.request': { 'handlers': ['request_handler', 'console'], 'level': 'DEBUG', 'propagate': False }, 'django.db': { 'handlers': ['backend_handler'], 'level': 'DEBUG', 'propagate': False }, 'requests.packages.urllib3.connectionpool': { 'handlers': ['console'], 'level': 'ERROR', 'propogate': False }, 'geopy': { 'handlers': ['console'], 'level': 'INFO', 'propogate': False } } } # Pusher PUSHER_APP_ID = '11732' PUSHER_KEY = 'd20fddb74c58823cd05d' PUSHER_SECRET = None # must be in local_settings.py PUSHER_CHANNEL = 
'active-calls-test' # Mapquest MAPQUEST_API_KEY = None # must be in local_settings.py # App DEFAULT_HOURS_DISPLAYED = 4 # Allow for local (per-user) override try: from local_settings import * except ImportError: pass
license: mit
hash: -6,037,910,759,668,532,000
line_mean: 23.169312
line_max: 77
alpha_frac: 0.586471
autogenerated: false

repo_name: RealTimeWeb/wikisite
path: MoinMoin/web/utils.py
copies: 1
size: 10269
content:
# -*- coding: iso-8859-1 -*- """ MoinMoin - Utility functions for the web-layer @copyright: 2003-2008 MoinMoin:ThomasWaldmann, 2008-2008 MoinMoin:FlorianKrupicka @license: GNU GPL, see COPYING for details. """ import time from werkzeug import abort, redirect, cookie_date, Response from MoinMoin import caching from MoinMoin import log from MoinMoin import wikiutil from MoinMoin.Page import Page from MoinMoin.web.exceptions import Forbidden, SurgeProtection logging = log.getLogger(__name__) def check_forbidden(request): """ Simple action and host access checks Spider agents are checked against the called actions, hosts against the blacklist. Raises Forbidden if triggered. """ args = request.args action = args.get('action') if ((args or request.method != 'GET') and action not in ['rss_rc', 'show', 'sitemap'] and not (action == 'AttachFile' and args.get('do') == 'get')): if request.isSpiderAgent: raise Forbidden() if request.cfg.hosts_deny: remote_addr = request.remote_addr for host in request.cfg.hosts_deny: if host[-1] == '.' and remote_addr.startswith(host): logging.debug("hosts_deny (net): %s" % remote_addr) raise Forbidden() if remote_addr == host: logging.debug("hosts_deny (ip): %s" % remote_addr) raise Forbidden() return False def check_surge_protect(request, kick=False, action=None, username=None): """ Check for excessive requests Raises a SurgeProtection exception on wiki overuse. @param request: a moin request object @param kick: immediately ban this user @param action: specify the action explicitly (default: request.action) @param username: give username (for action == 'auth-name') """ limits = request.cfg.surge_action_limits if not limits: return False remote_addr = request.remote_addr or '' if remote_addr.startswith('127.'): return False validuser = request.user.valid current_action = action or request.action if current_action == 'auth-ip': # for checking if some specific ip tries to authenticate too often, # not considering the username it tries to authenticate as (could # be many different names) if current_action not in limits: # if admin did not add this key to the limits configuration, do nothing return False current_id = remote_addr elif current_action == 'auth-name': # for checking if some username tries to authenticate too often, # not considering the ip the request comes from (could be a distributed # attack on a high-privilege user) if current_action not in limits: # if admin did not add this key to the limits configuration, do nothing return False current_id = username else: # general case current_id = validuser and request.user.name or remote_addr default_limit = limits.get('default', (30, 60)) now = int(time.time()) surgedict = {} surge_detected = False try: # if we have common farm users, we could also use scope='farm': cache = caching.CacheEntry(request, 'surgeprotect', 'surge-log', scope='wiki', use_encode=True) if cache.exists(): data = cache.content() data = data.split("\n") for line in data: try: id, t, action, surge_indicator = line.split("\t") t = int(t) maxnum, dt = limits.get(action, default_limit) if t >= now - dt: events = surgedict.setdefault(id, {}) timestamps = events.setdefault(action, []) timestamps.append((t, surge_indicator)) except StandardError: pass maxnum, dt = limits.get(current_action, default_limit) events = surgedict.setdefault(current_id, {}) timestamps = events.setdefault(current_action, []) surge_detected = len(timestamps) > maxnum surge_indicator = surge_detected and "!" 
or "" timestamps.append((now, surge_indicator)) if surge_detected: if len(timestamps) < maxnum * 2: timestamps.append((now + request.cfg.surge_lockout_time, surge_indicator)) # continue like that and get locked out if current_action not in ('cache', 'AttachFile', ): # don't add cache/AttachFile accesses to all or picture galleries will trigger SP action = 'all' # put a total limit on user's requests maxnum, dt = limits.get(action, default_limit) events = surgedict.setdefault(current_id, {}) timestamps = events.setdefault(action, []) if kick: # ban this guy, NOW timestamps.extend([(now + request.cfg.surge_lockout_time, "!")] * (2 * maxnum)) surge_detected = surge_detected or len(timestamps) > maxnum surge_indicator = surge_detected and "!" or "" timestamps.append((now, surge_indicator)) if surge_detected: if len(timestamps) < maxnum * 2: timestamps.append((now + request.cfg.surge_lockout_time, surge_indicator)) # continue like that and get locked out data = [] for id, events in surgedict.items(): for action, timestamps in events.items(): for t, surge_indicator in timestamps: data.append("%s\t%d\t%s\t%s" % (id, t, action, surge_indicator)) data = "\n".join(data) cache.update(data) except StandardError: pass if surge_detected and validuser and request.user.auth_method in request.cfg.auth_methods_trusted: logging.info("Trusted user %s would have triggered surge protection if not trusted.", request.user.name) return False elif surge_detected: logging.warning("Surge Protection: action=%s id=%s (ip: %s)", current_action, current_id, remote_addr) raise SurgeProtection(retry_after=request.cfg.surge_lockout_time) else: return False def redirect_last_visited(request): pagetrail = request.user.getTrail() if pagetrail: # Redirect to last page visited last_visited = pagetrail[-1] wikiname, pagename = wikiutil.split_interwiki(last_visited) if wikiname != request.cfg.interwikiname and wikiname != 'Self': wikitag, wikiurl, wikitail, error = wikiutil.resolve_interwiki(request, wikiname, pagename) url = wikiurl + wikiutil.quoteWikinameURL(wikitail) else: url = Page(request, pagename).url(request) else: # Or to localized FrontPage url = wikiutil.getFrontPage(request).url(request) url = request.getQualifiedURL(url) return abort(redirect(url)) class UniqueIDGenerator(object): def __init__(self, pagename=None): self.unique_stack = [] self.include_stack = [] self.include_id = None self.page_ids = {None: {}} self.pagename = pagename def push(self): """ Used by the TOC macro, this ensures that the ID namespaces are reset to the status when the current include started. This guarantees that doing the ID enumeration twice results in the same results, on any level. """ self.unique_stack.append((self.page_ids, self.include_id)) self.include_id, pids = self.include_stack[-1] self.page_ids = {} for namespace in pids: self.page_ids[namespace] = pids[namespace].copy() def pop(self): """ Used by the TOC macro to reset the ID namespaces after having parsed the page for TOC generation and after printing the TOC. """ self.page_ids, self.include_id = self.unique_stack.pop() return self.page_ids, self.include_id def begin(self, base): """ Called by the formatter when a document begins, which means that include causing nested documents gives us an include stack in self.include_id_stack. 
""" pids = {} for namespace in self.page_ids: pids[namespace] = self.page_ids[namespace].copy() self.include_stack.append((self.include_id, pids)) self.include_id = self(base) # if it's the page name then set it to None so we don't # prepend anything to IDs, but otherwise keep it. if self.pagename and self.pagename == self.include_id: self.include_id = None def end(self): """ Called by the formatter when a document ends, restores the current include ID to the previous one and discards the page IDs state we kept around for push(). """ self.include_id, pids = self.include_stack.pop() def __call__(self, base, namespace=None): """ Generates a unique ID using a given base name. Appends a running count to the base. Needs to stay deterministic! @param base: the base of the id @type base: unicode @param namespace: the namespace for the ID, used when including pages @returns: a unique (relatively to the namespace) ID @rtype: unicode """ if not isinstance(base, unicode): base = unicode(str(base), 'ascii', 'ignore') if not namespace in self.page_ids: self.page_ids[namespace] = {} count = self.page_ids[namespace].get(base, -1) + 1 self.page_ids[namespace][base] = count if not count: return base return u'%s-%d' % (base, count) FATALTMPL = """ <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN"> <html> <head><title>%(title)s</title></head> <body><h1>%(title)s</h1> <pre> %(body)s </pre></body></html> """ def fatal_response(error): """ Create a response from MoinMoin.error.FatalError instances. """ html = FATALTMPL % dict(title=error.__class__.__name__, body=str(error)) return Response(html, status=500, mimetype='text/html')
license: apache-2.0
hash: -8,713,514,694,004,186,000
line_mean: 38.045627
line_max: 141
alpha_frac: 0.612328
autogenerated: false

repo_name: daeilkim/refinery
path: refinery/bnpy/bnpy-dev/bnpy/allocmodel/admix/HDPHardMult.py
copies: 1
size: 5496
content:
import numpy as np from .HDPModel import HDPModel from bnpy.suffstats import SuffStatBag from bnpy.util import NumericUtil, NumericHardUtil import scipy.sparse import logging Log = logging.getLogger('bnpy') class HDPHardMult(HDPModel): ######################################################### Local Params ######################################################### def calc_local_params(self, Data, LP, nCoordAscentItersLP=20, convThrLP=0.01, doOnlySomeDocsLP=True, **kwargs): ''' Calculate document-specific quantities (E-step) using hard assignments. Alternate updates to two terms until convergence (1) Approx posterior on topic-token assignment q(word_variational | word_token_variables) (2) Approx posterior on doc-topic probabilities q(doc_variational | document_topic_variables) Returns ------- LP : local params dict ''' # First, run soft assignments for nCoordAscentIters LP = self.calc_local_params_fast(Data, LP, nCoordAscentItersLP, convThrLP, doOnlySomeDocsLP, ) # Next, find hard assignments LP['hard_asgn'] = NumericHardUtil.findMode_Mult( Data.word_count, LP['word_variational'] ) # Update DocTopicCount to use hard assignments for d in xrange(Data.nDoc): start = Data.doc_range[d,0] stop = Data.doc_range[d,1] LP['DocTopicCount'][d,:] = np.sum(LP['hard_asgn'][start:stop], axis=0) # Update alphPi to use hard assignments LP = self.get_doc_variational(Data, LP) LP = self.calc_ElogPi(LP) return LP ######################################################### Suff Stats ######################################################### def get_global_suff_stats(self, Data, LP, doPrecompEntropy=False, doPrecompMergeEntropy=False, mPairIDs=None): ''' Count expected number of times each topic is used across all docs ''' K = LP['DocTopicCount'].shape[1] SS = SuffStatBag(K=K, D=Data.vocab_size) SS.setField('nDoc', Data.nDoc, dims=None) sumLogPi = np.sum(LP['E_logPi'], axis=0) SS.setField('sumLogPiActive', sumLogPi[:K], dims='K') SS.setField('sumLogPiUnused', sumLogPi[-1], dims=None) if doPrecompEntropy: # ---------------- Z terms SS.setELBOTerm('ElogpZ', self.E_logpZ(Data, LP), dims='K') logFactData, logFactZ = self.E_logfactorialZ(Data, LP) SS.setELBOTerm('logFactData', logFactData, dims=None) SS.setELBOTerm('logFactZ', logFactZ, dims='K') # ---------------- Pi terms # Note: no terms needed for ElogpPI # SS already has field sumLogPi, which is sufficient for this term ElogqPiC, ElogqPiA, ElogqPiU = self.E_logqPi_Memoized_from_LP(LP) SS.setELBOTerm('ElogqPiConst', ElogqPiC, dims=None) SS.setELBOTerm('ElogqPiActive', ElogqPiA, dims='K') SS.setELBOTerm('ElogqPiUnused', ElogqPiU, dims=None) if doPrecompMergeEntropy: ElogpZMat, sLgPiMat, ElogqPiMat = self.memo_elbo_terms_for_merge(LP) SS.setMergeTerm('ElogpZ', ElogpZMat, dims=('K','K')) SS.setMergeTerm('ElogqPiActive', ElogqPiMat, dims=('K','K')) SS.setMergeTerm('sumLogPiActive', sLgPiMat, dims=('K','K')) SS.setMergeTerm('logFactZ', self.memo_factorial_term_for_merge(LP, mPairIDs), dims=('K', 'K')) return SS ######################################################### Evidence ######################################################### def calc_evidence( self, Data, SS, LP ): ''' Calculate ELBO terms related to allocation model ''' E_logpV = self.E_logpV() E_logqV = self.E_logqV() E_logpPi = self.E_logpPi(SS) if SS.hasELBOTerms(): E_logqPi = SS.getELBOTerm('ElogqPiConst') \ + SS.getELBOTerm('ElogqPiUnused') \ + np.sum(SS.getELBOTerm('ElogqPiActive')) E_logpZ = np.sum(SS.getELBOTerm('ElogpZ')) E_logfactorialZ = SS.getELBOTerm('logFactData') \ - 
np.sum(SS.getELBOTerm('logFactZ')) else: E_logqPi = self.E_logqPi(LP) E_logpZ = np.sum(self.E_logpZ(Data, LP)) logFactData, logFactZ = self.E_logfactorialZ(Data, LP) E_logfactorialZ = logFactData - np.sum(logFactZ) if SS.hasAmpFactor(): E_logqPi *= SS.ampF E_logpZ *= SS.ampF elbo = E_logpPi - E_logqPi elbo += E_logpZ + E_logfactorialZ elbo += E_logpV - E_logqV return elbo def E_logfactorialZ(self, Data, LP): logFactData = NumericHardUtil.colwisesumLogFactorial(Data.word_count) logFactHardAsgn = NumericHardUtil.colwisesumLogFactorial(LP['hard_asgn']) return logFactData, logFactHardAsgn def memo_factorial_term_for_merge(self, LP, mPairIDs): if mPairIDs is None: logFactZMerge = NumericHardUtil.colwisesumLogFactorial_allpairs( LP['hard_asgn']) else: logFactZMerge = NumericHardUtil.colwisesumLogFactorial_specificpairs( LP['hard_asgn'], mPairIDs) return logFactZMerge
license: mit
hash: -3,765,823,403,857,338,000
line_mean: 38.264286
line_max: 79
alpha_frac: 0.55313
autogenerated: false

repo_name: xgt001/uav_msr_ip14
path: experimental/battousai.py
copies: 1
size: 11871
content:
# -*- coding: utf-8 -*- ''' BEGIN OF PROGRAM ---------------------------------------------------------------------------- AUTHOR : Alex Gamas MAIN GOAL : Open an Image file and display this! VERSION : 0.1.2 USAGE TIPS : ---------------------------------------------------------------------------- ''' from PyQt4.QtCore import * from PyQt4.QtGui import * import battousaiUtil as util import sys from datetime import datetime class Point(): color = Qt.black x = 0 y = 0 def color(self): return self.color def x(self): return self.x def y(self): return self.y def __init__(self, x, y, color = Qt.black): self.x = x self.y = y self.color = color class Square(): FIELD = 0 SELECTION = 1 point_1 = Point(0, 0) point_2 = Point(0, 0) x1, x2, y1, y2, h, w = 0, 0, 0, 0, 0, 0 color = Qt.black childs = [] def addChild(self, square): self.childs.append(square) def type(self): if (len(self.childs) > 0): return self.FIELD else: return self.SELECTION def __init__(self, point_1, point_2, color = Qt.black): self.childs = [] self.x1 = min(point_1.x, point_2.x) self.y1 = min(point_1.y, point_2.y) self.x2 = max(point_1.x, point_2.x) self.y2 = max(point_1.y, point_2.y) self.h = self.y2 - self.y1 self.w = self.x2 - self.x1 self.color = color class ImageDrawPanel(QGraphicsPixmapItem): #fieldMarkModeEnabled = False fieldMark = None def __init__(self, pixmap = None, parent = None, scene = None): self.p1 = None self.p2 = None super(ImageDrawPanel, self).__init__() self.x, self.y = -1, -1 self.pen = self.createPen(Qt.blue) def createPen(self, color = Qt.black): pen = QPen(Qt.SolidLine) pen.setColor(color) pen.setWidth(4) return pen ''' def togleFieldMarkMode(self): self.fieldMarkModeEnabled = not self.fieldMarkModeEnabled if self.fieldMarkModeEnabled: self.pen.setColor(Qt.red) print "Field Mark Mode [ ON ]" else: self.pen.setColor(Qt.blue) print "Field Mark Mode [ OFF ]" self.update() ''' def drawCross(self, painter, point): ''' improve the way generates the cross              this when leaving the canvas loses focus! ''' painter.setPen(self.createPen(point.color)) painter.drawLine(point.x, point.y - 100, point.x, point.y + 100) painter.drawLine(point.x - 100, point.y, point.x + 100, point.y) def drawSquare(self, painter, square): painter.setPen(self.createPen(square.color)) painter.drawRect(square.x1, square.y1, square.w, square.h) ''' def markPoint(self, x, y): if self.p1 == None: self.p1 = Point(x, y, self.pen.color()) else: self.p2 = Point(x, y, self.pen.color()) ''' def cleanTempMarks(self): self.x = -1 self.y = -1 self.p1 = None self.p2 = None def cleanAll(self): if self.fieldMark != None: self.fieldMark.childs = None self.fieldMark = None self.cleanTempMarks() def paint(self, painter, option, widget = None): painter.drawPixmap(0, 0, self.pixmap()) #painter.setPen(self.pen) if self.fieldMark != None: self.drawSquare(painter, self.fieldMark) for child in self.fieldMark.childs: self.drawSquare(painter, child) ''' DRAW TIME ''' if self.p1 != None: self.drawCross(painter, self.p1) if self.p2 != None: ''' THIS IS NOT GOOD! 
''' point_1 = Point(self.p1.x, self.p1.y, self.pen.color()) point_2 = Point(self.p2.x, self.p2.y, self.pen.color()) square = Square(point_2, point_1, self.pen.color()) if self.fieldMark == None: self.fieldMark = square else: self.fieldMark.addChild(square) ''' LIMPAR ''' self.cleanTempMarks() ''' LIMPAR ''' self.update() #self.drawCross(painter, self.p2) else: if self.x >= 0 and self.y >= 0: ponto_2 = Point(self.x, self.y, self.pen.color()) self.drawSquare(painter, Square(self.p1, ponto_2, self.pen.color())) '''DRAW TIME ''' #if self.fieldMark != None: # self.drawSquare(painter, self.fieldMark) # self.p1 = None # self.p2 = None #def update(self, *args, **kwargs): # if (self.fieldMarkModeEnabled): # self.fieldMark = Square(self.p1, self.p2, self.pen.color()) # return QGraphicsPixmapItem.update(self, *args, **kwargs) def mousePressEvent (self, event): x = event.pos().x() y = event.pos().y() self.p1 = Point(x, y, self.pen.color()) self.update() def mouseReleaseEvent(self, event): x = event.pos().x() y = event.pos().y() self.p2 = Point(x, y, self.pen.color()) self.update() def mouseMoveEvent (self, event): self.x = event.pos().x() self.y = event.pos().y() self.update() class MainWindow(QMainWindow): _FILE_EXTENSIONS = ("bmp", "jpg", "png", "xpm","JPG","PNG") actualImagePos = 0 index = datetime.now() outputFolder = None vecFile = None blobCount = 0 def __init__(self): super(MainWindow, self).__init__() self.scene = QGraphicsScene() self.scene.setSceneRect(0, 0, 1024, 768) self.pixmaps = self.getImages() self.imagePanel = ImageDrawPanel(scene = self.scene) self.scene.addItem(self.imagePanel) self.view = QGraphicsView(self.scene) layout = QHBoxLayout() layout.addWidget(self.view) self.widget = QWidget() self.widget.setLayout(layout) self.setCursor(Qt.CrossCursor) self.setCentralWidget(self.widget) self.setWindowTitle("Edhitha Manual") self.actualImagePos = 0 self.setUpActualImage() # this is image traversal code, not our concern at the moment def setUpActualImage(self): self.imagePanel.cleanAll() if (self.pixmaps != None): print "set-up img ... ", self.actualImagePos, " of ", len(self.pixmaps) # myScaledPixmap = self.pixmaps[self.actualImagePos]. # scaled(self.label.size(), Qt.KeepAspectRatio) # self.imagePanel.panel().setScale(self,) self.imagePanel.setScale(0.23) self.imagePanel.setPixmap(self.pixmaps[self.actualImagePos]) self.imagePanel.update() else: print "Nothing to set-up" def goToPrevImage(self): if (self.pixmaps != None): self.actualImagePos = self.actualImagePos - 1 if self.actualImagePos < 0: self.actualImagePos = len(self.pixmaps) - 1 self.setUpActualImage() def goToNextImage(self): if (self.pixmaps != None): self.actualImagePos = self.actualImagePos + 1 if self.actualImagePos >= len(self.pixmaps): self.actualImagePos = 0 self.setUpActualImage() ''' [filename] [# of objects] [[x y width height] [... 2nd object] ...] 
''' def recordVecData(self, pixmap, field): ''' copying the mapped pixmap ''' mainImage = pixmap.copy(field.x1, field.y1, field.w, field.h) if self.outputFolder == None: self.outputFolder = self.chooseDirectory(u"Select the directory?") vecFileName = "{outputFolder}/imagem_vec.vec".format(outputFolder = self.outputFolder) if self.vecFile == None: self.vecFile = file(vecFileName, "w") fieldString = "" detectionQty = 0 imageField = "{:1.0f} {:1.0f} {:1.0f} {:1.0f}" filename = "blobid_{blobCount}_{index}.png".format(index = self.index,blobCount = self.blobCount) imageFullPath = "{outputFolder}/{fname}".format(outputFolder = self.outputFolder, fname = filename) mainImage.save(imageFullPath, format = "PNG", quality = 100); if len(field.childs) == 0: detectionQty = 1 fieldString = imageField.format(0, 0, field.w, field.h) self.vecFile.write("./{} {} {}\n".format(filename, detectionQty, fieldString)) else: detectionQty = len(field.childs) for child in field.childs: fieldString = fieldString + " " + imageField.format(child.x1 - field.x1, child.y1 - field.y1, child.w, child.h) self.vecFile.write("./{} {} {}\n".format(filename, detectionQty, fieldString.strip())) self.index = datetime.now() self.blobCount = self.blobCount +1 def keyPressEvent(self, event): key = event.key() if key == Qt.Key_A: self.goToPrevImage() elif key == Qt.Key_D: self.goToNextImage() elif key == Qt.Key_C: self.imagePanel.cleanAll() self.imagePanel.update() elif key == Qt.Key_R: pixmap = self.pixmaps[self.actualImagePos] self.recordVecData(pixmap, self.imagePanel.fieldMark) def fileExtension(self, filename): return filename[filename.rfind("."):None][1:None] def filterFiles(self, filenames, extensionFilter): filteredFiles = [] for filename in filenames: if (self.fileExtension(filename) in extensionFilter): filteredFiles.append(filename) return filteredFiles def chooseDirectory(self, title): return str(QFileDialog.getExistingDirectory(self, title)) #not of concern at the moment def getImages(self): folder_name = self.chooseDirectory(u"Select Image Folder..") files = self.filterFiles(util.listFiles(folder_name), self._FILE_EXTENSIONS) if len(files) == 0: print u"No Images found ", self._FILE_EXTENSIONS return None else: qtdImages = len(files) ''' QProgressDialog(QWidget parent=None, Qt.WindowFlags flags=0) QProgressDialog(QString, QString, int, int, QWidget parent=None, Qt.WindowFlags flags=0) ''' progressDialog = QProgressDialog("Loading Images..", "Cancel", 0, qtdImages) progressDialog.setWindowModality(Qt.WindowModal) progressDialog.setCancelButton(None) progressDialog.setGeometry(100, 100, 400, 80) progressDialog.show() self.images = [] idx = 0 for fname in files: idx = idx + 1 progressDialog.setValue(idx) self.images.append(QPixmap(fname)) return self.images def closeEvent(self, event): if (self.vecFile != None) and (not self.vecFile.closed): self.vecFile.close() def start(): #if __name__ == "__main__": app = QApplication(sys.argv) mainWindow = MainWindow() mainWindow.showMaximized() #mainWindow.show() sys.exit(app.exec_()) '''END OF PROGRAM ''' def __init__(self): start()
license: mit
hash: 1,616,732,287,557,768,000
line_mean: 30.793566
line_max: 127
alpha_frac: 0.53841
autogenerated: false

repo_name: csuttles/utils
path: python/google-python-exercises/basic/list2.py
copies: 1
size: 2480
content:
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0

# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/

# Additional basic list exercises

# D. Given a list of numbers, return a list where
# all adjacent == elements have been reduced to a single element,
# so [1, 2, 2, 3] returns [1, 2, 3]. You may create a new list or
# modify the passed in list.
def remove_adjacent(nums):
  # +++your code here+++
  i = 0
  return_nums = []
  while i < len(nums):
    if not return_nums:
      return_nums.append(nums[i])
    if nums[i] != return_nums[-1]:
      return_nums.append(nums[i])
    i += 1
  return return_nums


# E. Given two lists sorted in increasing order, create and return a merged
# list of all the elements in sorted order. You may modify the passed in lists.
# Ideally, the solution should work in "linear" time, making a single
# pass of both lists.
def linear_merge(list1, list2):
  # +++your code here+++
  ret_list = list1
  ret_list.extend(list2)
  return sorted(ret_list)
  # note from self: simple is elegant and easy to maintain.

# Note: the solution above is kind of cute, but unforunately list.pop(0)
# is not constant time with the standard python list implementation, so
# the above is not strictly linear time.
# An alternate approach uses pop(-1) to remove the endmost elements
# from each list, building a solution list which is backwards.
# Then use reversed() to put the result back in the correct order. That
# solution works in linear time, but is more ugly.


# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
  if got == expected:
    prefix = ' OK '
  else:
    prefix = ' X '
  print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))


# Calls the above functions with interesting inputs.
def main():
  print 'remove_adjacent'
  test(remove_adjacent([1, 2, 2, 3]), [1, 2, 3])
  test(remove_adjacent([2, 2, 3, 3, 3]), [2, 3])
  test(remove_adjacent([]), [])

  print
  print 'linear_merge'
  test(linear_merge(['aa', 'xx', 'zz'], ['bb', 'cc']),
       ['aa', 'bb', 'cc', 'xx', 'zz'])
  test(linear_merge(['aa', 'xx'], ['bb', 'cc', 'zz']),
       ['aa', 'bb', 'cc', 'xx', 'zz'])
  test(linear_merge(['aa', 'aa'], ['aa', 'bb', 'bb']),
       ['aa', 'aa', 'aa', 'bb', 'bb'])


if __name__ == '__main__':
  main()
license: apache-2.0
hash: 4,741,160,744,597,384,000
line_mean: 31.207792
line_max: 79
alpha_frac: 0.65
autogenerated: false

repo_name: espdev/readthedocs.org
path: readthedocs/rtd_tests/tests/test_project_views.py
copies: 2
size: 12691
content:
from datetime import datetime, timedelta from mock import patch from django.test import TestCase from django.contrib.auth.models import User from django.contrib.messages import constants as message_const from django_dynamic_fixture import get from django_dynamic_fixture import new from readthedocs.core.models import UserProfile from readthedocs.rtd_tests.base import (WizardTestCase, MockBuildTestCase, RequestFactoryTestMixin) from readthedocs.projects.exceptions import ProjectSpamError from readthedocs.projects.models import Project from readthedocs.projects.views.private import ImportWizardView @patch('readthedocs.projects.views.private.trigger_build', lambda x, basic: None) class TestProfileMiddleware(RequestFactoryTestMixin, TestCase): wizard_class_slug = 'import_wizard_view' url = '/dashboard/import/manual/' def setUp(self): super(TestProfileMiddleware, self).setUp() data = { 'basics': { 'name': 'foobar', 'repo': 'http://example.com/foobar', 'repo_type': 'git', }, 'extra': { 'description': 'Describe foobar', 'language': 'en', 'documentation_type': 'sphinx', }, } self.data = {} for key in data: self.data.update({('{0}-{1}'.format(key, k), v) for (k, v) in data[key].items()}) self.data['{0}-current_step'.format(self.wizard_class_slug)] = 'extra' def test_profile_middleware_no_profile(self): """User without profile and isn't banned""" req = self.request('/projects/import', method='post', data=self.data) req.user = get(User, profile=None) resp = ImportWizardView.as_view()(req) self.assertEqual(resp.status_code, 302) self.assertEqual(resp['location'], '/projects/foobar/') @patch.object(ImportWizardView, 'done') def test_profile_middleware_spam(self, view): """User will be banned""" view.side_effect = ProjectSpamError req = self.request('/projects/import', method='post', data=self.data) req.user = get(User) resp = ImportWizardView.as_view()(req) self.assertEqual(resp.status_code, 302) self.assertEqual(resp['location'], '/') self.assertTrue(req.user.profile.banned) def test_profile_middleware_banned(self): """User is banned""" req = self.request('/projects/import', method='post', data=self.data) req.user = get(User) req.user.profile.banned = True req.user.profile.save() self.assertTrue(req.user.profile.banned) resp = ImportWizardView.as_view()(req) self.assertEqual(resp.status_code, 302) self.assertEqual(resp['location'], '/') class TestBasicsForm(WizardTestCase): wizard_class_slug = 'import_wizard_view' url = '/dashboard/import/manual/' def setUp(self): self.eric = User(username='eric') self.eric.set_password('test') self.eric.save() self.client.login(username='eric', password='test') self.step_data['basics'] = { 'name': 'foobar', 'repo': 'http://example.com/foobar', 'repo_type': 'git', } def test_form_pass(self): '''Only submit the basics''' resp = self.post_step('basics') self.assertWizardResponse(resp) proj = Project.objects.get(name='foobar') self.assertIsNotNone(proj) for (key, val) in self.step_data['basics'].items(): self.assertEqual(getattr(proj, key), val) self.assertEqual(proj.documentation_type, 'sphinx') def test_form_missing(self): '''Submit form with missing data, expect to get failures''' self.step_data['basics'] = {'advanced': True} resp = self.post_step('basics') self.assertWizardFailure(resp, 'name') self.assertWizardFailure(resp, 'repo_type') class TestAdvancedForm(TestBasicsForm): def setUp(self): super(TestAdvancedForm, self).setUp() self.step_data['basics']['advanced'] = True self.step_data['extra'] = { 'description': 'Describe foobar', 'language': 'en', 
'documentation_type': 'sphinx', } def test_form_pass(self): '''Test all forms pass validation''' resp = self.post_step('basics') self.assertWizardResponse(resp, 'extra') resp = self.post_step('extra') self.assertWizardResponse(resp) proj = Project.objects.get(name='foobar') self.assertIsNotNone(proj) data = self.step_data['basics'] del data['advanced'] data.update(self.step_data['extra']) for (key, val) in data.items(): self.assertEqual(getattr(proj, key), val) def test_form_missing_extra(self): '''Submit extra form with missing data, expect to get failures''' # Remove extra data to trigger validation errors self.step_data['extra'] = {} resp = self.post_step('basics') self.assertWizardResponse(resp, 'extra') resp = self.post_step('extra') self.assertWizardFailure(resp, 'language') self.assertWizardFailure(resp, 'documentation_type') @patch('readthedocs.projects.forms.ProjectExtraForm.clean_description', create=True) def test_form_spam(self, mocked_validator): '''Don't add project on a spammy description''' self.eric.date_joined = datetime.now() - timedelta(days=365) self.eric.save() mocked_validator.side_effect=ProjectSpamError with self.assertRaises(Project.DoesNotExist): proj = Project.objects.get(name='foobar') resp = self.post_step('basics') self.assertWizardResponse(resp, 'extra') resp = self.post_step('extra') self.assertWizardResponse(resp) with self.assertRaises(Project.DoesNotExist): proj = Project.objects.get(name='foobar') self.assertFalse(self.eric.profile.banned) @patch('readthedocs.projects.forms.ProjectExtraForm.clean_description', create=True) def test_form_spam_ban_user(self, mocked_validator): '''Don't add spam and ban new user''' self.eric.date_joined = datetime.now() self.eric.save() mocked_validator.side_effect=ProjectSpamError with self.assertRaises(Project.DoesNotExist): proj = Project.objects.get(name='foobar') resp = self.post_step('basics') self.assertWizardResponse(resp, 'extra') resp = self.post_step('extra') self.assertWizardResponse(resp) with self.assertRaises(Project.DoesNotExist): proj = Project.objects.get(name='foobar') self.assertTrue(self.eric.profile.banned) class TestImportDemoView(MockBuildTestCase): '''Test project import demo view''' fixtures = ['test_data', 'eric'] def setUp(self): self.client.login(username='eric', password='test') def test_import_demo_pass(self): resp = self.client.get('/dashboard/import/manual/demo/') self.assertEqual(resp.status_code, 302) self.assertEqual(resp['Location'], 'http://testserver/projects/eric-demo/') resp_redir = self.client.get(resp['Location']) self.assertEqual(resp_redir.status_code, 200) messages = list(resp_redir.context['messages']) self.assertEqual(messages[0].level, message_const.SUCCESS) def test_import_demo_already_imported(self): '''Import demo project multiple times, expect failure 2nd post''' self.test_import_demo_pass() project = Project.objects.get(slug='eric-demo') resp = self.client.get('/dashboard/import/manual/demo/') self.assertEqual(resp.status_code, 302) self.assertEqual(resp['Location'], 'http://testserver/projects/eric-demo/') resp_redir = self.client.get(resp['Location']) self.assertEqual(resp_redir.status_code, 200) messages = list(resp_redir.context['messages']) self.assertEqual(messages[0].level, message_const.SUCCESS) self.assertEqual(project, Project.objects.get(slug='eric-demo')) def test_import_demo_another_user_imported(self): '''Import demo project after another user, expect success''' self.test_import_demo_pass() project = Project.objects.get(slug='eric-demo') 
self.client.logout() self.client.login(username='test', password='test') resp = self.client.get('/dashboard/import/manual/demo/') self.assertEqual(resp.status_code, 302) self.assertEqual(resp['Location'], 'http://testserver/projects/test-demo/') resp_redir = self.client.get(resp['Location']) self.assertEqual(resp_redir.status_code, 200) messages = list(resp_redir.context['messages']) self.assertEqual(messages[0].level, message_const.SUCCESS) def test_import_demo_imported_renamed(self): '''If the demo project is renamed, don't import another''' self.test_import_demo_pass() project = Project.objects.get(slug='eric-demo') project.name = 'eric-demo-foobar' project.save() resp = self.client.get('/dashboard/import/manual/demo/') self.assertEqual(resp.status_code, 302) self.assertEqual(resp['Location'], 'http://testserver/projects/eric-demo/') resp_redir = self.client.get(resp['Location']) self.assertEqual(resp_redir.status_code, 200) messages = list(resp_redir.context['messages']) self.assertEqual(messages[0].level, message_const.SUCCESS) self.assertRegexpMatches(messages[0].message, r'already imported') self.assertEqual(project, Project.objects.get(slug='eric-demo')) def test_import_demo_imported_duplicate(self): '''If a project exists with same name, expect a failure importing demo This should be edge case, user would have to import a project (not the demo project), named user-demo, and then manually enter the demo import URL, as the onboarding isn't shown when projects > 0 ''' self.test_import_demo_pass() project = Project.objects.get(slug='eric-demo') project.repo = 'file:///foobar' project.save() resp = self.client.get('/dashboard/import/manual/demo/') self.assertEqual(resp.status_code, 302) self.assertEqual(resp['Location'], 'http://testserver/dashboard/') resp_redir = self.client.get(resp['Location']) self.assertEqual(resp_redir.status_code, 200) messages = list(resp_redir.context['messages']) self.assertEqual(messages[0].level, message_const.ERROR) self.assertRegexpMatches(messages[0].message, r'There was a problem') self.assertEqual(project, Project.objects.get(slug='eric-demo')) class TestPrivateViews(MockBuildTestCase): def setUp(self): self.user = new(User, username='eric') self.user.set_password('test') self.user.save() self.client.login(username='eric', password='test') def test_versions_page(self): pip = get(Project, slug='pip', users=[self.user]) pip.versions.create(verbose_name='1.0') response = self.client.get('/projects/pip/versions/') self.assertEqual(response.status_code, 200) # Test if the versions page works with a version that contains a slash. # That broke in the past, see issue #1176. pip.versions.create(verbose_name='1.0/with-slash') response = self.client.get('/projects/pip/versions/') self.assertEqual(response.status_code, 200) def test_delete_project(self): project = get(Project, slug='pip', users=[self.user]) response = self.client.get('/dashboard/pip/delete/') self.assertEqual(response.status_code, 200) patcher = patch('readthedocs.projects.tasks.remove_dir') with patcher as remove_dir: response = self.client.post('/dashboard/pip/delete/') self.assertEqual(response.status_code, 302) self.assertFalse(Project.objects.filter(slug='pip').exists()) remove_dir.apply_async.assert_called_with( queue='celery', args=[project.doc_path])
license: mit
hash: -1,323,939,079,990,311,000
line_mean: 37.929448
line_max: 81
alpha_frac: 0.622173
autogenerated: false

repo_name: doraemonext/DEOnlineJudge
path: app/problem/views.py
copies: 1
size: 4398
content:
# -*- coding: utf-8 -*- from django.core.urlresolvers import reverse from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger from django.views.generic import TemplateView from django.http import Http404 from django.db.models import Count from app.problem.models import Problem, Category from app.record.models import Record from lib.tools.mixin import LoginRequiredMixin class ProblemListView(TemplateView): template_name = 'problem/list.html' def get_context_data(self, **kwargs): problem_list = Problem.objects.all() search_problem_id = self.request.GET.get('problem_id', '') search_problem_title = self.request.GET.get('problem_title', '') search_category = self.request.GET.get('category', '') search_source = self.request.GET.get('source', '') if search_problem_id: if search_problem_id.isdigit(): problem_list = problem_list.filter(pk=search_problem_id) else: problem_list = problem_list.filter(pk=0) # 清空记录 if search_problem_title: problem_list = problem_list.filter(title=search_problem_title) if search_category: problem_list = problem_list.filter(category__title=search_category) if search_source: problem_list = problem_list.filter(source=search_source) paginator = Paginator(problem_list, 15) page = self.request.GET.get('page') try: problems = paginator.page(page) except PageNotAnInteger: problems = paginator.page(1) except EmptyPage: problems = paginator.page(paginator.num_pages) for problem in problems: problem.total_count = Record.objects.filter(problem=problem).count() ac_record_queryset = Record.objects.filter(problem=problem, status='AC').values('user').annotate(total=Count('user')) problem.ac_count = len(ac_record_queryset) categories = Category.objects.all() context = super(ProblemListView, self).get_context_data(**kwargs) context['problems'] = problems context['categories'] = categories context['search_problem_id'] = search_problem_id context['search_problem_title'] = search_problem_title context['search_category'] = search_category context['search_source'] = search_source return context class ProblemDetailView(TemplateView): template_name = 'problem/detail.html' def get_context_data(self, **kwargs): problem_id = self.kwargs.get('id') queryset = Problem.objects.filter(pk=problem_id) if not queryset.exists(): raise Http404() problem = queryset[0] context = super(ProblemDetailView, self).get_context_data(**kwargs) context['problem_id'] = problem.pk context['title'] = problem.title context['description'] = problem.description context['input_format'] = problem.input_format context['output_format'] = problem.output_format context['sample'] = problem.problemsample_set.all() context['limit'] = problem.limit context['tips'] = problem.tips context['source'] = problem.source context['judge_type'] = problem.judge_type context['time_limit'] = problem.time_limit context['memory_limit'] = problem.memory_limit return context class ProblemSubmitView(LoginRequiredMixin, TemplateView): template_name = 'problem/submit.html' def get_context_data(self, **kwargs): problem_id = self.kwargs.get('id') queryset = Problem.objects.filter(pk=problem_id) if not queryset.exists(): raise Http404() problem = queryset[0] context = super(ProblemSubmitView, self).get_context_data(**kwargs) context['problem_id'] = problem.pk context['title'] = problem.title context['description'] = problem.description context['input_format'] = problem.input_format context['output_format'] = problem.output_format context['limit'] = problem.limit context['tips'] = problem.tips context['source'] = problem.source 
context['judge_type'] = problem.judge_type context['time_limit'] = problem.time_limit context['memory_limit'] = problem.memory_limit return context
license: mit
hash: -8,724,756,005,441,914,000
line_mean: 39.275229
line_max: 129
alpha_frac: 0.647153
autogenerated: false

repo_name: ClearCorp/account-financial-tools
path: account_general_ledger_fix/wizard/account_report_general_ledger.py
copies: 1
size: 1241
content:
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################

from openerp import models


class account_report_general_ledger(models.TransientModel):
    _inherit = "account.report.general.ledger"

    def _print_report(self, cr, uid, ids, data, context=None):
        if context is None:
            context = {}
        data = self.pre_print_report(cr, uid, ids, data, context=context)
        data['form'].update(self.read(cr, uid, ids, ['landscape',
                            'initial_balance', 'amount_currency',
                            'sortby'])[0])
        # we comment this as we whant initial balance the same way
        # if not data['form']['fiscalyear_id']:# GTK client problem onchange does not consider in save record
        #     data['form'].update({'initial_balance': False})
        if data['form']['landscape'] is False:
            data['form'].pop('landscape')
        else:
            context['landscape'] = data['form']['landscape']
        return self.pool['report'].get_action(cr, uid, [], 'account.report_generalledger', data=data, context=context)
license: agpl-3.0
hash: 991,391,364,290,018,600
line_mean: 46.730769
line_max: 119
alpha_frac: 0.554392
autogenerated: false

repo_name: thundernet8/Plog
path: app/core/api/post.py
copies: 1
size: 2783
content:
# coding=utf-8

from flask import request
from flask import jsonify
from flask import g
from flask import url_for

from . import api
from ..api import auth
from .errors import action_failed
from app.core.models.posts import Post


@api.route('/posts', methods=['POST'])
@auth.login_required
def add_post():
    if not g.current_user.can('add_posts'):
        return jsonify({'success': 0, 'error': 'permission denied', 'message': u'没有权限添加文章'})
    pid_token = request.form.get('pidToken', '')
    if pid_token:
        post_id = Post.get_pid_from_token(pid_token)
    else:
        post_id = None
    title = request.form.get('title', 'Untitled')
    markdown = request.form.get('markdown')
    slug = request.form.get('slug')
    image = request.form.get('thumbUrl', '')
    meta_title = request.form.get('metaTitle', '')
    meta_description = request.form.get('metaDescription', '')
    type = 'page' if request.form.get('postType') == 'page' else 'post'
    tag_ids = request.form.get('tags', '')  # 为 tag id 以逗号拼接的字符串
    action = 'draft' if request.form.get('action') not in ['publish', 'update', 'draft'] else request.form.get('action')
    author_id = g.current_user.get_id()
    update_by = g.current_user.get_id()

    if post_id and action == 'update':
        pid = Post.update_post(post_id, title=title, markdown=markdown, slug=slug, image=image,
                               meta_title=meta_title, meta_description=meta_description, type=type,
                               tag_ids=tag_ids, author_id=author_id, update_by=update_by)
        if not pid:
            return action_failed(message=u'更新文章失败')
        post = Post.get_post_json(pid)
        return jsonify({'success': 1, 'post': post, 'postId': pid})

    if action == 'publish':
        pid = Post.publish_post(post_id, title=title, markdown=markdown, slug=slug, image=image,
                                meta_title=meta_title, meta_description=meta_description, type=type,
                                tag_ids=tag_ids, author_id=author_id, update_by=update_by)
        message = u'发表文章失败'
    else:
        pid = Post.draft_post(post_id, title=title, markdown=markdown, slug=slug, image=image,
                              meta_title=meta_title, meta_description=meta_description, type=type,
                              tag_ids=tag_ids, author_id=author_id, update_by=update_by)
        message = u'添加文章失败'

    if not pid:
        return action_failed(message=message)
    post = Post.get_post_json(pid)
    return jsonify({'success': 1, 'post': post, 'postId': pid,
                    'editUrl': url_for('dashboard.edit_post', post_id=pid, _external=True)})
license: gpl-3.0
hash: 5,375,873,227,232,528,000
line_mean: 44.949153
line_max: 120
alpha_frac: 0.600148
autogenerated: false

repo_name: anisyonk/pilot
path: FileHandling.py
copies: 1
size: 40618
content:
# This module contains functions related to file handling. import os from commands import getoutput from pUtil import tolog, convert, readpar def openFile(filename, mode): """ Open and return a file pointer for the given mode """ # Note: caller needs to close the file f = None if os.path.exists(filename): try: f = open(filename, mode) except IOError, e: tolog("!!WARNING!!2997!! Caught exception: %s" % (e)) else: tolog("!!WARNING!!2998!! File does not exist: %s" % (filename)) return f def getJSONDictionary(filename): """ Read a dictionary with unicode to utf-8 conversion """ dictionary = None from json import load f = openFile(filename, 'r') if f: try: dictionary = load(f) except Exception, e: tolog("!!WARNING!!2222!! Failed to load json dictionary: %s" % (e)) else: f.close() # Try to convert the dictionary from unicode to utf-8 if dictionary != {}: try: dictionary = convert(dictionary) except Exception, e: tolog("!!WARNING!!2996!! Failed to convert dictionary from unicode to utf-8: %s, %s" % (dictionary, e)) else: tolog("!!WARNING!!2995!! Load function returned empty JSON dictionary: %s" % (filename)) return dictionary def writeJSON(file_name, dictionary): """ Write the dictionary to a JSON file """ status = False from json import dump try: fp = open(file_name, "w") except Exception, e: tolog("!!WARNING!!2323!! Failed to open file %s: %s" % (file_name, e)) else: # Write the dictionary try: dump(dictionary, fp, sort_keys=True, indent=4, separators=(',', ': ')) except Exception, e: tolog("!!WARNING!!2324!! Failed to write dictionary to file %s: %s" % (file_name, e)) else: tolog("Wrote dictionary to file %s" % (file_name)) status = True fp.close() return status def readJSON(file_name): """ Read a dictionary from a JSON file """ dictionary = {} from json import load f = openFile(file_name, 'r') if f: # Read the dictionary try: dictionary = load(f) except Exception, e: tolog("!!WARNING!!2332!! Failed to read dictionary from file %s: %s" % (file_name, e)) else: f.close() return dictionary def findLatestTRFLogFile(workdir): """ Find out which is the latest log.* file """ last_log_file = "" # Assume the log files begin with 'log.' pattern = "log." file_list = sortedLs(workdir, pattern) if file_list != []: last_log_file = os.path.join(workdir, file_list[-1]) tolog("Found payload log files: %s" % str(file_list)) tolog("File %s was the last log file that was updated" % (last_log_file)) else: tolog("Did not find any log.* files") return last_log_file def sortedLs(path, pattern): """ Sort the contents of directory 'path' using 'pattern' """ # Note: pattern is only a string, e.g. pattern = 'log.' will return a # list with all files starting with 'log.' in time order mtime = lambda f: os.stat(os.path.join(path, f)).st_mtime file_list = [] try: file_list = list(sorted(os.listdir(path), key=mtime)) except Exception, e: tolog("!!WARNING!!3232!! Failed to obtain sorted file list: %s" % (e)) final_file_list = [] if file_list != []: for f in file_list: if f.startswith(pattern): final_file_list.append(f) return final_file_list def readFile(filename): """ Read the contents of a file """ contents = "" if os.path.exists(filename): try: f = open(filename, 'r') except IOError, e: tolog("!!WARNING!!2121!! Failed to open file %s: %s" % (filename, e)) else: try: contents = f.read() except Exception, e: tolog("!!WARNING!!2122!! Failed to read file %s: %s" % (filename, e)) f.close() else: tolog("!!WARNING!!2121!! 
File does not exist: %s" % (filename)) return contents def writeFile(filename, contents, mode='w'): """ Write the contents to filename """ status = False try: f = open(filename, mode) except IOError, e: tolog("!!WARNING!!2123!! Failed to open file %s: %s" % (filename, e)) else: try: f.write(contents) except IOError, e: tolog("!!WARNING!!2123!! Failed to write to file %s: %s" % (filename, e)) else: status = True f.close() return status def getTracingReportFilename(): """ Return the name of the tracing report JSON file """ return "tracing_report.json" def getOSTransferDictionaryFilename(): """ Return the name of the objectstore transfer dictionary file """ return "os_transfer_dictionary.json" def getExtension(alternative='pickle'): """ get the file extension (json or whatever 'alternative' is set to, pickle by default) """ try: from json import load except: extension = alternative else: extension = "json" return extension def getHash(s, length): """ Return a hash from string s """ import hashlib _hash = hashlib.md5() _hash.update(s) return _hash.hexdigest()[:length] def getHashedBucketEndpoint(endpoint, file_name): """ Return a hashed bucket endpoint """ # Example: # endpoint = "atlas_logs", file_name = "log.tgz" # -> hash = "07" and hashed_endpoint = "atlas_logs_07" # return endpoint + "_" + getHash(file_name, 2) return endpoint def addToOSTransferDictionary(file_name, workdir, os_bucket_id, os_ddmendpoint): """ Add the transferred file to the OS transfer file """ # Note: we don't want to store the file name since potentially there can be a large number of files # We only store a file number count # We still need to know the file name in order to figure out which bucket it belongs to (using a hash of the file name) # The hash will be added to the os_ddmendpoint (e.g. 'atlas_logs' -> 'atlas_logs_E2') # Only proceed if os_bucket_id and os_ddmendpoint have values if os_bucket_id and os_bucket_id != "" and os_ddmendpoint and os_ddmendpoint != "": # Get the name and path of the objectstore transfer dictionary file os_tr_path = os.path.join(workdir, getOSTransferDictionaryFilename()) # Create a hash of the file name, two char long, and then the final bucket endpoint _endpoint = getHashedBucketEndpoint(os_ddmendpoint, file_name) # Does the transfer file exist already? If not, create it if os.path.exists(os_tr_path): # Read back the existing dictionary dictionary = readJSON(os_tr_path) if not dictionary: tolog("Failed to open OS transfer dictionary - will recreate it") dictionary = {} else: # Create a new dictionary dictionary = {} tolog("New OS transfer dictionary created: %s" % (os_tr_path)) # Populate the dictionary if dictionary.has_key(os_bucket_id): if dictionary[os_bucket_id].has_key(_endpoint): # Increase the file count dictionary[os_bucket_id][_endpoint] += 1 else: # One file has been stored in this endpoint dictionary[os_bucket_id][_endpoint] = 1 else: # One file has been stored in this endpoint dictionary[os_bucket_id] = {_endpoint: 1} # Store the dictionary if writeJSON(os_tr_path, dictionary): tolog("Stored updated OS transfer dictionary: %s" % (os_tr_path)) else: tolog("!!WARNING!!2211!! 
Failed to store OS transfer dictionary") else: tolog("Cannot add to OS transfer dictionary due to unset values (os_name/os_ddmendpoint)") def getOSTransferDictionary(filename): """ Get the dictionary of objectstore os_bucket_ids with populated buckets from the OS transfer dictionary """ # Note: will return a dictionary of os_bucket_ids identifiers to which files were actually transferred, along # with the names of the buckets where the files were transferred to # This function is used to generate the jobMetrics OS message (only the OS and bucket endpoints are of interest) # os_bucket_ids_dictionary # FORMAT: { 'os_bucket_id': ['os_bucket_endpoint', ''], .. } # OS transfer dictionary # FORMAT: { 'os_bucket_id': { 'os_bucket_endpoint': <number of transferred files>, .. }, .. } os_bucket_ids_dictionary = {} if os.path.exists(filename): # Get the OS transfer dictionary dictionary = readJSON(filename) if dictionary != {}: tmp_os_bucket_ids_list = dictionary.keys() # Only report the os_bucket_id if there were files transferred to it for os_bucket_id in tmp_os_bucket_ids_list: # Get the os_bucket_endpoint list and populate the final dictionary os_bucket_endpoint_list = dictionary[os_bucket_id].keys() for os_bucket_endpoint in os_bucket_endpoint_list: n = dictionary[os_bucket_id][os_bucket_endpoint] tolog("OS bucket id %s: %d file(s) transferred to bucket %s" % (os_bucket_id, n, os_bucket_endpoint)) if n > 0: if os_bucket_ids_dictionary.has_key(os_bucket_id): os_bucket_ids_dictionary[os_bucket_id].append(os_bucket_endpoint) else: os_bucket_ids_dictionary[os_bucket_id] = [os_bucket_endpoint] else: tolog("!!WARNING!!3334!! OS transfer dictionary is empty") else: tolog("!!WARNING!!3333!! OS transfer dictionary does not exist at: %s" % (filename)) return os_bucket_ids_dictionary def getPilotErrorReportFilename(workdir): """ Return the filename for the pilot error report """ # This file should be placed in the pilot init dir return os.path.join(workdir, "pilot_error_report.json") def updatePilotErrorReport(pilotErrorCode, pilotErrorDiag, priority, jobID, workdir): """ Write pilot error info to file """ # Report format: # { jobID1: { priority1: [{ pilotErrorCode1:<nr>, pilotErrorDiag1:<str> }, .. ], .. }, .. } # The pilot will report only the first of the highest priority error when it reports the error at the end of the job # Use the following priority convention: # "0": highest priority [e.g. errors that originate from the main pilot module (unless otherwise necessary)] # "1": high priority [e.g. 
errors that originate from the Monitor module (-||-)] # "2": normal priority [errors that originate from other modules (-||-)] # etc # Convert to string if integer is sent for priority if type(priority) != str: priority = str(priority) filename = getPilotErrorReportFilename(workdir) if os.path.exists(filename): # The file already exists, read it back (with unicode to utf-8 conversion) dictionary = getJSONDictionary(filename) else: dictionary = {} # Sort the new error if dictionary.has_key(jobID): jobID_dictionary = dictionary[jobID] # Update with the latest error info if not jobID_dictionary.has_key(priority): dictionary[jobID][priority] = [] new_dictionary = { 'pilotErrorCode':pilotErrorCode, 'pilotErrorDiag':pilotErrorDiag } # Update the dictionary with the new info dictionary[jobID][priority].append(new_dictionary) print dictionary else: # Create a first entry into the error report dictionary[jobID] = {} dictionary[jobID][priority] = [] dictionary[jobID][priority].append({}) dictionary[jobID][priority][0] = { 'pilotErrorCode':pilotErrorCode, 'pilotErrorDiag':pilotErrorDiag } # Finally update the file status = writeJSON(filename, dictionary) def getHighestPriorityError(jobId, workdir): """ Return the highest priority error for jobId from the pilot error report file """ # Return: {'pilotErrorCode': <nr>, 'pilotErrorDiag': '..'} # Note: only the first of the highest priority errors will be returned errorInfo = {} filename = getPilotErrorReportFilename(workdir) if os.path.exists(filename): # The file already exists, read it back (with unicode to utf-8 conversion) dictionary = getJSONDictionary(filename) if dictionary.has_key(jobId): # Find the highest priority error for this job highestPriority = 999 for priority in dictionary[jobId].keys(): try: p = int(priority) except Exception, e: tolog("!!WARNING!!2321!! Unexpected key in pilot error report: %s" % (e)) else: if p < highestPriority: highestPriority = p if highestPriority < 999: # Get the first reported error errorInfo = dictionary[jobId][str(highestPriority)][0] else: tolog("!!WARNING!!2322!! Could not locate the highest priority error") else: tolog("Pilot error report does not contain any error info for job %s" % (jobId)) else: tolog("Pilot error report does not exist: %s (should only exist if there actually was an error)" % (filename)) return errorInfo def discoverAdditionalOutputFiles(output_file_list, workdir, datasets_list, scope_list): """ Have any additional output files been produced by the trf? 
If so, add them to the output file list """ # In case an output file has reached the max output size, the payload can spill over the remaining events to # a new file following the naming scheme: original_output_filename.extension_N, where N >= 1 # Any additional output file will have the same dataset as the original file from glob import glob from re import compile, findall new_output_file_list = [] new_datasets_list = [] new_scope_list = [] found_new_files = False # Create a lookup dictionaries dataset_dict = dict(zip(output_file_list, datasets_list)) scope_dict = dict(zip(output_file_list, scope_list)) # Loop over all output files for output_file in output_file_list: # Add the original file and dataset new_output_file_list.append(output_file) new_datasets_list.append(dataset_dict[output_file]) new_scope_list.append(scope_dict[output_file]) # Get a list of all files whose names begin with <output_file> files = glob(os.path.join(workdir, "%s*" % (output_file))) for _file in files: # Exclude the original file output_file_full_path = os.path.join(workdir, output_file) if _file != output_file_full_path: # Create the search pattern pattern = compile(r'(%s\.?\_\d+)' % (output_file_full_path)) found = findall(pattern, _file) # Add the file name (not full path) of the found file, if found if found: found_new_files = True new_file = os.path.basename(found[0]) new_output_file_list.append(new_file) dataset = dataset_dict[output_file] new_datasets_list.append(dataset) scope = scope_dict[output_file] new_scope_list.append(scope) tolog("Discovered additional output file: %s (dataset = %s, scope = %s)" % (new_file, dataset, scope)) return new_output_file_list, new_datasets_list, new_scope_list def getJobReportFileName(workDir): """ Return the name of the jobReport, full path """ return os.path.join(workDir, "jobReport.json") # WARNING: EXPERIMENT SPECIFIC AND ALSO DEFINED IN ERRORDIAGNOSIS def getJobReport(workDir): """ Get the jobReport.json dictionary """ # Note: always return at least an empty dictionary dictionary = {} filename = getJobReportFileName(workDir) if os.path.exists(filename): # the jobReport file exists, read it back (with unicode to utf-8 conversion) dictionary = getJSONDictionary(filename) if not dictionary: # getJSONDictionary() can return None dictionary = {} else: tolog("!!WARNING!!1111!! File %s does not exist" % (filename)) return dictionary def removeNoOutputFiles(workdir, outFiles, allowNoOutput, outFilesGuids): """ Remove files from output file list if they are listed in allowNoOutput and do not exist """ _outFiles = [] _guids = [] i = 0 for filename in outFiles: path = os.path.join(workdir, filename) if filename in allowNoOutput: if os.path.exists(path): tolog("File %s is listed in allowNoOutput but exists (will not be removed from list of files to be staged-out)" % (filename)) _outFiles.append(filename) _guids.append(outFilesGuids[i]) else: tolog("File %s is listed in allowNoOutput and does not exist (will be removed from list of files to be staged-out)" % (filename)) else: if os.path.exists(path): tolog("File %s is not listed in allowNoOutput (will be staged-out)" % (filename)) else: tolog("!!WARNING!!4343!! 
File %s is not listed in allowNoOutput and does not exist (job will fail)" % (filename)) _outFiles.append(filename) # Append here, fail later _guids.append(outFilesGuids[i]) i += 1 return _outFiles def extractOutputFiles(analysisJob, workdir, allowNoOutput, outFiles, outFilesGuids): """ Extract the output files from the JSON if possible """ try: if not analysisJob: extracted_output_files, extracted_guids = extractOutputFilesFromJSON(workdir, allowNoOutput) else: if allowNoOutput == []: tolog("Will not extract output files from jobReport for user job (and allowNoOut list is empty)") extracted_output_files = [] extracted_guids = [] else: # Remove the files listed in allowNoOutput if they don't exist extracted_output_files, extracted_guids = removeNoOutputFiles(workdir, outFiles, allowNoOutput, outFilesGuids) except Exception, e: tolog("!!WARNING!!2327!! Exception caught: %s" % (e)) extracted_output_files = [] extracted_guids = [] return extracted_output_files, extracted_guids def addToJobReport(workDir, key, value, section=None, subsection=None): """ Add the key with value to the jobReport """ # Add the key and value to the corresponding section in set # Note: the function reads the jobReport, adds the new key (or overwrites it) then saves the updated jobReport again (overwrite) try: jobReport_dictionary = getJobReport(workDir) if jobReport_dictionary != {}: # Add the new key and value if section: if jobReport_dictionary.has_key(section): if subsection: if jobReport_dictionary[section].has_key(subsection): jobReport_dictionary[section][subsection][key] = value else: tolog("!!WARNING!!2325!! jobReport does not have subsection=%s in the expected location; will not add key=%s" % (subsection, key)) else: jobReport_dictionary[section][key] = value else: tolog("!!WARNING!!2324!! jobReport does not have section=%s in the expected location; will not add key=%s" % (section, key)) else: jobReport_dictionary[key] = value # Overwrite the jobReport with the updated dictionary filename = getJobReportFileName(workDir) if not writeJSON(filename, jobReport_dictionary): tolog("!!WARNING!!2323!! Failed to write updated jobReport") else: tolog("jobReport not available, will not add new key: %s" % (key)) except Exception, e: tolog("!!WARNING!!2321!! Exception caught: %s" % (e)) def extractOutputFilesFromJSON(workDir, allowNoOutput): """ In case the trf has produced additional output files (spill-over), extract all output files from the jobReport """ # Note: ignore files with nentries = 0 output_files = [] guids = [] tolog("Extracting output files from jobReport") jobReport_dictionary = getJobReport(workDir) if jobReport_dictionary != {}: if jobReport_dictionary.has_key('files'): file_dictionary = jobReport_dictionary['files'] if file_dictionary.has_key('output'): output_file_list = file_dictionary['output'] for f_dictionary in output_file_list: if f_dictionary.has_key('subFiles'): subFiles_list = f_dictionary['subFiles'] for f_names_dictionary in subFiles_list: if f_names_dictionary.has_key('name'):# and f_names_dictionary.has_key('nentries'): # Only add the file is nentries > 0 nentries = f_names_dictionary.get("nentries", "UNDEFINED") if type(nentries) == int and nentries > 0: output_files.append(f_names_dictionary['name']) # Also get the file guid if f_names_dictionary.has_key('file_guid'): guids.append(f_names_dictionary['file_guid']) else: tolog("!!WARNING!!1212!! 
Did not find any guid for this file: %s (will be generated)" % (f_names_dictionary['name'])) guids.append(None) else: # Only ignore the file if it is allowed to be ignored if not type(nentries) == int: tolog("!!WARNING!!4542!! nentries is not a number: %s" % str(nentries)) # Special handling for origName._NNN # origName._NNN are unmerged files dynamically produced by AthenaMP. Job definition doesn't # explicitly specify those names but only the base names, thus allowNoOutput contains only base names # in this case. We want to ignore origName._NNN when allowNoOutput=['origName'] from re import compile allowNoOutputEx = [compile(s+'\.?_\d+$') for s in allowNoOutput] if f_names_dictionary['name'] in allowNoOutput or any(patt.match(f_names_dictionary['name']) for patt in allowNoOutputEx): tolog("Ignoring file %s since nentries=%s" % (f_names_dictionary['name'], str(nentries))) else: tolog("Will not ignore empty file %s since file is not in allowNoOutput list" % (f_names_dictionary['name'])) output_files.append(f_names_dictionary['name']) # Also get the file guid if f_names_dictionary.has_key('file_guid'): guids.append(f_names_dictionary['file_guid']) else: tolog("!!WARNING!!1212!! Did not find any guid for this file: %s (will be generated)" % (f_names_dictionary['name'])) guids.append(None) else: tolog("No such key: name/nentries") else: tolog("No such key: subFiles") else: tolog("No such key: output") else: tolog("No such key: files") if len(output_files) == 0: tolog("No output files found in jobReport") else: tolog("Output files found in jobReport: %s" % (output_files)) return output_files, guids def getDestinationDBlockItems(filename, original_output_files, destinationDBlockToken, destinationDblock, scopeOut): """ Return destinationDBlock items (destinationDBlockToken, destinationDblock, scope) for given file """ # Note: in case of spill-over file, the file name will end with _NNN or ._NNN. This will be removed from the file name # so that the destinationDBlockToken of the original output file will be used filename = filterSpilloverFilename(filename) # Which is the corresponding destinationDBlockToken for this file? _destinationDBlockToken = getOutputFileItem(filename, destinationDBlockToken, original_output_files) # Which is the corresponding destinationDblock for this file? _destinationDblock = getOutputFileItem(filename, destinationDblock, original_output_files) # Which is the corresponding scopeOut for this file? _scopeOut = getOutputFileItem(filename, scopeOut, original_output_files) return _destinationDBlockToken, _destinationDblock, _scopeOut def getOutputFileItem(filename, outputFileItem, original_output_files): """ Which is the corresponding destinationDBlock item for this file? """ # Find the file number (all lists are ordered) i = 0 if filename in original_output_files: for f in original_output_files: if f == filename: break i += 1 _outputFileItem = outputFileItem[i] else: tolog("!!WARNING!!4545!! File %s not found in original output file list (will use outputFileItem[0])" % (filename)) _outputFileItem = outputFileItem[0] return _outputFileItem def filterSpilloverFilename(filename): """ Remove any unwanted spill-over filename endings (i.e. 
_NNN or ._NNN) """ # Create the search pattern from re import compile, findall pattern = compile(r'(\.?\_\d+)') found = findall(pattern, filename) if found: # Make sure that the _NNN substring is at the end of the string for f in found: if filename.endswith(f): # Do not use replace here since it might cut away something from inside the filename and not only at the end filename = filename[:-len(f)] return filename def getDirSize(d): """ Return the size of directory d using du -sk """ tolog("Checking size of work dir: %s" % (d)) from commands import getoutput size_str = getoutput("du -sk %s" % (d)) size = 0 # E.g., size_str = "900\t/scratch-local/nilsson/pilot3z" try: # Remove tab and path, and convert to int (and B) size = int(size_str.split("\t")[0])*1024 except Exception, e: tolog("!!WARNING!!4343!! Failed to convert to int: %s" % (e)) else: tolog("Size of directory %s: %d B" % (d, size)) return size def addToTotalSize(path, total_size): """ Add the size of file with 'path' to the total size of all in/output files """ if os.path.exists(path): from SiteMover import SiteMover sitemover = SiteMover() # Get the file size fsize = sitemover.getLocalFileSize(path) tolog("Size of file %s: %s B" % (path, fsize)) if fsize != "": total_size += long(fsize) else: tolog("Skipping file %s in work dir size check since it is not present" % (path)) return total_size def storeWorkDirSize(workdir_size, pilot_initdir, job, correction=True): """ Store the measured remaining disk space """ # If correction=True, then input and output file sizes will be deducated filename = os.path.join(pilot_initdir, getWorkDirSizeFilename(job.jobId)) dictionary = {} # FORMAT: { 'workdir_size': [value1, value2, ..] } workdir_size_list = [] if os.path.exists(filename): # Read back the dictionary dictionary = readJSON(filename) if dictionary != {}: workdir_size_list = dictionary['workdir_size'] else: tolog("!!WARNING!!4555!! 
Failed to read back remaining disk space from file: %s" % (filename)) # Correct for any input and output files if correction: total_size = 0L # B if os.path.exists(job.workdir): # Find out which input and output files have been transferred and add their sizes to the total size # (Note: output files should also be removed from the total size since outputfilesize is added in the task def) # First remove the log file from the output file list outFiles = [] for f in job.outFiles: if not job.logFile in f: outFiles.append(f) # Then update the file list in case additional output files have been produced # Note: don't do this deduction since it is not known by the task definition #outFiles, dummy, dummy = discoverAdditionalOutputFiles(outFiles, job.workdir, job.destinationDblock, job.scopeOut) file_list = job.inFiles + outFiles for f in file_list: if f != "": total_size = addToTotalSize(os.path.join(job.workdir, f), total_size) tolog("Total size of present input+output files: %d B (work dir size: %d B)" % (total_size, workdir_size)) workdir_size -= total_size else: tolog("WARNING: Can not correct for input/output files since workdir does not exist: %s" % (job.workdir)) # Append the new value to the list and store it workdir_size_list.append(workdir_size) dictionary = {'workdir_size': workdir_size_list} status = writeJSON(filename, dictionary) if status: tolog("Stored %d B in file %s" % (workdir_size, filename)) return status def getWorkDirSizeFilename(jobId): """ Return the name of the workdir_size.json file """ return "workdir_size-%s.json" % (jobId) def getMaxWorkDirSize(path, jobId): """ Return the maximum disk space used by a payload """ filename = os.path.join(path, getWorkDirSizeFilename(jobId)) maxdirsize = 0 if os.path.exists(filename): # Read back the workdir space dictionary dictionary = readJSON(filename) if dictionary != {}: # Get the workdir space list try: workdir_size_list = dictionary['workdir_size'] except Exception, e: tolog("!!WARNING!!4557!! Could not read back work dir space list: %s" % (e)) else: # Get the maximum value from the list maxdirsize = max(workdir_size_list) else: tolog("!!WARNING!!4555!! Failed to read back work dir space from file: %s" % (filename)) else: tolog("!!WARNING!!4556!! No such file: %s" % (filename)) return maxdirsize # ATLAS specific def getNumberOfEvents(workDir): """ Extract the number of events from the job report """ Nevents = {} # FORMAT: { format : total_events, .. } jobReport_dictionary = getJobReport(workDir) if jobReport_dictionary != {}: if jobReport_dictionary.has_key('resource'): resource_dictionary = jobReport_dictionary['resource'] if resource_dictionary.has_key('executor'): executor_dictionary = resource_dictionary['executor'] for format in executor_dictionary.keys(): # "RAWtoESD", .. if executor_dictionary[format].has_key('nevents'): if Nevents.has_key(format): print executor_dictionary[format]['nevents'] Nevents[format] += executor_dictionary[format]['nevents'] else: Nevents[format] = executor_dictionary[format]['nevents'] else: tolog("Format %s has no such key: nevents" % (format)) else: tolog("No such key: executor") else: tolog("No such key: resource") # Now find the largest number of events among the different formats if Nevents != {}: try: Nmax = max(Nevents.values()) except Exception, e: tolog("!!WARNING!!2323!! 
Exception caught: %s" % (e)) Nmax = 0 else: tolog("Did not find the number of events in the job report") Nmax = 0 return Nmax # ATLAS specific def getDBInfo(workDir): """ Extract and add up the DB info from the job report """ # Input: workDir (location of jobReport.json # Output: dbTime, dbData [converted strings, e.g. "dbData=105077960 dbTime=251.42"] dbTime = 0 dbData = 0L jobReport_dictionary = getJobReport(workDir) if jobReport_dictionary != {}: if jobReport_dictionary.has_key('resource'): resource_dictionary = jobReport_dictionary['resource'] if resource_dictionary.has_key('executor'): executor_dictionary = resource_dictionary['executor'] for format in executor_dictionary.keys(): # "RAWtoESD", .. if executor_dictionary[format].has_key('dbData'): try: dbData += executor_dictionary[format]['dbData'] except: pass else: tolog("Format %s has no such key: dbData" % (format)) if executor_dictionary[format].has_key('dbTime'): try: dbTime += executor_dictionary[format]['dbTime'] except: pass else: tolog("Format %s has no such key: dbTime" % (format)) else: tolog("No such key: executor") else: tolog("No such key: resource") if dbData != 0L: dbDataS = "%s" % (dbData) else: dbDataS = "" if dbTime != 0: dbTimeS = "%.2f" % (dbTime) else: dbTimeS = "" return dbTimeS, dbDataS # ATLAS specific def getCPUTimes(workDir): """ Extract and add up the total CPU times from the job report """ # Note: this is used with Event Service jobs # Input: workDir (location of jobReport.json) # Output: cpuCU (unit), totalCPUTime, conversionFactor totalCPUTime = 0L jobReport_dictionary = getJobReport(workDir) if jobReport_dictionary != {}: if jobReport_dictionary.has_key('resource'): resource_dictionary = jobReport_dictionary['resource'] if resource_dictionary.has_key('executor'): executor_dictionary = resource_dictionary['executor'] for format in executor_dictionary.keys(): # "RAWtoESD", .. if executor_dictionary[format].has_key('cpuTime'): try: totalCPUTime += executor_dictionary[format]['cpuTime'] except: pass else: tolog("Format %s has no such key: cpuTime" % (format)) else: tolog("No such key: executor") else: tolog("No such key: resource") conversionFactor = 1.0 cpuCU = "s" return cpuCU, totalCPUTime, conversionFactor def getDirectAccess(analyjob=False): """ Should direct i/o be used, and which type of direct i/o """ directInLAN = useDirectAccessLAN() directInWAN = useDirectAccessWAN() directInType = 'None' directIn = False if directInLAN: directInType = 'LAN' directIn = True if directInWAN: # if (directInWAN and not analyjob) or (directInWAN and directInLAN and analyjob): directInType = 'WAN' # Overrides LAN if both booleans are set to True directIn = True return directIn, directInType def _useDirectAccess(LAN=True, WAN=False): """ Should direct i/o be used over LAN or WAN? """ useDA = False if LAN: par = 'direct_access_lan' elif WAN: par = 'direct_access_wan' else: tolog("!!WARNING!!3443!! Bad LAN/WAN combination: LAN=%s, WAN=%s" % (str(LAN), str(WAN))) par = '' if par != '': da = readpar(par) if da: da = da.lower() if da == "true": useDA = True return useDA def useDirectAccessLAN(): """ Should direct i/o be used over LAN? """ return _useDirectAccess(LAN=True, WAN=False) def useDirectAccessWAN(): """ Should direct i/o be used over WAN? 
""" return _useDirectAccess(LAN=False, WAN=True) def getReplicaDictionaryFromXML(workdir, pfc_name="PoolFileCatalog.xml"): """ Return the replica information from a PFC """ # NOTE: Currently this function only returns LFNs and PFNs ec = 0 pilotErrorDiag = "" replica_dictionary = {} # FORMAT: { <lfn1>:{'pfn':<pfn1>, ..}, .. } pfcFile = os.path.join(workdir, pfc_name) if not os.path.exists(pfcFile): tolog("!!WARNING!!3332!! No such file: %s" % pfcFile) return None # make sure the PFC exists from xml.dom import minidom xmldoc = minidom.parse(pfcFile) fileList = xmldoc.getElementsByTagName("File") for thisfile in fileList: #lfn = str(thisfile.getElementsByTagName("lfn")[0].getAttribute("name")) pfn = str(thisfile.getElementsByTagName("pfn")[0].getAttribute("name")) lfn = os.path.basename(pfn) replica_dictionary[lfn] = {} replica_dictionary[lfn]['pfn'] = pfn return replica_dictionary def touch(path): """ Touch a file and update mtime in case the file exists. :param path: :return: """ with open(path, 'a'): os.utime(path, None) def getOsTimesTuple(workdir): """ Read os.times() from a txt file and convert it back to a proper os.times() tuple again """ # This function is used to calculate the cpu consumption time. The t0_times.txt file is created just before the # payload is executed in times = [] failed = False path = os.path.join(workdir, 't0_times.txt') if os.path.exists(path): with open(path, 'r') as f: for t in f.read().split(): # remove any initial (, trailing ) or , a = t.strip('(').strip(')').strip(',') try: times.append(float(a)) except ValueError as e: tolog("!!WARNING!!1212!! Exception caught: offending value=%s (cannot convert to float)" % (e)) failed = True break if not failed: # consistency check if len(times) == 5: return tuple(times) else: tolog("!!WARNING!!1222!! os.times() tuple has wrong length (not 5): %s" % str(times)) else: tolog("!!WARNING!!1222!! Failed to convert os.times() txt file to tuple - CPU consumption meausurement cannot be done") return None else: tolog("t0 file does not exist - probably the payload was not executed") return None def get_files(pattern="*.log"): files = [] stdout = getoutput("find . -name %s" % pattern) if stdout: # remove last \n if present if stdout.endswith('\n'): stdout = stdout[:-1] files = stdout.split('\n') return files def tail(filename, lines=10): return getoutput('tail -%d %s' % (lines, filename))
apache-2.0
-7,112,027,889,522,221,000
38.434951
161
0.587498
false
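The FileHandling module in the record above centres on the trf "spill-over" naming convention: when an output file reaches the size limit, the payload writes continuation files named original_name_NNN or original_name._NNN, and filterSpilloverFilename()/discoverAdditionalOutputFiles() map those back to the original output file. Below is a minimal, standalone Python 3 sketch of that matching logic, for illustration only (the module itself is Python 2 and depends on pUtil, so it is not imported here; the file names are made up for the example):

import re

def strip_spillover_suffix(filename):
    # Mirror of filterSpilloverFilename(): drop a trailing "_NNN" or "._NNN"
    # spill-over marker, but only when it really sits at the end of the name.
    pattern = re.compile(r'(\.?\_\d+)')
    for suffix in re.findall(pattern, filename):
        if filename.endswith(suffix):
            filename = filename[:-len(suffix)]
    return filename

print(strip_spillover_suffix("AOD.pool.root._003"))   # -> AOD.pool.root
print(strip_spillover_suffix("AOD.pool.root"))        # -> AOD.pool.root (unchanged)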
anish/buildbot
master/setup.py
1
21649
#!/usr/bin/env python # # This file is part of Buildbot. Buildbot is free software: you can # redistribute it and/or modify it under the terms of the GNU General Public # License as published by the Free Software Foundation, version 2. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., 51 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # Copyright Buildbot Team Members """ Standard setup script. """ import glob import inspect import os import pkg_resources import sys from distutils.command.install_data import install_data from distutils.command.sdist import sdist from distutils.version import LooseVersion from setuptools import setup from buildbot import version if "bdist_wheel" in sys.argv: BUILDING_WHEEL = True else: BUILDING_WHEEL = False def include(d, e): """Generate a pair of (directory, file-list) for installation. 'd' -- A directory 'e' -- A glob pattern""" return (d, [f for f in glob.glob('%s/%s' % (d, e)) if os.path.isfile(f)]) def include_statics(d): r = [] for root, ds, fs in os.walk(d): r.append((root, [os.path.join(root, f) for f in fs])) return r class install_data_twisted(install_data): """make sure data files are installed in package. this is evil. copied from Twisted/setup.py. """ def finalize_options(self): self.set_undefined_options('install', ('install_lib', 'install_dir'), ) super().finalize_options() def run(self): super().run() # ensure there's a buildbot/VERSION file fn = os.path.join(self.install_dir, 'buildbot', 'VERSION') open(fn, 'w').write(version) self.outfiles.append(fn) class our_sdist(sdist): def make_release_tree(self, base_dir, files): sdist.make_release_tree(self, base_dir, files) # ensure there's a buildbot/VERSION file fn = os.path.join(base_dir, 'buildbot', 'VERSION') open(fn, 'w').write(version) # ensure that NEWS has a copy of the latest release notes, with the # proper version substituted src_fn = os.path.join('docs', 'relnotes/index.rst') with open(src_fn) as f: src = f.read() src = src.replace('|version|', version) dst_fn = os.path.join(base_dir, 'NEWS') with open(dst_fn, 'w') as f: f.write(src) def define_plugin_entry(name, module_name): """ helper to produce lines suitable for setup.py's entry_points """ if isinstance(name, tuple): entry, name = name else: entry = name return '%s = %s:%s' % (entry, module_name, name) def concat_dicts(*dicts): result = dict() for d in dicts: result.update(d) return result def define_plugin_entries(groups): """ helper to all groups for plugins """ result = dict() for group, modules in groups: tempo = [] for module_name, names in modules: tempo.extend([define_plugin_entry(name, module_name) for name in names]) result[group] = tempo return result __file__ = inspect.getframeinfo(inspect.currentframe()).filename with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as long_d_f: long_description = long_d_f.read() setup_args = { 'name': "buildbot", 'version': version, 'description': "The Continuous Integration Framework", 'long_description': long_description, 'author': "Brian Warner", 'author_email': "[email protected]", 'maintainer': "Dustin J. 
Mitchell", 'maintainer_email': "[email protected]", 'url': "http://buildbot.net/", 'classifiers': [ 'Development Status :: 5 - Production/Stable', 'Environment :: No Input/Output (Daemon)', 'Environment :: Web Environment', 'Intended Audience :: Developers', 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)', 'Topic :: Software Development :: Build Tools', 'Topic :: Software Development :: Testing', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6' ], 'packages': [ "buildbot", "buildbot.configurators", "buildbot.worker", "buildbot.worker.protocols", "buildbot.changes", "buildbot.clients", "buildbot.data", "buildbot.db", "buildbot.db.migrate.versions", "buildbot.db.types", "buildbot.machine", "buildbot.monkeypatches", "buildbot.mq", "buildbot.plugins", "buildbot.process", "buildbot.process.users", "buildbot.reporters", "buildbot.schedulers", "buildbot.scripts", "buildbot.secrets", "buildbot.secrets.providers", "buildbot.statistics", "buildbot.statistics.storage_backends", "buildbot.status", "buildbot.steps", "buildbot.steps.package", "buildbot.steps.package.deb", "buildbot.steps.package.rpm", "buildbot.steps.source", "buildbot.util", "buildbot.wamp", "buildbot.www", "buildbot.www.hooks", "buildbot.www.authz", ] + ([] if BUILDING_WHEEL else [ # skip tests for wheels (save 50% of the archive) "buildbot.test", "buildbot.test.util", "buildbot.test.fake", "buildbot.test.fuzz", "buildbot.test.integration", "buildbot.test.integration.interop", "buildbot.test.regressions", "buildbot.test.unit", ]), 'data_files': [ include("buildbot/reporters/templates", "*.txt"), ("buildbot/db/migrate", [ "buildbot/db/migrate/migrate.cfg", ]), include("buildbot/db/migrate/versions", "*.py"), ("buildbot/scripts", [ "buildbot/scripts/sample.cfg", "buildbot/scripts/buildbot_tac.tmpl", ]), include("buildbot/spec", "*.raml"), include("buildbot/spec/types", "*.raml"), include("buildbot/test/unit/test_templates_dir", "*.html"), include("buildbot/test/unit/test_templates_dir/plugin", "*.*"), include("buildbot/test/integration/pki", "*.*"), include("buildbot/test/integration/pki/ca", "*.*"), ] + include_statics("buildbot/www/static"), 'cmdclass': {'install_data': install_data_twisted, 'sdist': our_sdist}, 'entry_points': concat_dicts(define_plugin_entries([ ('buildbot.changes', [ ('buildbot.changes.mail', [ 'MaildirSource', 'CVSMaildirSource', 'SVNCommitEmailMaildirSource', 'BzrLaunchpadEmailMaildirSource']), ('buildbot.changes.bitbucket', ['BitbucketPullrequestPoller']), ('buildbot.changes.github', ['GitHubPullrequestPoller']), ('buildbot.changes.bonsaipoller', ['BonsaiPoller']), ('buildbot.changes.gerritchangesource', ['GerritChangeSource']), ('buildbot.changes.gitpoller', ['GitPoller']), ('buildbot.changes.hgpoller', ['HgPoller']), ('buildbot.changes.p4poller', ['P4Source']), ('buildbot.changes.pb', ['PBChangeSource']), ('buildbot.changes.svnpoller', ['SVNPoller']) ]), ('buildbot.schedulers', [ ('buildbot.schedulers.basic', [ 'SingleBranchScheduler', 'AnyBranchScheduler']), ('buildbot.schedulers.dependent', ['Dependent']), ('buildbot.schedulers.triggerable', ['Triggerable']), ('buildbot.schedulers.forcesched', ['ForceScheduler']), ('buildbot.schedulers.timed', [ 'Periodic', 'Nightly', 'NightlyTriggerable']), ('buildbot.schedulers.trysched', [ 'Try_Jobdir', 'Try_Userpass']) ]), ('buildbot.secrets', [ ('buildbot.secrets.providers.file', ['SecretInAFile']), ('buildbot.secrets.providers.passwordstore', ['SecretInPass']), 
('buildbot.secrets.providers.vault', ['HashiCorpVaultSecretProvider']) ]), ('buildbot.worker', [ ('buildbot.worker.base', ['Worker']), ('buildbot.worker.ec2', ['EC2LatentWorker']), ('buildbot.worker.libvirt', ['LibVirtWorker']), ('buildbot.worker.openstack', ['OpenStackLatentWorker']), ('buildbot.worker.docker', ['DockerLatentWorker']), ('buildbot.worker.kubernetes', ['KubeLatentWorker']), ('buildbot.worker.local', ['LocalWorker']), ]), ('buildbot.machine', [ ('buildbot.machine.base', ['Machine']), ]), ('buildbot.steps', [ ('buildbot.process.buildstep', ['BuildStep']), ('buildbot.steps.cmake', ['CMake']), ('buildbot.steps.cppcheck', ['Cppcheck']), ('buildbot.steps.http', [ 'HTTPStep', 'POST', 'GET', 'PUT', 'DELETE', 'HEAD', 'OPTIONS']), ('buildbot.steps.master', [ 'MasterShellCommand', 'SetProperty', 'SetProperties', 'LogRenderable', "Assert"]), ('buildbot.steps.maxq', ['MaxQ']), ('buildbot.steps.mswin', ['Robocopy']), ('buildbot.steps.mtrlogobserver', ['MTR']), ('buildbot.steps.package.deb.lintian', ['DebLintian']), ('buildbot.steps.package.deb.pbuilder', [ 'DebPbuilder', 'DebCowbuilder', 'UbuPbuilder', 'UbuCowbuilder']), ('buildbot.steps.package.rpm.mock', [ 'Mock', 'MockBuildSRPM', 'MockRebuild']), ('buildbot.steps.package.rpm.rpmbuild', ['RpmBuild']), ('buildbot.steps.package.rpm.rpmlint', ['RpmLint']), ('buildbot.steps.package.rpm.rpmspec', ['RpmSpec']), ('buildbot.steps.python', [ 'BuildEPYDoc', 'PyFlakes', 'PyLint', 'Sphinx']), ('buildbot.steps.python_twisted', [ 'HLint', 'Trial', 'RemovePYCs']), ('buildbot.steps.shell', [ 'ShellCommand', 'TreeSize', 'SetPropertyFromCommand', 'Configure', 'WarningCountingShellCommand', 'Compile', 'Test', 'PerlModuleTest']), ('buildbot.steps.shellsequence', ['ShellSequence']), ('buildbot.steps.source.bzr', ['Bzr']), ('buildbot.steps.source.cvs', ['CVS']), ('buildbot.steps.source.darcs', ['Darcs']), ('buildbot.steps.source.gerrit', ['Gerrit']), ('buildbot.steps.source.git', ['Git', 'GitCommit', 'GitPush', 'GitTag']), ('buildbot.steps.source.github', ['GitHub']), ('buildbot.steps.source.gitlab', ['GitLab']), ('buildbot.steps.source.mercurial', ['Mercurial']), ('buildbot.steps.source.mtn', ['Monotone']), ('buildbot.steps.source.p4', ['P4']), ('buildbot.steps.source.repo', ['Repo']), ('buildbot.steps.source.svn', ['SVN']), ('buildbot.steps.subunit', ['SubunitShellCommand']), ('buildbot.steps.transfer', [ 'FileUpload', 'DirectoryUpload', 'MultipleFileUpload', 'FileDownload', 'StringDownload', 'JSONStringDownload', 'JSONPropertiesDownload']), ('buildbot.steps.trigger', ['Trigger']), ('buildbot.steps.vstudio', [ 'VC6', 'VC7', 'VS2003', 'VC8', 'VS2005', 'VCExpress9', 'VC9', 'VS2008', 'VC10', 'VS2010', 'VC11', 'VS2012', 'VC12', 'VS2013', 'VC14', 'VS2015', 'MsBuild4', 'MsBuild', 'MsBuild12', 'MsBuild14']), ('buildbot.steps.worker', [ 'SetPropertiesFromEnv', 'FileExists', 'CopyDirectory', 'RemoveDirectory', 'MakeDirectory']), ]), ('buildbot.reporters', [ ('buildbot.reporters.mail', ['MailNotifier']), ('buildbot.reporters.pushjet', ['PushjetNotifier']), ('buildbot.reporters.pushover', ['PushoverNotifier']), ('buildbot.reporters.message', ['MessageFormatter']), ('buildbot.reporters.gerrit', ['GerritStatusPush']), ('buildbot.reporters.gerrit_verify_status', ['GerritVerifyStatusPush']), ('buildbot.reporters.hipchat', ['HipChatStatusPush']), ('buildbot.reporters.http', ['HttpStatusPush']), ('buildbot.reporters.github', ['GitHubStatusPush', 'GitHubCommentPush']), ('buildbot.reporters.gitlab', ['GitLabStatusPush']), ('buildbot.reporters.stash', ['StashStatusPush']), 
('buildbot.reporters.bitbucketserver', ['BitbucketServerStatusPush', 'BitbucketServerPRCommentPush']), ('buildbot.reporters.bitbucket', ['BitbucketStatusPush']), ('buildbot.reporters.irc', ['IRC']), ('buildbot.reporters.telegram', ['TelegramBot']), ('buildbot.reporters.zulip', ['ZulipStatusPush']), ]), ('buildbot.util', [ # Connection seems to be a way too generic name, though ('buildbot.worker.libvirt', ['Connection']), ('buildbot.changes.filter', ['ChangeFilter']), ('buildbot.changes.gerritchangesource', ['GerritChangeFilter']), ('buildbot.changes.svnpoller', [ ('svn.split_file_projects_branches', 'split_file_projects_branches'), ('svn.split_file_branches', 'split_file_branches'), ('svn.split_file_alwaystrunk', 'split_file_alwaystrunk')]), ('buildbot.configurators.janitor', ['JanitorConfigurator']), ('buildbot.config', ['BuilderConfig']), ('buildbot.locks', [ 'MasterLock', 'WorkerLock', ]), ('buildbot.manhole', [ 'AuthorizedKeysManhole', 'PasswordManhole', 'TelnetManhole']), ('buildbot.process.builder', [ 'enforceChosenWorker', ]), ('buildbot.process.factory', [ 'BuildFactory', 'GNUAutoconf', 'CPAN', 'Distutils', 'Trial', 'BasicBuildFactory', 'QuickBuildFactory', 'BasicSVN']), ('buildbot.process.logobserver', ['LogLineObserver']), ('buildbot.process.properties', [ 'FlattenList', 'Interpolate', 'Property', 'Transform', 'WithProperties', 'renderer', 'Secret']), ('buildbot.process.properties', [ 'CommandlineUserManager']), ('buildbot.revlinks', ['RevlinkMatch']), ('buildbot.reporters.utils', ['URLForBuild']), ('buildbot.schedulers.forcesched', [ 'AnyPropertyParameter', 'BooleanParameter', 'ChoiceStringParameter', 'CodebaseParameter', 'FileParameter', 'FixedParameter', 'InheritBuildParameter', 'IntParameter', 'NestedParameter', 'ParameterGroup', 'PatchParameter', 'StringParameter', 'TextParameter', 'UserNameParameter', 'WorkerChoiceParameter', ]), ('buildbot.process.results', [ 'Results', 'SUCCESS', 'WARNINGS', 'FAILURE', 'SKIPPED', 'EXCEPTION', 'RETRY', 'CANCELLED']), ('buildbot.steps.mtrlogobserver', ['EqConnectionPool']), ('buildbot.steps.source.repo', [ ('repo.DownloadsFromChangeSource', 'RepoDownloadsFromChangeSource'), ('repo.DownloadsFromProperties', 'RepoDownloadsFromProperties')]), ('buildbot.steps.shellsequence', ['ShellArg']), ('buildbot.util.kubeclientservice', ['KubeHardcodedConfig', 'KubeCtlProxyConfigLoader', 'KubeInClusterConfigLoader']), ('buildbot.www.avatar', ['AvatarGravatar']), ('buildbot.www.auth', [ 'UserPasswordAuth', 'HTPasswdAuth', 'RemoteUserAuth', 'CustomAuth']), ('buildbot.www.ldapuserinfo', ['LdapUserInfo']), ('buildbot.www.oauth2', [ 'GoogleAuth', 'GitHubAuth', 'GitLabAuth', 'BitbucketAuth']), ('buildbot.db.dbconfig', [ 'DbConfig']), ('buildbot.www.authz', [ 'Authz', 'fnmatchStrMatcher', 'reStrMatcher']), ('buildbot.www.authz.roles', [ 'RolesFromEmails', 'RolesFromGroups', 'RolesFromOwner', 'RolesFromUsername', 'RolesFromDomain']), ('buildbot.www.authz.endpointmatchers', [ 'AnyEndpointMatcher', 'StopBuildEndpointMatcher', 'ForceBuildEndpointMatcher', 'RebuildBuildEndpointMatcher', 'AnyControlEndpointMatcher', 'EnableSchedulerEndpointMatcher']), ]), ('buildbot.webhooks', [ ('buildbot.www.hooks.base', ['base']), ('buildbot.www.hooks.bitbucket', ['bitbucket']), ('buildbot.www.hooks.github', ['github']), ('buildbot.www.hooks.gitlab', ['gitlab']), ('buildbot.www.hooks.gitorious', ['gitorious']), ('buildbot.www.hooks.poller', ['poller']), ('buildbot.www.hooks.bitbucketcloud', ['bitbucketcloud']), ('buildbot.www.hooks.bitbucketserver', ['bitbucketserver']) ]) ]), { 
'console_scripts': [ 'buildbot=buildbot.scripts.runner:run', # this will also be shipped on non windows :-( 'buildbot_windows_service=buildbot.scripts.windows_service:HandleCommandLine', ]} ) } # set zip_safe to false to force Windows installs to always unpack eggs # into directories, which seems to work better -- # see http://buildbot.net/trac/ticket/907 if sys.platform == "win32": setup_args['zip_safe'] = False py_35 = sys.version_info[0] > 3 or ( sys.version_info[0] == 3 and sys.version_info[1] >= 5) if not py_35: raise RuntimeError("Buildbot master requires at least Python-3.5") # pip<1.4 doesn't have the --pre flag, and will thus attempt to install alpha # and beta versions of Buildbot. Prevent that from happening. VERSION_MSG = """ This is a pre-release version of Buildbot, which can only be installed with pip-1.4 or later Try installing the latest stable version of Buildbot instead: pip install buildbot==0.8.12 See https://pypi.python.org/pypi/buildbot to verify the current stable version. """ if 'a' in version or 'b' in version: try: pip_dist = pkg_resources.get_distribution('pip') except pkg_resources.DistributionNotFound: pip_dist = None if pip_dist: if LooseVersion(pip_dist.version) < LooseVersion('1.4'): raise RuntimeError(VERSION_MSG) twisted_ver = ">= 17.9.0" autobahn_ver = ">= 0.16.0" txaio_ver = ">= 2.2.2" bundle_version = version.split("-")[0] # dependencies setup_args['install_requires'] = [ 'setuptools >= 8.0', 'Twisted ' + twisted_ver, 'Jinja2 >= 2.1', # required for tests, but Twisted requires this anyway 'zope.interface >= 4.1.1', 'sqlalchemy>=1.1.0', 'sqlalchemy-migrate>=0.9', 'python-dateutil>=1.5', 'txaio ' + txaio_ver, 'autobahn ' + autobahn_ver, 'PyJWT', 'pyyaml' ] # Unit test dependencies. test_deps = [ # http client libraries 'treq', 'txrequests', # pyjade required for custom templates tests 'pyjade', # boto3 and moto required for running EC2 tests 'boto3', 'moto', 'mock>=2.0.0', 'parameterized', ] if sys.platform != 'win32': test_deps += [ # LZ4 fails to build on Windows: # https://github.com/steeve/python-lz4/issues/27 # lz4 required for log compression tests. 'lz4', ] setup_args['tests_require'] = test_deps setup_args['extras_require'] = { 'test': [ 'setuptools_trial', 'isort', # spellcheck introduced in version 1.4.0 'pylint<1.7.0', 'pyenchant', 'flake8~=2.6.0', ] + test_deps, 'bundle': [ "buildbot-www=={0}".format(bundle_version), "buildbot-worker=={0}".format(bundle_version), "buildbot-waterfall-view=={0}".format(bundle_version), "buildbot-console-view=={0}".format(bundle_version), "buildbot-grid-view=={0}".format(bundle_version), ], 'tls': [ 'Twisted[tls] ' + twisted_ver, # There are bugs with extras inside extras: # <https://github.com/pypa/pip/issues/3516> # so we explicitly include Twisted[tls] dependencies. 'pyopenssl >= 16.0.0', 'service_identity', 'idna >= 0.6', ], 'docs': [ 'docutils<0.13.0', 'sphinx>1.4.0,<2.1.0', 'sphinxcontrib-blockdiag', 'sphinxcontrib-spelling', 'pyenchant', 'docutils>=0.8', 'sphinx-jinja', 'towncrier', ], } if '--help-commands' in sys.argv or 'trial' in sys.argv or 'test' in sys.argv: setup_args['setup_requires'] = [ 'setuptools_trial', ] if os.getenv('NO_INSTALL_REQS'): setup_args['install_requires'] = None setup_args['extras_require'] = None if __name__ == '__main__': setup(**setup_args) # Local Variables: # fill-column: 71 # End:
gpl-2.0
154,408,006,337,735,580
37.867145
130
0.57684
false
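A small standalone illustration of the entry-point helpers defined in the setup.py above: define_plugin_entry() turns a plugin name (optionally an (entry, attribute) tuple) plus a module path into the 'entry = module:attr' strings that setuptools expects in entry_points. The helper body below is copied from the record; the two calls simply show the strings it produces for entries that appear in the same file.

def define_plugin_entry(name, module_name):
    # An (entry, name) tuple lets the advertised entry point differ from the attribute name
    if isinstance(name, tuple):
        entry, name = name
    else:
        entry = name
    return '%s = %s:%s' % (entry, module_name, name)

print(define_plugin_entry('GitPoller', 'buildbot.changes.gitpoller'))
# GitPoller = buildbot.changes.gitpoller:GitPoller
print(define_plugin_entry(('svn.split_file_branches', 'split_file_branches'),
                          'buildbot.changes.svnpoller'))
# svn.split_file_branches = buildbot.changes.svnpoller:split_file_branches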
jeicher/NMRPy
setup.py
1
2092
import os
import glob
import sysconfig

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

with open('requirements.txt') as f:
    requirements = f.read().splitlines()

with open('nmrpy/version.py') as f:
    exec(f.read())

# data files for tests
data_path = 'test_data'
data_dirs = [data_path]
os.chdir('nmrpy/tests')
files_and_dirs = glob.glob('test_data/**/*', recursive=True)
for i in files_and_dirs:
    if os.path.isdir(i):
        data_dirs.append(i)
os.chdir('../..')
mydata_nmrpy_test = [i+'/*' for i in data_dirs]

# data files for base package (documentation PDF)
mydata_nmrpy = ['docs/*']

config = {
    'description': 'A suite of tools for processing and analysing NMR spectra in Python.',
    'long_description': """
NMRPy is a Python 3 module for the processing and analysis of NMR spectra. The
functionality of NMRPy is structured to make the analysis of arrayed NMR
spectra more intuitive and is specifically targeted to the quantification of
reaction time-courses collected with NMR.

NMRPy features a set of easy-to-use tools for:

- easy loading of spectra from a variety of vendors,
- bulk Fourier transform and phase correction of arrayed spectra,
- peak selection (programmatically or using graphical widgets),
- integration of peaks by deconvolution.

NMRPy is developed by Johann Eicher and Johann Rohwer from the Laboratory for
Molecular Systems Biology, Dept. of Biochemistry, Stellenbosch University,
South Africa.
""",
    'author': 'Johann Eicher <[email protected]>, Johann Rohwer <[email protected]>',
    'author_email': '[email protected], [email protected]',
    'maintainer': 'Johann Rohwer',
    'maintainer_email': '[email protected]',
    'url': 'https://github.com/NMRPy/nmrpy',
    'version': __version__,
    'install_requires': requirements,
    'platforms': ['Windows', 'Linux', 'macOS'],
    'packages': ['nmrpy', 'nmrpy.tests'],
    'package_data': {'nmrpy.tests': mydata_nmrpy_test, 'nmrpy': mydata_nmrpy},
    'license': 'New BSD',
    'name': 'nmrpy'
}

setup(**config)
bsd-3-clause
-1,635,607,369,083,636,200
32.741935
93
0.704589
false
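The test-data scan in the NMRPy setup.py above walks nmrpy/tests/test_data and turns every directory into a 'dir/*' glob for package_data. A toy illustration of the resulting list, using a made-up directory layout:

# Hypothetical layout: nmrpy/tests/test_data/bruker1/pdata/
data_dirs = ['test_data', 'test_data/bruker1', 'test_data/bruker1/pdata']
mydata_nmrpy_test = [i + '/*' for i in data_dirs]
print(mydata_nmrpy_test)
# ['test_data/*', 'test_data/bruker1/*', 'test_data/bruker1/pdata/*']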
bigmlcom/bigmler
bigmler/options/test.py
1
4107
# -*- coding: utf-8 -*-
#
# Copyright 2014-2021 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Options for BigMLer test files processing

"""

def get_test_options(defaults=None):
    """Test files-related options

    """

    if defaults is None:
        defaults = {}
    options = {
        # Path to the test set.
        "--test": {
            'action': 'store',
            'dest': 'test_set',
            'nargs': '?',
            'default': defaults.get('test', None),
            'help': "Test set path."},

        # Name of the file to output predictions.
        "--output": {
            'action': 'store',
            'dest': 'output',
            'default': defaults.get('output', None),
            'help': "Path to the file to output predictions."},

        # Set when the test set file doesn't include a header on the first
        # line.
        '--no-test-header': {
            'action': 'store_false',
            'dest': 'test_header',
            'default': defaults.get('test_header', True),
            'help': "The test set file hasn't a header."},

        # Test set field separator. Defaults to the locale csv
        # separator.
        '--test-separator': {
            'action': 'store',
            'dest': 'test_separator',
            'default': defaults.get('test_separator', None),
            'help': "Test set field separator."},

        # The path to a file containing attributes if you want to alter BigML's
        # default field attributes or the ones provided by the test file
        # header.
        '--test-field-attributes': {
            'action': 'store',
            'dest': 'test_field_attributes',
            'default': defaults.get('test_field_attributes', None),
            'help': ("Path to a csv file describing field attributes."
                     " One definition per line"
                     " (e.g., 0,'Last Name').")},

        # The path to a file containing types if you want to alter BigML's
        # type auto-detect.
        '--test-types': {
            'action': 'store',
            'dest': 'test_types',
            'default': defaults.get('test_types', None),
            'help': ("Path to a file describing field types. One"
                     " definition per line (e.g., 0, 'numeric').")},

        # If a BigML test source is provided, the script won't create a new one
        '--test-source': {
            'action': 'store',
            'dest': 'test_source',
            'default': defaults.get('test_source', None),
            'help': "BigML test source Id."},

        # If a BigML test dataset is provided, the script won't create a new
        # one
        '--test-dataset': {
            'action': 'store',
            'dest': 'test_dataset',
            'default': defaults.get('test_dataset', None),
            'help': "BigML test dataset Id."},

        # The path to a file containing dataset ids.
        '--test-datasets': {
            'action': 'store',
            'dest': 'test_datasets',
            'default': defaults.get('test_datasets', None),
            'help': ("Path to a file containing dataset/ids. Just"
                     " one dataset per line"
                     " (e.g., dataset/50a20697035d0706da0004a4).")},

        # Set when the test set file does include a header on the first
        # line. (opposed to --no-test-header)
        '--test-header': {
            'action': 'store_true',
            'dest': 'test_header',
            'default': defaults.get('test_header', True),
            'help': "The test set file has a header."}}

    return options
apache-2.0
-8,259,076,186,247,161,000
35.669643
79
0.54468
false
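The dictionary returned by get_test_options() above maps option flags to keyword arguments. One plausible way to consume it -- whether BigMLer wires it up exactly like this is an assumption -- is to feed each entry straight into argparse.ArgumentParser.add_argument(), since the keys match its signature:

import argparse

# Single entry copied from the record above; the test path below is made up.
options = {
    "--test": {
        'action': 'store',
        'dest': 'test_set',
        'nargs': '?',
        'default': None,
        'help': "Test set path."}}

parser = argparse.ArgumentParser()
for flag, kwargs in options.items():
    parser.add_argument(flag, **kwargs)

args = parser.parse_args(["--test", "iris_test.csv"])
print(args.test_set)  # iris_test.csv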
AllYarnsAreBeautiful/knittingpattern
knittingpattern/test/test_load_instructions.py
1
1104
from pytest import fixture
import os

from knittingpattern.InstructionLibrary import InstructionLibrary


@fixture
def lib():
    return InstructionLibrary()


def test_load_from_relative_file(lib):
    relative_path = "test_instructions/test_instruction_1.json"
    lib.load.relative_file(__file__, relative_path)
    assert lib.as_instruction({"type": "test1"})["value"] == 1
    assert "value" not in lib.as_instruction({"type": "test2"})


def test_load_from_relative_folder(lib):
    lib.load.relative_folder(__file__, "test_instructions")
    assert lib.as_instruction({"type": "test1"})["value"] == 1
    assert lib.as_instruction({"type": "test2"})["value"] == 2


def test_load_from_folder(lib):
    folder = os.path.join(os.path.dirname(__file__), "test_instructions")
    lib.load.folder(folder)
    assert lib.as_instruction({"type": "test2"})["value"] == 2
    assert lib.as_instruction({"type": "test1"})["value"] == 1


def test_loading_from_folder_recursively(lib):
    lib.load.relative_folder(__file__, "test_instructions")
    assert lib.as_instruction({"type": "test3"})["value"] == 3
lgpl-3.0
7,731,722,249,725,216,000
32.454545
73
0.678442
false
memsharded/conan
conans/paths/__init__.py
1
1885
# coding=utf-8

import os
import platform

if platform.system() == "Windows":
    from conans.util.windows import conan_expand_user, rm_conandir
else:
    from conans.util.files import rmdir
    conan_expand_user = os.path.expanduser
    rm_conandir = rmdir


def get_conan_user_home():
    user_home = os.getenv("CONAN_USER_HOME", "~")
    tmp = conan_expand_user(user_home)
    if not os.path.isabs(tmp):
        raise Exception("Invalid CONAN_USER_HOME value '%s', "
                        "please specify an absolute or path starting with ~/ "
                        "(relative to user home)" % tmp)
    return os.path.abspath(tmp)


# Files
CONANFILE = 'conanfile.py'
CONANFILE_TXT = "conanfile.txt"
CONAN_MANIFEST = "conanmanifest.txt"

BUILD_INFO = 'conanbuildinfo.txt'
BUILD_INFO_GCC = 'conanbuildinfo.gcc'
BUILD_INFO_COMPILER_ARGS = 'conanbuildinfo.args'
BUILD_INFO_CMAKE = 'conanbuildinfo.cmake'
BUILD_INFO_QMAKE = 'conanbuildinfo.pri'
BUILD_INFO_QBS = 'conanbuildinfo.qbs'
BUILD_INFO_VISUAL_STUDIO = 'conanbuildinfo.props'
BUILD_INFO_XCODE = 'conanbuildinfo.xcconfig'
BUILD_INFO_PREMAKE = 'conanbuildinfo.premake.lua'
BUILD_INFO_MAKE = 'conanbuildinfo.mak'
BUILD_INFO_DEPLOY = 'deploy_manifest.txt'
CONANINFO = "conaninfo.txt"
CONANENV = "conanenv.txt"
SYSTEM_REQS = "system_reqs.txt"
PUT_HEADERS = "artifacts.properties"
PACKAGE_TGZ_NAME = "conan_package.tgz"
EXPORT_TGZ_NAME = "conan_export.tgz"
EXPORT_SOURCES_TGZ_NAME = "conan_sources.tgz"
EXPORT_SOURCES_DIR_OLD = ".c_src"
RUN_LOG_NAME = "conan_run.log"
DEFAULT_PROFILE_NAME = "default"
SCM_FOLDER = "scm_folder.txt"
PACKAGE_METADATA = "metadata.json"
CACERT_FILE = "cacert.pem"  # Server authorities file
DATA_YML = "conandata.yml"

# Directories
EXPORT_FOLDER = "export"
EXPORT_SRC_FOLDER = "export_source"
SRC_FOLDER = "source"
BUILD_FOLDER = "build"
PACKAGES_FOLDER = "package"
SYSTEM_REQS_FOLDER = "system_reqs"
mit
-6,278,964,964,823,793,000
29.901639
78
0.710875
false
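A runnable sketch of how the CONAN_USER_HOME handling in the record above behaves on a POSIX system (illustration only; it inlines os.path.expanduser, which is what conan_expand_user resolves to outside Windows, and the cache path is a made-up example value):

import os

def conan_user_home():
    # Same logic as get_conan_user_home() above, with conan_expand_user inlined
    user_home = os.getenv("CONAN_USER_HOME", "~")
    tmp = os.path.expanduser(user_home)
    if not os.path.isabs(tmp):
        raise Exception("Invalid CONAN_USER_HOME value '%s', "
                        "please specify an absolute or path starting with ~/ "
                        "(relative to user home)" % tmp)
    return os.path.abspath(tmp)

os.environ["CONAN_USER_HOME"] = "~/my_conan_cache"
print(conan_user_home())  # e.g. /home/<user>/my_conan_cache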
JoshLabs/django-allauth
allauth/socialaccount/south_migrations/0001_initial.py
1
4750
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.conf import settings

try:
    from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
    from django.contrib.auth.models import User
    def get_user_model():
        return User


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding model 'SocialAccount'
        db.create_table('socialaccount_socialaccount', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=get_user_model())),
            ('last_login', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
            ('date_joined', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
        ))
        db.send_create_signal('socialaccount', ['SocialAccount'])

    def backwards(self, orm):
        # Deleting model 'SocialAccount'
        db.delete_table('socialaccount_socialaccount')

    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'socialaccount.socialaccount': {
            'Meta': {'object_name': 'SocialAccount'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        }
    }

    complete_apps = ['socialaccount']
mit
2,162,468,607,797,285,600
56.228916
182
0.576632
false
Narrato/mongotron
mongotron/Document.py
1
21266
from __future__ import absolute_import import logging import warnings from bson.objectid import ObjectId, InvalidId from .exceptions import ValidationError from .ConnectionManager import GetConnectionManager from .Cursor import Cursor from . import field_types LOG = logging.getLogger('mongotron.Document') class classproperty(object): """Equivalent to property() on a class, i.e. invoking the descriptor results in the wrapped function being invoked and its return value being used as the descriptor's value. """ def __init__(self, f): self.f = f def __get__(self, obj, owner): return self.f(owner) class DocumentMeta(type): """This is the metaclass for :py:class:`Document`; it is responsible for merging :py:attr:`Document.structure`, :py:attr:`Document.field_map` and :py:attr:`Document.default_values` with any base classes. After this is done, it synthesizes a new :py:attr:`Document.field_types` mapping using :py:class:`mongotron.field_types.Field` Field instances. """ INHERITED_DICTS = ['structure', 'default_values', 'field_map'] INHERITED_SETS = ['required', 'write_once'] def __new__(cls, name, bases, attrs): for base in bases: parent = base.__mro__[0] for dname in cls.INHERITED_DICTS: cls.merge_carefully(parent, dname, attrs) for sname in cls.INHERITED_SETS: val = set() val.update(attrs.get(sname, [])) for base in bases: val.update(vars(base.__mro__[0]).get(sname, [])) attrs[sname] = val cls.check_field_map(name, attrs) cls.make_inverse_map(attrs) attrs['field_types'] = cls.make_field_types(attrs) attrs['__collection__'] = cls.make_collection_name(name, attrs) attrs.setdefault('__manager__', GetConnectionManager()) attrs.setdefault('__connection__', None) # Fields are descriptors for their corresponding attribute. attrs.update(attrs['field_types']) # print '----------------------------------------' # pprint(attrs) # print '----------------------------------------' return type.__new__(cls, name, bases, attrs) @classmethod def check_field_map(cls, name, attrs): # Can't use LOG since logging package probably isn't configured while # models are being imported. for field_name in attrs['structure']: if field_name not in attrs['field_map']: warnings.warn('%s.%s has no short name for %r' % (attrs['__module__'], name, field_name)) @classmethod def make_inverse_map(cls, attrs): attrs['inverse_field_map'] = dct = {} for canon, short in attrs['field_map'].iteritems(): if short in dct: raise TypeError('duplicate short key %r for field %r ' '(already used for field %r)' %\ (short, canon, dct[short])) dct[short] = canon it = attrs['field_map'].iteritems() attrs['inverse_field_map'] = dict((k, v) for k, v in it) @classmethod def merge_carefully(cls, base, dname, attrs): """Merge the contents a base class attribute's dictionary into the child, complaining bitterly if a duplicate entry exists.""" base_dct = getattr(base, dname, {}) dct = attrs.setdefault(dname, {}) assert all(type(d) is dict for d in (base_dct, dct)) for key, value in base_dct.iteritems(): if key in dct and key != '_id': raise TypeError('duplicate %r of dict %r appears in base %r' %\ (key, dname, base)) dct[key] = value @classmethod def make_collection_name(cls, name, attrs): """Form a collection name for the class, or use the user-provided name.""" return str(attrs.get('__collection__', name.lower())) @classmethod def make_field_types(cls, attrs): """Return a mapping of field names to :py:class:`Field` instances describing that field. 
""" types = {} for name, desc in attrs['structure'].iteritems(): types[name] = field_types.parse(desc, required=name in attrs['required'], default=attrs['default_values'].get(name), name=name, write_once=name in attrs['write_once']) return types class Document(object): """A class with property-style access. It maps attribute access to an internal dictionary, and tracks changes. """ __metaclass__ = DocumentMeta __should_explain = False #: Map of canonical field names to objects representing the required type #: for that field. structure = { '_id': field_types.ObjectIdField(name='_id', doc="""The underlying document's _id field, or ``None`` if the document has never been saved.""", readonly=True) } #: List of canonical field names that absolutely must be set prior to save. #: Automatically populated by metaclass. required = [] #: List of canonical field names that cannot be overwritten once they have #: been set. write_once = [] #: Map of canonical field names to their default values. default_values = {} #: Map of canonical field names to shortened field names. Automatically #: populated by metaclass. field_map = { '_id': '_id' } #: Map of shortened field names to canonical field names. Automatically #: populated by metaclass. inverse_field_map = {} #: Map of canonical field names to Field instances describing the field. #: Automatically populated by metaclass. field_types = {} def validate(self): """Hook invoked prior to creating or updating document, but after :py:meth:`pre_save`, :py:meth:`pre_update` or :py:meth:`pre_insert` have run. Expected to raise an exception if the document's structure does not make sense. The base implementation must be called in order to handle :py:attr:`Document.required` processing. """ missing = self.required.difference(self.__attributes) if missing: raise ValidationError('missing required fields: %s' %\ (', '.join(missing),)) def on_load(self): """Hook invoked while document is being initialized. :py:meth:`on_load` is only called if the Document is being loaded from mongo (i.e. is it being initialized with Data) Override in your subclass as desired, but remember to call super() """ def pre_save(self): """Hook invoked prior to creating or updating a document. :py:meth:`pre_save` is always invoked before :py:meth:`pre_insert` or :py:meth:`pre_update`. Any mutations produced by :py:meth:`pre_save` will be reflected in the saved document. Override in your subclass as desired.""" def post_save(self): """Hook invoked after a document has been created or updated successfully. :py:meth:`pre_save` is always invoked before :py:meth:`pre_insert` or :py:meth:`pre_update`. Override in your subclass as desired.""" def pre_insert(self): """Hook invoked prior to document creation, but after :py:meth:`pre_save`. Any mutations produced by :py:meth:`pre_insert` will be reflected in the saved document. Override in your subclass as desired.""" def post_insert(self): """Hook invoked after document creation, but after :py:meth:`post_save`. Override in your subclass as desired.""" def pre_update(self): """Hook invoked prior to document update, but after :py:meth:`pre_save`. Any mutations produced by :py:meth:`pre_update` will be reflected in the saved document. Override in your subclass as desired.""" def post_update(self): """Hook invoked after document update, but after :py:meth:`post_save`. 
Override in your subclass as desired.""" @classproperty def _dbcollection(cls): conn = cls.__manager__.get_connection(cls.__connection__, True) try: return conn[cls.__db__][cls.__collection__] except AttributeError: raise AttributeError('__db__ field is not set on your object!') @classmethod def long_to_short(cls, long_key): """Return the shortened field name for `long_key`, returning `long_key` if no short version exists.""" return cls.field_map.get(long_key, long_key) @classmethod def short_to_long(cls, short_key): """Return the canonical field name for `short_key`, returning `short_key` if no canonical version exists.""" return cls.inverse_field_map.get(short_key, short_key) def merge_dict(self, dct): """Load keys and collapsed values from `dct`. """ for key, field in self.field_types.iteritems(): short = self.long_to_short(key) if short in dct: self.__attributes[key] = dct[short] elif key in dct: self.__attributes[key] = dct[key] elif key in self.default_values: self.set(key, field.make()) def load_dict(self, dct): """Reset the document to an empty state, then load keys and values from the dictionary `doc`.""" self.clear_ops() self.__attributes = {} self.merge_dict(dct) self.__identity = self.identity() def to_json_dict(self, **kwargs): return dict() def from_json_dict(self, json_dict): pass def __init__(self, doc=None): self.load_dict(doc or {}) if doc: self.on_load() def __setattr__(self, name, value): """Nasty guard to prevent object writes for nonexistent attributes. It should be possible to replace this with the ``__slots__`` mechanism, but there is some apparent incompatibility with using metaclasses and weakrefs simultaneously.""" if name.startswith('_'): vars(self)[name] = value else: getattr(self.__class__, name).__set__(self, value) def __repr__(self): return "%s(%r)" % (self.__class__.__name__, self.__attributes) def __contains__(self, key): return key in self.__attributes def __eq__(self, other): return (self.__class__ == other.__class__ and self._id and other._id and self._id == other._id) def __hash__(self): return hash(self._id) @property def embedded(self): """For future embedded docs, will stop save() from working and will let the parent know it can be converted into a dict!""" return self._embedded def get(self, key): """Fetch the value of `key` from the underlying document, returning ``None`` if the value does not exist.""" return self.__attributes.get(key) # mongo operation wrappers! def add_operation(self, op, key, val): """Arrange for the Mongo operation `op` to be applied to the document property `key` with the operand `value` during save. 
""" if op != '$unset': try: val = self.field_types[key].collapse(val) except KeyError: raise KeyError('%r is not a settable key' % (key,)) # We should probably make this smarter so you can't set a top level # array and a component at the same time though if you're doing that, # your code is broken anyway key = self.long_to_short(key) op_dict = self.__ops.setdefault(op, {}) if op == '$set': raise ValueError('$set is an invalid operation!') elif op == '$inc' or op == '$dec': op_dict[key] = val elif op == '$addToSet': # $addToSet gets special handling because we use the $each version if not key in op_dict: op_dict[key] = { '$each':[] } param_dict = op_dict[key] param_list = param_dict['$each'] if isinstance(val, list): param_list.extend(val) else: param_list.append(val) else: param_list = op_dict.setdefault(key, []) if isinstance(val, list): param_list.extend(val) else: param_list.append(val) @property def operations(self): # construct the $set changes ops = self.__ops.copy() x = {} for key in self.__dirty_fields: if key in self.__attributes: x[self.long_to_short(key)] = self.__attributes[key] ops['$set'] = x return ops def clear_ops(self): """Reset the list of field changes tracked on this document. Note this will not reset field values to their original. """ self.__ops = {} self.__dirty_fields = set() # MONGO MAGIC HAPPENS HERE! def mark_dirty(self, key): """Explicitly mark the field `key` as modified, so that it will be mutated during :py:meth:`save`.""" #TODO: add this key to the $set listd self.__dirty_fields.add(key) def set(self, key, value): """Unconditionally set the underlying document field `key` to `value`. If `value` is ``None``, then behave as if :py:meth:`unset` was invoked. Note this does no type checking or field name mapping; you may use it to directly modify the underlying Mongo document. """ if value is None: return self.unset(key) self.__dirty_fields.add(key) self.__attributes[key] = value # Everything about this is stupid. Needs general solution, see bug #1 short = self.long_to_short(key) for op, fields in self.__ops.iteritems(): fields.pop(short, None) def unset(self, key): """Unconditionally remove the underlying document field `key`. Note this does no type checking or field name mapping; you may use it to directly modify the underlying Mongo document. This operation can also be unvoked using `del`: :: >>> # Equivalent to instance.unset('attr'): >>> del instance.attr """ self.__attributes.pop(key, None) self.add_operation('$unset', key, 1) __delattr__ = unset def inc(self, key, value=1): """Increment the value of `key` by `value`. """ self.add_operation('$inc', key, value) def dec(self, key, value=1): """Decrement the value of `key` by `value`. """ self.add_operation('$inc', key, -abs(value)) #addToSet gets special handling because we use the $each version def addToSet(self, key, value): #this is a bit more complicated #what we need to do is store an "each" part self.add_operation('$addToSet', key, value) # we translate push and pull into pushAll and pullAll # so that we can queue up the operations! def pull(self, key, value): #this is a bit more complicated #what we need to do is store an "each" part self.add_operation('$pullAll', key, value) def push(self, key, value): """Append `value` to the list-valued `key`. """ self.add_operation('$pushAll', key, value) def identity(self): """Return a MongoDB query that matches this document. Needed to correctly form findAndModify command in a sharded environment (i.e. 
you must override this and have it return a query that matches all fields in the assocated collection's shardkey. Base implementation simply returns _id or a new ObjectId. """ return {'_id': self._id or ObjectId()} def save(self, safe=True): """Insert the document into the underlying collection if it is unsaved, otherwise update the existing document. `safe`: Does nothing, yet. """ self.pre_save() # NOTE: called BEFORE we get self.operations to allow the pre_ # functions to add to the set of operations. (i.e. set last modified # fields etc) new = self._id is None if new: self.pre_insert() else: self.pre_update() self.validate() col = self._dbcollection ops = self.operations if ops: res = col.find_and_modify(query=self.__identity, update=ops, upsert=True, new=True) self.load_dict(res) if new: self.post_insert() else: self.post_update() self.clear_ops() self.post_save() def delete(self): """Delete the underlying document. Returns ``True`` if the document was deleted, otherwise ``False`` if it did not exist. """ # TODO: parse returned ack dict to ensure a deletion occurred. assert self._id, 'Cannot delete unsaved Document' self._dbcollection.remove({'_id':self._id}) return True @classmethod def map_search_list(cls, search_list): newlist = [] for v in search_list: if isinstance(v, dict): v = cls.map_search_dict(v) elif isinstance(v,list): v = cls.map_search_list(v) newlist.append(v) return newlist @classmethod def map_search_dict(cls, search_dict): newdict = {} for k in search_dict: v = search_dict[k] if isinstance(v, dict): v = cls.map_search_dict(v) elif isinstance(v, list): v = cls.map_search_list(v) k = cls.long_to_short(k) newdict[k] = v return newdict @classmethod def find(cls, *args, **kwargs): """Like :py:meth:`Collection.find <pymongo.collection.Collection.find>` """ if 'spec' in kwargs: kwargs['spec'] = cls.map_search_dict(kwargs['spec']) args = list(args) if len(args): args[0] = cls.map_search_dict(args[0]) if 'slave_okay' not in kwargs and hasattr(cls._dbcollection, 'slave_okay'): kwargs['slave_okay'] = cls._dbcollection.slave_okay if 'read_preference' not in kwargs and hasattr(cls._dbcollection, 'read_preference'): kwargs['read_preference'] = cls._dbcollection.read_preference if 'tag_sets' not in kwargs and hasattr(cls._dbcollection, 'tag_sets'): kwargs['tag_sets'] = cls._dbcollection.tag_sets if 'secondary_acceptable_latency_ms' not in kwargs and \ hasattr(cls._dbcollection, 'secondary_acceptable_latency_ms'): kwargs['secondary_acceptable_latency_ms'] = ( cls._dbcollection.secondary_acceptable_latency_ms ) return Cursor(cls._dbcollection, document_class=cls, *args, **kwargs) @classmethod def find_one(cls, spec_or_id=None, *args, **kwargs): """Find a document with the given ObjectID `spec_or_id`. You can pass an ObjectId in and it'll auto-search on the _id field. Like :py:meth:`Collection.find_one <pymongo.collection.Collection.find_one>` """ if 'spec' in kwargs: kwargs['spec'] = cls.map_search_dict(kwargs['spec']) args = list(args) if len(args): args[0] = cls.map_search_dict(args[0]) collection = cls._dbcollection #thing = collection.find_one(*args,**kwargs) if spec_or_id is not None and not isinstance(spec_or_id, dict): spec_or_id = {"_id": spec_or_id} for result in cls.find(spec_or_id, *args, **kwargs).limit(-1): return result return None @classmethod def update(cls, spec, document, **kwargs): """Modify existing documents matching `spec` using the operations from `update`. 
Like :py:meth:`Collection.update <pymongo.collection.Collection.update>` """ #TODO: implement update return cls._dbcollection.update(spec, document, **kwargs) @classmethod def get_by_id(cls, oid): """ Get a document by a specific ID. This is mapped to the _id field. You can pass a string or an ObjectId. """ # Convert id to ObjectId if isinstance(oid, basestring): try: oid = ObjectId(oid) except: return None elif not isinstance(oid, ObjectId): raise ValueError('oid should be an ObjectId or string') return cls.find_one({'_id':oid}) def document_as_dict(self): """Return a dict representation of the document suitable for encoding as BSON.""" x = {} for key, val in self.__attributes.iteritems(): x[self.long_to_short(key)] = val return x
bsd-2-clause
4,267,603,442,421,332,500
34.443333
93
0.582009
false
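The Document class in mongotron/Document.py above is easier to follow with a usage sketch. The snippet below is illustrative only: the Person model, its fields, the 'example_db' name, and the assumption that field_types.parse() accepts a plain Python type as a structure descriptor are all mine, not taken from the repository.

# Illustrative sketch only -- model, fields and database name are hypothetical,
# and it assumes field_types.parse() accepts a plain type as a descriptor.
from mongotron.Document import Document

class Person(Document):
    __db__ = 'example_db'        # _dbcollection raises AttributeError if __db__ is missing
    __collection__ = 'people'    # optional; defaults to the lowercased class name
    structure = {'name': str}    # turned into a Field instance by the metaclass
    field_map = {'name': 'n'}    # canonical field name -> short key stored in Mongo

p = Person()
p.name = 'Ada'                               # attribute access goes through the Field descriptor
p.save()                                     # find_and_modify upsert keyed on identity()
found = Person.find_one({'name': 'Ada'})     # long names are translated to short keys in queries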
puiterwijk/LocalBadge
setup.py
1
1188
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright © 2015 Patrick Uiterwijk <[email protected]> # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions # of the GNU General Public License v2, or (at your option) any later # version. This program is distributed in the hope that it will be # useful, but WITHOUT ANY WARRANTY expressed or implied, including the # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR # PURPOSE. See the GNU General Public License for more details. You # should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # from setuptools import setup setup(name='LocalBadgeIssuance', version='1.0', description='Issue badges at an event', author='Patrick Uiterwijk', author_email='[email protected]/', url='https://github.com/puiterwijk/LocalBadge', license='GPLv2+', install_requires=['Flask', 'Flask-SQLAlchemy'], )
gpl-2.0
-4,144,431,248,314,628,600
38.566667
71
0.711879
false
dims/cinder
cinder/backup/driver.py
2
15981
# Copyright (C) 2013 Deutsche Telekom AG # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Base class for all backup drivers.""" import abc from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils import six from cinder.db import base from cinder import exception from cinder.i18n import _, _LI, _LW from cinder import keymgr service_opts = [ cfg.IntOpt('backup_metadata_version', default=2, help='Backup metadata version to be used when backing up ' 'volume metadata. If this number is bumped, make sure the ' 'service doing the restore supports the new version.'), cfg.IntOpt('backup_object_number_per_notification', default=10, help='The number of chunks or objects, for which one ' 'Ceilometer notification will be sent'), cfg.IntOpt('backup_timer_interval', default=120, help='Interval, in seconds, between two progress notifications ' 'reporting the backup status'), ] CONF = cfg.CONF CONF.register_opts(service_opts) LOG = logging.getLogger(__name__) class BackupMetadataAPI(base.Base): TYPE_TAG_VOL_BASE_META = 'volume-base-metadata' TYPE_TAG_VOL_META = 'volume-metadata' TYPE_TAG_VOL_GLANCE_META = 'volume-glance-metadata' def __init__(self, context, db_driver=None): super(BackupMetadataAPI, self).__init__(db_driver) self.context = context @staticmethod def _is_serializable(value): """Returns True if value is serializable.""" try: jsonutils.dumps(value) except TypeError: LOG.info(_LI("Value with type=%s is not serializable"), type(value)) return False return True def _save_vol_base_meta(self, container, volume_id): """Save base volume metadata to container. This will fetch all fields from the db Volume object for volume_id and save them in the provided container dictionary. """ type_tag = self.TYPE_TAG_VOL_BASE_META LOG.debug("Getting metadata type '%s'", type_tag) meta = self.db.volume_get(self.context, volume_id) if meta: container[type_tag] = {} for key, value in meta: # Exclude fields that are "not JSON serializable" if not self._is_serializable(value): LOG.info(_LI("Unable to serialize field '%s' - excluding " "from backup"), key) continue # Copy the encryption key uuid for backup if key is 'encryption_key_id' and value is not None: value = keymgr.API().copy_key(self.context, value) LOG.debug("Copying encryption key uuid for backup.") container[type_tag][key] = value LOG.debug("Completed fetching metadata type '%s'", type_tag) else: LOG.debug("No metadata type '%s' available", type_tag) def _save_vol_meta(self, container, volume_id): """Save volume metadata to container. This will fetch all fields from the db VolumeMetadata object for volume_id and save them in the provided container dictionary. 
""" type_tag = self.TYPE_TAG_VOL_META LOG.debug("Getting metadata type '%s'", type_tag) meta = self.db.volume_metadata_get(self.context, volume_id) if meta: container[type_tag] = {} for entry in meta: # Exclude fields that are "not JSON serializable" if not self._is_serializable(meta[entry]): LOG.info(_LI("Unable to serialize field '%s' - excluding " "from backup"), entry) continue container[type_tag][entry] = meta[entry] LOG.debug("Completed fetching metadata type '%s'", type_tag) else: LOG.debug("No metadata type '%s' available", type_tag) def _save_vol_glance_meta(self, container, volume_id): """Save volume Glance metadata to container. This will fetch all fields from the db VolumeGlanceMetadata object for volume_id and save them in the provided container dictionary. """ type_tag = self.TYPE_TAG_VOL_GLANCE_META LOG.debug("Getting metadata type '%s'", type_tag) try: meta = self.db.volume_glance_metadata_get(self.context, volume_id) if meta: container[type_tag] = {} for entry in meta: # Exclude fields that are "not JSON serializable" if not self._is_serializable(entry.value): LOG.info(_LI("Unable to serialize field '%s' - " "excluding from backup"), entry) continue container[type_tag][entry.key] = entry.value LOG.debug("Completed fetching metadata type '%s'", type_tag) except exception.GlanceMetadataNotFound: LOG.debug("No metadata type '%s' available", type_tag) @staticmethod def _filter(metadata, fields): """Returns set of metadata restricted to required fields. If fields is empty list, the full set is returned. """ if not fields: return metadata subset = {} for field in fields: if field in metadata: subset[field] = metadata[field] else: LOG.debug("Excluding field '%s'", field) return subset def _restore_vol_base_meta(self, metadata, volume_id, fields): """Restore values to Volume object for provided fields.""" LOG.debug("Restoring volume base metadata") # Ignore unencrypted backups. key = 'encryption_key_id' if key in fields and key in metadata and metadata[key] is not None: self._restore_vol_encryption_meta(volume_id, metadata['volume_type_id']) metadata = self._filter(metadata, fields) self.db.volume_update(self.context, volume_id, metadata) def _restore_vol_encryption_meta(self, volume_id, src_volume_type_id): """Restores the volume_type_id for encryption if needed. Only allow restoration of an encrypted backup if the destination volume has the same volume type as the source volume. Otherwise encryption will not work. If volume types are already the same, no action is needed. """ dest_vol = self.db.volume_get(self.context, volume_id) if dest_vol['volume_type_id'] != src_volume_type_id: LOG.debug("Volume type id's do not match.") # If the volume types do not match, and the destination volume # does not have a volume type, force the destination volume # to have the encrypted volume type, provided it still exists. if dest_vol['volume_type_id'] is None: try: self.db.volume_type_get( self.context, src_volume_type_id) except exception.VolumeTypeNotFound: LOG.debug("Volume type of source volume has been " "deleted. Encrypted backup restore has " "failed.") msg = _("The source volume type '%s' is not " "available.") % (src_volume_type_id) raise exception.EncryptedBackupOperationFailed(msg) # Update dest volume with src volume's volume_type_id. 
LOG.debug("The volume type of the destination volume " "will become the volume type of the source " "volume.") self.db.volume_update(self.context, volume_id, {'volume_type_id': src_volume_type_id}) else: # Volume type id's do not match, and destination volume # has a volume type. Throw exception. LOG.warning(_LW("Destination volume type is different from " "source volume type for an encrypted volume. " "Encrypted backup restore has failed.")) msg = (_("The source volume type '%(src)s' is different " "than the destination volume type '%(dest)s'.") % {'src': src_volume_type_id, 'dest': dest_vol['volume_type_id']}) raise exception.EncryptedBackupOperationFailed(msg) def _restore_vol_meta(self, metadata, volume_id, fields): """Restore values to VolumeMetadata object for provided fields.""" LOG.debug("Restoring volume metadata") metadata = self._filter(metadata, fields) self.db.volume_metadata_update(self.context, volume_id, metadata, True) def _restore_vol_glance_meta(self, metadata, volume_id, fields): """Restore values to VolumeGlanceMetadata object for provided fields. First delete any existing metadata then save new values. """ LOG.debug("Restoring volume glance metadata") metadata = self._filter(metadata, fields) self.db.volume_glance_metadata_delete_by_volume(self.context, volume_id) for key, value in metadata.items(): self.db.volume_glance_metadata_create(self.context, volume_id, key, value) # Now mark the volume as bootable self.db.volume_update(self.context, volume_id, {'bootable': True}) def _v1_restore_factory(self): """All metadata is backed up but we selectively restore. Returns a dictionary of the form: {<type tag>: (<restore function>, <fields list>)} Empty field list indicates that all backed up fields should be restored. """ return {self.TYPE_TAG_VOL_META: (self._restore_vol_meta, []), self.TYPE_TAG_VOL_GLANCE_META: (self._restore_vol_glance_meta, [])} def _v2_restore_factory(self): """All metadata is backed up but we selectively restore. Returns a dictionary of the form: {<type tag>: (<restore function>, <fields list>)} Empty field list indicates that all backed up fields should be restored. """ return {self.TYPE_TAG_VOL_BASE_META: (self._restore_vol_base_meta, ['encryption_key_id']), self.TYPE_TAG_VOL_META: (self._restore_vol_meta, []), self.TYPE_TAG_VOL_GLANCE_META: (self._restore_vol_glance_meta, [])} def get(self, volume_id): """Get volume metadata. Returns a json-encoded dict containing all metadata and the restore version i.e. the version used to decide what actually gets restored from this container when doing a backup restore. """ container = {'version': CONF.backup_metadata_version} self._save_vol_base_meta(container, volume_id) self._save_vol_meta(container, volume_id) self._save_vol_glance_meta(container, volume_id) if container: return jsonutils.dumps(container) else: return None def put(self, volume_id, json_metadata): """Restore volume metadata to a volume. The json container should contain a version that is supported here. 
""" meta_container = jsonutils.loads(json_metadata) version = meta_container['version'] if version == 1: factory = self._v1_restore_factory() elif version == 2: factory = self._v2_restore_factory() else: msg = (_("Unsupported backup metadata version (%s)") % (version)) raise exception.BackupMetadataUnsupportedVersion(msg) for type in factory: func = factory[type][0] fields = factory[type][1] if type in meta_container: func(meta_container[type], volume_id, fields) else: LOG.debug("No metadata of type '%s' to restore", type) @six.add_metaclass(abc.ABCMeta) class BackupDriver(base.Base): def __init__(self, context, db_driver=None): super(BackupDriver, self).__init__(db_driver) self.context = context self.backup_meta_api = BackupMetadataAPI(context, db_driver) # This flag indicates if backup driver supports force # deletion. So it should be set to True if the driver that inherits # from BackupDriver supports the force deletion function. self.support_force_delete = False def get_metadata(self, volume_id): return self.backup_meta_api.get(volume_id) def put_metadata(self, volume_id, json_metadata): self.backup_meta_api.put(volume_id, json_metadata) @abc.abstractmethod def backup(self, backup, volume_file, backup_metadata=False): """Start a backup of a specified volume.""" return @abc.abstractmethod def restore(self, backup, volume_id, volume_file): """Restore a saved backup.""" return @abc.abstractmethod def delete(self, backup): """Delete a saved backup.""" return def export_record(self, backup): """Export driver specific backup record information. If backup backend needs additional driver specific information to import backup record back into the system it must overwrite this method and return it here as a dictionary so it can be serialized into a string. Default backup driver implementation has no extra information. :param backup: backup object to export :returns: driver_info - dictionary with extra information """ return {} def import_record(self, backup, driver_info): """Import driver specific backup record information. If backup backend needs additional driver specific information to import backup record back into the system it must overwrite this method since it will be called with the extra information that was provided by export_record when exporting the backup. Default backup driver implementation does nothing since it didn't export any specific data in export_record. :param backup: backup object to export :param driver_info: dictionary with driver specific backup record information :returns: nothing """ return @six.add_metaclass(abc.ABCMeta) class BackupDriverWithVerify(BackupDriver): @abc.abstractmethod def verify(self, backup): """Verify that the backup exists on the backend. Verify that the backup is OK, possibly following an import record operation. :param backup: backup id of the backup to verify :raises: InvalidBackup, NotImplementedError """ return
apache-2.0
1,127,676,597,813,030,800
39.153266
79
0.592203
false
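BackupDriver above is an abstract base class; only backup(), restore() and delete() must be supplied by a concrete driver. The sketch below is purely illustrative -- NoopBackupDriver is not a real Cinder driver and performs no actual I/O.

# Purely illustrative subclass; not a real Cinder backup driver.
class NoopBackupDriver(BackupDriver):
    def backup(self, backup, volume_file, backup_metadata=False):
        # A real driver would stream volume_file to its backend here; volume
        # metadata can be captured with self.get_metadata() when requested.
        if backup_metadata:
            self.get_metadata(backup.volume_id)

    def restore(self, backup, volume_id, volume_file):
        # A real driver would write the stored data back into volume_file.
        pass

    def delete(self, backup):
        # A real driver would remove the backup from its backend.
        pass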
tensorflow/agents
tf_agents/agents/categorical_dqn/categorical_dqn_agent_test.py
1
20157
# coding=utf-8 # Copyright 2020 The TF-Agents Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for agents.dqn.categorical_dqn_agent.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import from tf_agents.agents.categorical_dqn import categorical_dqn_agent from tf_agents.networks import categorical_q_network from tf_agents.networks import network from tf_agents.networks import q_rnn_network from tf_agents.specs import tensor_spec from tf_agents.trajectories import policy_step from tf_agents.trajectories import test_utils from tf_agents.trajectories import time_step as ts from tf_agents.trajectories import trajectory from tf_agents.utils import common class DummyCategoricalNet(network.Network): def __init__(self, input_tensor_spec, num_atoms=51, num_actions=2, name=None): self._num_atoms = num_atoms self._num_actions = num_actions super(DummyCategoricalNet, self).__init__( input_tensor_spec=input_tensor_spec, state_spec=(), name=name) # In CategoricalDQN we are dealing with a distribution over Q-values, which # are represented as num_atoms bins, ranging from min_q_value to # max_q_value. In order to replicate the setup in the non-categorical # network (namely, [[2, 1], [1, 1]]), we use the following "logits": # [[0, 1, ..., num_atoms-1, num_atoms, 1, ..., 1], # [1, ......................................, 1]] # The important bit is that the first half of the first list (which # corresponds to the logits for the first action) place more weight on the # higher q_values than on the lower ones, thereby resulting in a higher # value for the first action. weights_initializer = np.array([ np.concatenate((np.arange(num_atoms), np.ones(num_atoms))), np.concatenate((np.ones(num_atoms), np.ones(num_atoms)))]) kernel_initializer = tf.constant_initializer(weights_initializer) bias_initializer = tf.keras.initializers.Ones() # Store custom layers that can be serialized through the Checkpointable API. self._dummy_layers = [] self._dummy_layers.append( tf.keras.layers.Dense( num_actions * num_atoms, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer)) @property def num_atoms(self): return self._num_atoms def call(self, inputs, step_type=None, network_state=()): del step_type inputs = tf.cast(inputs, tf.float32) for layer in self._dummy_layers: inputs = layer(inputs) logits = tf.reshape(inputs, [-1, self._num_actions, self._num_atoms]) return logits, network_state class KerasLayersNet(network.Network): def __init__(self, observation_spec, action_spec, layer, num_atoms=5, name=None): super(KerasLayersNet, self).__init__(observation_spec, state_spec=(), name=name) self._layer = layer self.num_atoms = num_atoms # Dummy, this doesn't match the layer output. 
def call(self, inputs, step_type=None, network_state=()): del step_type return self._layer(inputs), network_state def create_variables(self, input_spec=None): output_spec = network.create_variables( self._layer, input_spec or self._input_tensor_spec) self._network_output_spec = output_spec self.built = True return output_spec class DummyCategoricalQRnnNetwork(q_rnn_network.QRnnNetwork): def __init__(self, input_tensor_spec, action_spec, num_atoms=51, **kwargs): if not isinstance(action_spec, tensor_spec.BoundedTensorSpec): raise TypeError('action_spec must be a BoundedTensorSpec. Got: %s' % ( action_spec,)) self._num_actions = action_spec.maximum - action_spec.minimum + 1 self._num_atoms = num_atoms q_network_action_spec = tensor_spec.BoundedTensorSpec( (), tf.int32, minimum=0, maximum=self._num_actions * num_atoms - 1) super(DummyCategoricalQRnnNetwork, self).__init__( input_tensor_spec=input_tensor_spec, action_spec=q_network_action_spec, **kwargs) @property def num_atoms(self): return self._num_atoms def call(self, observations, step_type=None, network_state=()): logits, network_state = super(DummyCategoricalQRnnNetwork, self).call( observations, step_type, network_state) shape = logits.shape.as_list() assert shape[-1] == self._num_actions * self._num_atoms new_shape = shape[:-1] + [self._num_actions, self._num_atoms] logits = tf.reshape(logits, new_shape) return logits, network_state class CategoricalDqnAgentTest(tf.test.TestCase): def setUp(self): super(CategoricalDqnAgentTest, self).setUp() tf.compat.v1.enable_resource_variables() self._obs_spec = tensor_spec.TensorSpec([2], tf.float32) self._time_step_spec = ts.time_step_spec(self._obs_spec) self._action_spec = tensor_spec.BoundedTensorSpec((), tf.int32, 0, 1) self._categorical_net = categorical_q_network.CategoricalQNetwork( self._obs_spec, self._action_spec, fc_layer_params=[4]) self._dummy_categorical_net = DummyCategoricalNet(self._obs_spec) self._optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.01) def testCreateAgentNestSizeChecks(self): action_spec = [ tensor_spec.BoundedTensorSpec([1], tf.int32, 0, 1), tensor_spec.BoundedTensorSpec([1], tf.int32, 0, 1) ] with self.assertRaisesRegex(ValueError, 'Only scalar actions'): categorical_dqn_agent.CategoricalDqnAgent( self._time_step_spec, action_spec, self._dummy_categorical_net, self._optimizer) def testCreateAgentDimChecks(self): action_spec = tensor_spec.BoundedTensorSpec([1, 2], tf.int32, 0, 1) with self.assertRaisesRegex(ValueError, 'Only scalar actions'): categorical_dqn_agent.CategoricalDqnAgent( self._time_step_spec, action_spec, self._dummy_categorical_net, self._optimizer) def testCreateAgentDefaultNetwork(self): categorical_dqn_agent.CategoricalDqnAgent( self._time_step_spec, self._action_spec, self._categorical_net, self._optimizer) def testCreateAgentWithPrebuiltPreprocessingLayers(self): dense_layer = tf.keras.Sequential([ tf.keras.layers.Dense(10), tf.keras.layers.Flatten(), tf.keras.layers.Reshape([2, 5]), ]) q_net = KerasLayersNet( self._time_step_spec.observation, self._action_spec, dense_layer) with self.assertRaisesRegexp( ValueError, 'shares weights with the original network'): categorical_dqn_agent.CategoricalDqnAgent( self._time_step_spec, self._action_spec, categorical_q_network=q_net, optimizer=None) # Explicitly share weights between q and target networks. # This would be an unusual setup so we check that an error is thrown. 
q_target_net = KerasLayersNet( self._time_step_spec.observation, self._action_spec, dense_layer) with self.assertRaisesRegexp( ValueError, 'shares weights with the original network'): categorical_dqn_agent.CategoricalDqnAgent( self._time_step_spec, self._action_spec, categorical_q_network=q_net, optimizer=None, target_categorical_q_network=q_target_net) def testCreateAgentWithPrebuiltPreprocessingLayersDiffAtoms(self): dense_layer = tf.keras.Sequential([ tf.keras.layers.Dense(10), tf.keras.layers.Flatten(), tf.keras.layers.Reshape([2, 5]), ]) q_net = KerasLayersNet( self._time_step_spec.observation, self._action_spec, dense_layer) dense_layer_target = tf.keras.Sequential([ tf.keras.layers.Dense(10), tf.keras.layers.Reshape([2, 5]), ]) q_bad_target_net = KerasLayersNet( self._time_step_spec.observation, self._action_spec, dense_layer_target, num_atoms=3) with self.assertRaisesRegexp(ValueError, 'have different numbers of atoms'): categorical_dqn_agent.CategoricalDqnAgent( self._time_step_spec, self._action_spec, categorical_q_network=q_net, optimizer=None, target_categorical_q_network=q_bad_target_net) def testCriticLoss(self): agent = categorical_dqn_agent.CategoricalDqnAgent( self._time_step_spec, self._action_spec, self._dummy_categorical_net, self._optimizer) observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32) time_steps = ts.restart(observations, batch_size=2) actions = tf.constant([0, 1], dtype=tf.int32) action_steps = policy_step.PolicyStep(actions) rewards = tf.constant([10, 20], dtype=tf.float32) discounts = tf.constant([0.9, 0.9], dtype=tf.float32) next_observations = tf.constant([[5, 6], [7, 8]], dtype=tf.float32) next_time_steps = ts.transition(next_observations, rewards, discounts) experience = test_utils.stacked_trajectory_from_transition( time_steps, action_steps, next_time_steps) # Due to the constant initialization of the DummyCategoricalNet, we can # expect the same loss every time. expected_loss = 2.19525 loss_info = agent._loss(experience) self.evaluate(tf.compat.v1.global_variables_initializer()) evaluated_loss = self.evaluate(loss_info).loss self.assertAllClose(evaluated_loss, expected_loss, atol=1e-4) def testCriticLossWithMaskedActions(self): # Observations are now a tuple of the usual observation and an action mask. observation_spec_with_mask = ( self._obs_spec, tensor_spec.BoundedTensorSpec([2], tf.int32, 0, 1)) time_step_spec = ts.time_step_spec(observation_spec_with_mask) dummy_categorical_net = DummyCategoricalNet(self._obs_spec) agent = categorical_dqn_agent.CategoricalDqnAgent( time_step_spec, self._action_spec, dummy_categorical_net, self._optimizer, observation_and_action_constraint_splitter=lambda x: (x[0], x[1])) # For `observations`, the masks are set up so that only one action is valid # for each element in the batch. observations = (tf.constant([[1, 2], [3, 4]], dtype=tf.float32), tf.constant([[1, 0], [0, 1]], dtype=tf.int32)) time_steps = ts.restart(observations, batch_size=2) actions = tf.constant([0, 1], dtype=tf.int32) action_steps = policy_step.PolicyStep(actions) rewards = tf.constant([10, 20], dtype=tf.float32) discounts = tf.constant([0.9, 0.9], dtype=tf.float32) # For `next_observations`, the masks are set up so the opposite actions as # before are valid. 
next_observations = (tf.constant([[5, 6], [7, 8]], dtype=tf.float32), tf.constant([[0, 1], [1, 0]], dtype=tf.int32)) next_time_steps = ts.transition(next_observations, rewards, discounts) experience = test_utils.stacked_trajectory_from_transition( time_steps, action_steps, next_time_steps) # Due to the constant initialization of the DummyCategoricalNet, we can # expect the same loss every time. Note this is different from the loss in # testCriticLoss above due to previously optimal actions being masked out. expected_loss = 5.062895 loss_info = agent._loss(experience) self.evaluate(tf.compat.v1.global_variables_initializer()) evaluated_loss = self.evaluate(loss_info).loss self.assertAllClose(evaluated_loss, expected_loss, atol=1e-4) def testCriticLossNStep(self): agent = categorical_dqn_agent.CategoricalDqnAgent( self._time_step_spec, self._action_spec, self._dummy_categorical_net, self._optimizer, n_step_update=2) observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32) time_steps = ts.restart(observations, batch_size=2) actions = tf.constant([0, 1], dtype=tf.int32) action_steps = policy_step.PolicyStep(actions) rewards = tf.constant([10, 20], dtype=tf.float32) discounts = tf.constant([0.9, 0.9], dtype=tf.float32) next_observations = tf.constant([[5, 6], [7, 8]], dtype=tf.float32) next_time_steps = ts.transition(next_observations, rewards, discounts) third_observations = tf.constant([[9, 10], [11, 12]], dtype=tf.float32) third_time_steps = ts.transition(third_observations, rewards, discounts) experience1 = trajectory.from_transition( time_steps, action_steps, next_time_steps) experience2 = trajectory.from_transition( next_time_steps, action_steps, third_time_steps) experience3 = trajectory.from_transition( third_time_steps, action_steps, third_time_steps) experience = tf.nest.map_structure( lambda x, y, z: tf.stack([x, y, z], axis=1), experience1, experience2, experience3) loss_info = agent._loss(experience) # discounted_returns should evaluate to 10 + 0.9 * 10 = 19 and # 20 + 0.9 * 20 = 38. evaluated_discounted_returns = self.evaluate(agent._discounted_returns) self.assertAllClose(evaluated_discounted_returns, [[19], [38]], atol=1e-4) # Both final_value_discount values should be 0.9 * 0.9 = 0.81. evaluated_final_value_discount = self.evaluate(agent._final_value_discount) self.assertAllClose(evaluated_final_value_discount, [[0.81], [0.81]], atol=1e-4) # Due to the constant initialization of the DummyCategoricalNet, we can # expect the same loss every time. 
expected_loss = 2.19525 self.evaluate(tf.compat.v1.global_variables_initializer()) evaluated_loss = self.evaluate(loss_info).loss self.assertAllClose(evaluated_loss, expected_loss, atol=1e-4) def testPolicy(self): agent = categorical_dqn_agent.CategoricalDqnAgent( self._time_step_spec, self._action_spec, self._categorical_net, self._optimizer) observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32) time_steps = ts.restart(observations, batch_size=2) actions, _, _ = agent.policy.action(time_steps) self.assertEqual(actions.shape, [2]) self.evaluate(tf.compat.v1.global_variables_initializer()) actions_ = self.evaluate(actions) self.assertTrue(all(actions_ <= self._action_spec.maximum)) self.assertTrue(all(actions_ >= self._action_spec.minimum)) def testInitialize(self): agent = categorical_dqn_agent.CategoricalDqnAgent( self._time_step_spec, self._action_spec, self._categorical_net, self._optimizer) observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32) time_steps = ts.restart(observations, batch_size=2) actions = tf.constant([0, 1], dtype=tf.int32) action_steps = policy_step.PolicyStep(actions) rewards = tf.constant([10, 20], dtype=tf.float32) discounts = tf.constant([0.9, 0.9], dtype=tf.float32) next_time_steps = ts.transition(observations, rewards, discounts) experience = test_utils.stacked_trajectory_from_transition( time_steps, action_steps, next_time_steps) loss_info = agent._loss(experience) initialize = agent.initialize() self.evaluate(tf.compat.v1.global_variables_initializer()) losses = self.evaluate(loss_info).loss self.assertGreater(losses, 0.0) critic_variables = agent._q_network.variables target_critic_variables = agent._target_q_network.variables self.assertTrue(critic_variables) self.assertTrue(target_critic_variables) self.evaluate(initialize) for s, t in zip(critic_variables, target_critic_variables): self.assertAllClose(self.evaluate(s), self.evaluate(t)) def testUpdateTarget(self): agent = categorical_dqn_agent.CategoricalDqnAgent( self._time_step_spec, self._action_spec, self._categorical_net, self._optimizer) observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32) time_steps = ts.restart(observations, batch_size=2) actions = tf.constant([0, 1], dtype=tf.int32) action_steps = policy_step.PolicyStep(actions) experience = test_utils.stacked_trajectory_from_transition( time_steps, action_steps, time_steps) loss_info = agent._loss(experience) update_targets = agent._update_target() self.evaluate(tf.compat.v1.global_variables_initializer()) losses = self.evaluate(loss_info).loss self.assertGreater(losses, 0.0) self.evaluate(update_targets) def testTrain(self): agent = categorical_dqn_agent.CategoricalDqnAgent( self._time_step_spec, self._action_spec, self._dummy_categorical_net, self._optimizer) observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32) time_steps = ts.restart(observations, batch_size=2) actions = tf.constant([0, 1], dtype=tf.int32) action_steps = policy_step.PolicyStep(actions) rewards = tf.constant([10, 20], dtype=tf.float32) discounts = tf.constant([0.9, 0.9], dtype=tf.float32) next_observations = tf.constant([[5, 6], [7, 8]], dtype=tf.float32) next_time_steps = ts.transition(next_observations, rewards, discounts) experience = test_utils.stacked_trajectory_from_transition( time_steps, action_steps, next_time_steps) train_step = agent.train(experience, weights=None) # Due to the constant initialization of the DummyCategoricalNet, we can # expect the same loss every time. 
expected_loss = 2.19525 self.evaluate(tf.compat.v1.global_variables_initializer()) evaluated_loss, _ = self.evaluate(train_step) self.assertAllClose(evaluated_loss, expected_loss, atol=1e-4) def testTrainWithRnn(self): action_spec = tensor_spec.BoundedTensorSpec((), tf.int32, 0, 1) batch_size = 5 observations = tf.constant( [[[1, 2], [3, 4], [5, 6]]] * batch_size, dtype=tf.float32) actions = tf.constant([[0, 1, 1]] * batch_size, dtype=tf.int32) time_steps = ts.TimeStep( step_type=tf.constant([[1] * 3] * batch_size, dtype=tf.int32), reward=tf.constant([[1] * 3] * batch_size, dtype=tf.float32), discount=tf.constant([[1] * 3] * batch_size, dtype=tf.float32), observation=[observations]) experience = trajectory.Trajectory( step_type=time_steps.step_type, observation=observations, action=actions, policy_info=(), next_step_type=time_steps.step_type, reward=time_steps.reward, discount=time_steps.discount) categorical_q_rnn_network = DummyCategoricalQRnnNetwork( self._obs_spec, action_spec, conv_layer_params=None, input_fc_layer_params=(16,), preprocessing_combiner=None, lstm_size=(40,), output_fc_layer_params=(16,), ) counter = common.create_variable('test_train_counter') agent = categorical_dqn_agent.CategoricalDqnAgent( self._time_step_spec, action_spec, categorical_q_rnn_network, optimizer=tf.compat.v1.train.AdamOptimizer(0.001), ) # Force variable creation. agent.policy.variables() if tf.executing_eagerly(): loss = lambda: agent.train(experience) else: loss = agent.train(experience) self.evaluate(tf.compat.v1.global_variables_initializer()) self.assertEqual(self.evaluate(counter), 0) self.evaluate(loss) if __name__ == '__main__': tf.test.main()
apache-2.0
-4,279,109,489,371,291,000
37.467557
80
0.668949
false
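For readers skimming the test file above, the minimal construction path it exercises is the one from setUp() plus testCreateAgentDefaultNetwork()/testInitialize(); the snippet below simply restates those fixtures and introduces no new API.

# Minimal construction path exercised by the tests above (restated, not new API).
import tensorflow as tf
from tf_agents.agents.categorical_dqn import categorical_dqn_agent
from tf_agents.networks import categorical_q_network
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import time_step as ts

obs_spec = tensor_spec.TensorSpec([2], tf.float32)
time_step_spec = ts.time_step_spec(obs_spec)
action_spec = tensor_spec.BoundedTensorSpec((), tf.int32, 0, 1)
q_net = categorical_q_network.CategoricalQNetwork(
    obs_spec, action_spec, fc_layer_params=[4])
agent = categorical_dqn_agent.CategoricalDqnAgent(
    time_step_spec, action_spec, q_net,
    tf.compat.v1.train.GradientDescentOptimizer(0.01))
agent.initialize()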
bschug/bindirpatch
utils.py
1
2756
import os import subprocess import sys SEVENZIP_EXE = os.path.join('.', '7zip', 'x64', '7za.exe') BSDIFF_EXE = os.path.join('.', 'bsdiff', 'bsdiff.exe') BSPATCH_EXE = os.path.join('.', 'bsdiff', 'bspatch.exe') def bsdiff(oldFile, newFile, patchFile, silent=False): """Creates a binary diff between <oldFile> and <newFile> and stores it in <patchFile>""" subprocess.call([BSDIFF_EXE, oldFile, newFile, patchFile], stdout=get_stdout(silent)) def bspatch(oldFile, newFile, patchFile, silent=False): """Applies the <patchFile> to the <oldFile> and writes the result to <newFile>""" subprocess.call([BSPATCH_EXE, oldFile, newFile, patchFile], stdout=get_stdout(silent)) def zip_directory(directory, zipPath, silent=False): """Creates a 7z archive at <zipPath> containing the files from <directory>.""" subprocess.call([SEVENZIP_EXE, 'a', zipPath, directory, '-mx9', '-t7z'], stdout=get_stdout(silent)) def unzip_directory(zipPath, directory, silent=False): """Extracts the 7z archive <zipPath> and puts the content into directory <directory>""" subprocess.call([SEVENZIP_EXE, 'x', zipPath, '-o' + directory], stdout=get_stdout(silent)) def get_stdout(silent): if silent: return open(os.devnull, 'wb') else: return None def find_application_version(projectDir): versionFilePath = os.path.join(projectDir, 'VERSION') try: with open(versionFilePath, 'r') as versionFile: versionStr = versionFile.read() return int(versionStr) except ValueError: print 'Invalid Version: "' + versionStr + '"' return None except IOError: print 'Could not open VERSION file at ' + versionFilePath return None class Progress: def __init__(self, total, dots): self.total = total self.current = 0 self.dotsPrinted = 0 self.dotsMax = dots def print_header(self, numSegments=1): sys.stdout.write('[') dotsPerSegment = self.dotsMax / numSegments for i in range(0, self.dotsMax): if (i+1) % dotsPerSegment == 0: sys.stdout.write('|') else: sys.stdout.write(' ') sys.stdout.write(']\n ') def add_progress(self, progress): self.set_progress(self.current + progress) def set_progress(self, progress): if progress <= self.current: return self.current = progress percentage = progress / float(self.total) nextDotPercentage = (self.dotsPrinted + 1) / float(self.dotsMax) if percentage >= nextDotPercentage: sys.stdout.write('.') self.dotsPrinted += 1 if self.current >= self.total: print ''
mit
-1,957,813,704,073,503,700
33.024691
103
0.62627
false
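The helpers in utils.py above are thin wrappers around the bundled bsdiff/bspatch and 7-Zip binaries. The calls below are illustrative only; the file names are invented and the executables must exist at the paths configured at the top of the module.

# Illustrative calls; file names are hypothetical and the bundled
# bsdiff/bspatch/7za executables must be present at the configured paths.
bsdiff('app_v1.bin', 'app_v2.bin', 'v1_to_v2.patch', silent=True)    # create a binary diff
bspatch('app_v1.bin', 'app_v2_rebuilt.bin', 'v1_to_v2.patch')        # re-create v2 from v1 + patch
zip_directory('build', 'build.7z', silent=True)                      # pack a directory with 7-Zip

progress = Progress(total=200, dots=40)   # simple console progress bar used by the tool
progress.print_header()
for _ in range(200):
    progress.add_progress(1)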
shownotes/snotes20-restapi
snotes20/models/nuser.py
1
6848
import random from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, UserManager from django.db import models, transaction from django.core.mail import send_mail from django.core import validators from django.utils import timezone from django.core.validators import RegexValidator, ValidationError from django.template import Context from django.template.loader import render_to_string from django.conf import settings from django.contrib.auth import login, authenticate def get_random_color(): # close enough to fast.. while True: cc = hex(random.getrandbits(28))[2:8].upper() try: validate_user_color(cc) return cc except: pass def get_color_luminosity(color): # based on etherpad-lite (apache) # https://github.com/ether/etherpad-lite/blob/7b9fd81284a6e2191d007769c899907ea3f64232/src/static/js/colorutils.js#L111-L115 c = [ int(color[0:2], 16) / 255, int(color[2:4], 16) / 255, int(color[4:6], 16) / 255 ] return c[0] * 0.30 + c[1] * 0.59 + c[2] * 0.11 def validate_user_color(color): lum = get_color_luminosity(color) if lum < 0.5: raise ValidationError('color too dark') class NUser(AbstractBaseUser, PermissionsMixin): username = models.CharField('username', max_length=30, unique=True, validators=[validators.RegexValidator(r'^[\w.@+-]+$', 'Enter a valid username.', 'invalid')]) email = models.EmailField('email', unique=True) is_staff = models.BooleanField('is_staff', default=False) is_active = models.BooleanField('is_active', default=False) date_joined = models.DateTimeField('date_joined', default=timezone.now) date_login = models.DateTimeField('date_login', null=True, blank=True) color = models.CharField(max_length=6, default=get_random_color, validators=[ RegexValidator(regex='^[A-F0-9]{6}$', message='No color', code='nocolor'), validate_user_color ]) migrated = models.BooleanField(default=True) old_password = models.CharField(max_length=500, null=True, blank=True, default=None) bio = models.CharField(max_length=400, default='', blank=True) pw_reset_token = models.CharField(max_length=30, null=True, blank=True) objects = UserManager() USERNAME_FIELD = 'username' REQUIRED_FIELDS = ['email'] class Meta: verbose_name = 'user' verbose_name_plural = 'users' def save(self, *args, **kwargs): if not self.pw_reset_token: self.pw_reset_token = None if not self.old_password: self.old_password = None super(NUser, self).save(*args, **kwargs) def is_authenticated_raw(self): return super(NUser, self).is_authenticated() def is_authenticated(self): return self.is_authenticated_raw() and self.migrated def get_full_name(self): return self.username def get_short_name(self): return self.username def migrate(self, password, request=None): with transaction.atomic(): self.migrated = True self.old_password = None self.save() if request is None: self.set_password(password) else: self.set_password_keep_session(request, password) def set_password_keep_session(self, request, raw_password): self.set_password(raw_password) self.save() auth_user = authenticate(username=self.username, password=raw_password) login(request, auth_user) def email_user(self, subject, message, from_email=None, recipient=None, **kwargs): if recipient is None: recipient = self.email send_mail(subject, message, from_email, [recipient], **kwargs) def _email_user_template(self, tpl, context, lang, recipient=None): options = settings.EMAILS[tpl] siteurl = settings.SITEURL context['username'] = self.username context['siteurl'] = siteurl c = Context(context) text_content = render_to_string(tpl + '_' + lang + '.txt', c) 
self.email_user(options['subject'][lang], text_content, recipient=recipient) def add_email_token(self, email): token = '%030x' % random.randrange(16**30) email_token = NUserEmailToken(user=self, email=email, token=token) email_token.save() return email_token def check_email_token(self, token): return self.email_tokens.get(token=token) def apply_email_token(self, token_obj): self.email = token_obj.email self.is_active = True token_obj.delete() self.save() def email_user_activation(self, lang, token): ctx = { 'token': token } self._email_user_template('activation', ctx, lang) def email_new_mail_confirmation(self, lang, token, email): ctx = { 'token': token } self._email_user_template('newmail_confirmation', ctx, lang, recipient=email) def set_pw_reset_token(self): token = '%030x' % random.randrange(16**30) self.pw_reset_token = token self.save() def check_pw_reset_token(self, token): return self.pw_reset_token is not None and\ token is not None and\ self.pw_reset_token == token def apply_pw_reset_token(self, password): print(password) self.pw_reset_token = None self.set_password(password) self.migrated = True self.save() def email_pw_reset(self, lang): ctx = {'token': self.pw_reset_token} self._email_user_template('pwreset', ctx, lang) class NUserEmailToken(models.Model): user = models.ForeignKey(NUser, related_name='email_tokens') email = models.EmailField(unique=True) token = models.CharField(max_length=30) class Meta: verbose_name = "Email token" class NUserSocialType(models.Model): name = models.SlugField(primary_key=True) human_name = models.CharField(max_length=20) icon = models.CharField(max_length=10, null=True, blank=True) def __str__(self): return self.human_name def save(self, *args, **kwargs): if not self.icon: self.icon = None super(NUserSocialType, self).save(*args, **kwargs) class Meta: verbose_name = "Social Type" class NUserSocial(models.Model): user = models.ForeignKey(NUser, db_index=True, related_name="socials") type = models.ForeignKey(NUserSocialType, db_index=True) value = models.CharField(max_length=20) def __str__(self): return self.type.human_name + "(" + self.value + ")" class Meta: unique_together = ('user', 'type') verbose_name = "Social"
agpl-3.0
-1,066,422,913,424,071,600
31.923077
128
0.630111
false
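The colour helpers at the top of nuser.py above enforce a minimum luminosity so user colours stay readable. The snippet below only exercises those helpers; it assumes Django is configured and that the module is importable as snotes20.models.nuser (the path given in this record).

# Exercises only the colour helpers above; assumes a configured Django project
# and that the module path matches the record (snotes20/models/nuser.py).
from django.core.exceptions import ValidationError
from snotes20.models.nuser import (get_random_color, get_color_luminosity,
                                   validate_user_color)

color = get_random_color()                 # six upper-case hex digits, always light enough
assert get_color_luminosity(color) >= 0.5
validate_user_color('FFFFFF')              # white (luminosity 1.0) is accepted
try:
    validate_user_color('000000')          # black is rejected as too dark
except ValidationError:
    pass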
kafan15536900/ADfree-Player-Offline
onServer/ruletool/oproxylist.py
1
2176
[{ "name": "crossdomain_youku", "find": "https?:\/\/static\.youku\.com(\/v[\d\.]*)?\/v\/swf\/.*(\/)?(player|loader).*\.swf", "monitor": "https?:\/\/v\.youku\.com\/crossdomain\.xml", "extra": "crossdomain" }, { "name": "crossdomain_tudou", "find": "http:\/\/static\.youku\.com(\/v[\d\.]*)?\/v\/custom\/.*\/player.*\.swf", "monitor": "https?:\/\/static.youku.com\/crossdomain\.xml", "exfind": "https?:\/\/static.youku.com\/crossdomain\.xml", "extra": "crossdomain" }, { "name": "crossdomain_tudou_sp", "find": ".*olc[^\.]*\.swf", "exfind": "https?:\/\/v\.youku\.com\/crossdomain\.xml", "monitor": "https?:\/\/www\.tudou\.com\/crossdomain\.xml", "extra": "crossdomain" }, { "name": "crossdomain_sohu", "find": "https?:\/\/(tv\.sohu\.com\/upload\/swf\/(?!(ap|56)).*\d+|(\d+\.){3}\d+(:\d+)?\/webplayer)\/(Main|PlayerShell)[^\.]*\.swf", "monitor": "https?:\/\/(photocdn|live\.tv)\.sohu\.com\/crossdomain\.xml", "extra": "crossdomain" }, { "name": "crossdomain_iqiyi|pps-c1", "find": "https?:\/\/www\.iqiyi\.com\/(player\/(\d+\/Player|[a-z0-9]*|cupid\/.*\/(pps[\w]+|clear))|common\/flashplayer\/\d+\/((PPS)?Main|Coop|Share|Enjon)?Player.*_(.|ad\d+))\.swf", "monitor": "\w{32}\.\w{3}.*qyid=\w{32}.*ran=\d+", "extra": "crossdomain" }, { "name": "crossdomain_iqiyi|pps-c2", "find": "https?:\/\/www\.iqiyi\.com\/player\/cupid\/common\/icon\.swf", "monitor": "notavailable", "extra": "crossdomain" }, { "name": "crossdomain_iqiyi|pps-main", "find": "https?:\/\/.*(iqiyi|pps)\.com\/.*\.htm", "exfind": "\w{32}\.\w{3}.*qyid=\w{32}.*ran=\d+", "monitor": "policy\.video\.iqiyi\.com\/crossdomain\.xml", "extra": "crossdomain" }, { "name": "crossdomain_letv", "find": "https?:\/\/.*letv[\w]*\.com\/.*\/((?!(Live|seed|Disk))(S[\w]{2,3})?(?!Live)[\w]{4}|swf)Player*\.swf", "monitor": "https?:\/\/player\.letvcdn\.com\/crossdomain\.xml", "extra": "crossdomain" }, { "name": "crossdomain_douyu", "find": "https?:\/\/staticlive\.douyucdn\.cn\/common\/simplayer\/WebRoom.*\.swf", "monitor": "https?:\/\/www\.douyu\.com\/lapi\/live\/getPlay\/[\d+]", "extra": "crossdomain" }, { "name": "aclog3.huomaotv.cn:80", "find": "", "monitor": "", "extra": "proxy" } ]
gpl-3.0
4,208,300,067,617,454,600
38.581818
181
0.566176
false
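oproxylist.py above is not executable logic but a plain Python list literal of rules, where 'find' and 'monitor' hold URL regular expressions. The sketch below shows one way such a list could be consulted; it is not part of the project, and the file path and sample URL are made up.

# Not part of the project: one way a rule list like the above could be consulted.
import ast
import re

rules = ast.literal_eval(open('oproxylist.py').read())   # the file is a bare Python list literal
url = 'http://static.youku.com/v1.0.0149/v/swf/loader.swf'
matched = [r['name'] for r in rules if r.get('find') and re.match(r['find'], url)]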
YannChemin/distRS
scripts/readNESZ.py
1
5155
#!/usr/bin/env python3
###########################################################
# Use of this file is strictly forbidden !!!!!!!!!!!!!!!!!#
# Copyright Yann Chemin, Freelancer, France, 2016         #
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
import xml.etree.ElementTree as ET
from osgeo import gdal, gdal_array, osr
import os, sys, glob
import numpy as np
from scipy.interpolate import interp1d

# For debug only
safefile="/Users/dnd/snap/Cozmin/S1A_IW_SLC__1SDV_20200509T055041_20200509T055108_032484_03C30B_EF9F.SAFE"
out="/Users/dnd/snap/noise_"

# Parse arguments
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(dest = "safedir", type = argparse.FileType('r'), help = safefile)
parser.add_argument(dest = "outbase", type = str, help = out)
args = parser.parse_args()

# import arguments into processing
safefile = args.safedir
out = args.outbase

# For GDAL to raise Exceptions
gdal.UseExceptions()

################################################
# Go into measurement subdir and open each file
################################################
srcs = glob.glob(safefile+"/measurement/*.tiff")
print(safefile+"/measurement/*.tiff")
for srcfile in srcs:
    print(srcfile)
    ds = gdal.Open(srcfile)
    dr = ds.GetDriver()
    outf = out+os.path.basename(srcfile)
    ds_new = dr.Create(outf,ds.RasterXSize,ds.RasterYSize,ds.RasterCount,gdal.GDT_Float32)
    ds_new.SetGeoTransform(ds.GetGeoTransform())
    srs = osr.SpatialReference()
    srs.ImportFromEPSG(4326)
    ds_new.SetProjection(srs.ExportToWkt())
    #ds_new.SetProjection(ds.GetProjection())
    ds_new.SetMetadata(ds.GetMetadata())
    gcp_count = ds.GetGCPCount()
    if gcp_count != 0:
        try:
            ds_new.SetGCPs(ds.GetGCPs(), ds.GetGCPProjection())
        except:
            ds_new.SetGCPs(ds.GetGCPs(), srs.ExportToWkt())
    ds = None
    npimg = np.array(ds_new.GetRasterBand(1).ReadAsArray())
    # Clean up pixels to 0
    npimg[ npimg != 0 ] = 0
    # Clean all pixels to np.nan
    # npimg.fill(np.nan)
    # Shape of the numpy array
    print(npimg.shape)
    #create xmlfile name
    xmlfile = safefile+"/annotation/calibration/noise-"+os.path.basename(srcfile)[:-4]+"xml"
    print(xmlfile)
    tree = ET.parse(xmlfile)
    root = tree.getroot()
    #print(root.tag)
    #print(root.attrib)
    # Load line numbers
    l = []
    for dss in root.findall('noiseRangeVectorList'):
        #print(dss.tag, dss.attrib)
        for sub in dss:
            #print(sub.tag, sub.attrib)
            for val in sub.findall('line'):
                l.append(int(val.text))
    rows=l[-1]
    print("rows =",rows)
    # Load pixels
    p = []
    # Load noise
    n = []
    for dss in root.findall('noiseRangeVectorList'):
        #print(dss.tag, dss.attrib)
        for sub in dss:
            #print(sub.tag, sub.attrib)
            for val in sub.findall('pixel'):
                p.append([ int(x) for x in (val.text).split(' ') ])
                #print(p[-1])
            for val in sub.findall('noiseRangeLut'):
                n.append([ float(x) for x in (val.text).split(' ') ])
    #################################
    # Interpolate 1D Noise RangeLUT
    #################################
    f = interp1d(p[-1], n[-1], fill_value="extrapolate")
    xnew = np.linspace(0, npimg.shape[1], npimg.shape[1], endpoint=True)
    noise1d = f(xnew)
    #print(noise1d[0],noise1d.shape, npimg.shape)
    for j in range(npimg.shape[1]):
        #print("col[%d] = %d" % (j,p[-1][j]))
        for i in range(len(l)):
            #print("row[%d] = %d" % (i, l[i]))
            #print("npimg[%d][%d] = %f" % (l[i],p[-1][j],n[-1][i]))
            #Write directly as it comes
            npimg[l[i]][j] = noise1d[j]
    ##########################################
    # Interpolate 1D for each npimg.shape[0]
    ##########################################
    for j in range(npimg.shape[1]):
        #x = np.linspace(0, npimg.shape[1], len(l), endpoint=True)
        #print(npimg[:][j])
        #print(len(npimg[:][j]))
        #print(npimg.shape)
        # gather only values from noiseRangeLUT to create model of interp1d
        temparray = np.copy(npimg[:,j])
        tarray = temparray[ temparray != 0 ]
        #print(tarray)
        #print(tarray.shape)
        x = np.linspace(0, npimg.shape[0], tarray.shape[0], endpoint=True)
        f = interp1d(x, tarray, fill_value="extrapolate")
        xnew = np.linspace(0, npimg.shape[0], npimg.shape[0], endpoint=True)
        noise1d = f(xnew)
        #print(noise1d)
        #print(noise1d.shape)
        for i in range(npimg.shape[0]):
            npimg[i][j] = noise1d[i]
    # write the data
    ds_new.GetRasterBand(1).WriteArray(npimg, 0, 0)
    # flush data to disk, set the NoData value and calculate stats
    ds_new.GetRasterBand(1).FlushCache()
    ds_new.GetRasterBand(1).SetNoDataValue(-99)
    # Write to disk
    ds_new = None
    #cols=p[-1][-1]
    #print("cols =",cols)
    #print(p[0])
    #print(n[0])
unlicense
1,387,791,122,149,264,400
34.308219
106
0.546848
false
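Editor's note: the script above fills the output raster by linearly interpolating the sparse noiseRangeLut samples across the full image width with scipy's interp1d. A minimal, self-contained sketch of that interpolation step is below; the pixel positions, LUT values and raster width are made-up illustrative numbers, not values from a real Sentinel-1 annotation file.

import numpy as np
from scipy.interpolate import interp1d

# Hypothetical sparse noise-LUT sample, as it would come from one
# <noiseRangeVector> element of the annotation XML (assumed values).
pixels = [0, 400, 800, 1200, 1600]            # sampled column indices
noise = [1.2e3, 1.1e3, 9.8e2, 1.0e3, 1.05e3]  # noise LUT values at those columns
width = 2000                                  # assumed full raster width

# Same call pattern as the script: linear interpolation that extrapolates
# beyond the last sampled column.
f = interp1d(pixels, noise, fill_value="extrapolate")
columns = np.linspace(0, width, width, endpoint=True)
noise_row = f(columns)

print(noise_row.shape)  # (2000,) - one interpolated noise value per image column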
fr0uty/oartm
tests/lib/test_database.py
1
9847
# -*- coding: utf-8 -*- import pytest import time import datetime from codecs import open from sqlalchemy import event, Table from sqlalchemy.ext.declarative.api import DeclarativeMeta from sqlalchemy.exc import IntegrityError, OperationalError, ProgrammingError from sqlalchemy.orm.util import object_state from collections import OrderedDict from oar.lib import fixture from oar.lib.database import Database, SessionProperty, QueryProperty from oar.lib.compat import StringIO, to_unicode, json from oar.lib.utils import to_json from .. import assert_raises class EngineListener(object): def __init__(self, engine, ignored=('PRAGMA')): self.engine = engine self.ignored = ignored self.buf = StringIO() @event.listens_for(engine, "before_cursor_execute") def before_cursor_execute(conn, cursor, statement, parameters, context, executemany): sql = to_unicode(statement) for string in self.ignored: if sql.lower().startswith(string.lower()): return sql = sql.replace(' \n', '\n').rstrip('\n') self.buf.write(sql.rstrip('\n') + ";" + '\n') @property def raw_sql(self): self.buf.seek(0) return self.buf.getvalue().replace('\t', ' ')\ .rstrip('\n') @pytest.fixture(scope='function') def db(request): db = Database(uri='sqlite://') association_table = db.Table( 'association', db.Column('movie_id', db.Integer, db.ForeignKey('movie.id')), db.Column('actor_id', db.Integer, db.ForeignKey('actor.id')) ) class Movie(db.Model): __table_args__ = ( db.UniqueConstraint('title', name='uix_1'), {'sqlite_autoincrement': True}, ) id = db.Column(db.Integer, primary_key=True) title = db.Column(db.String(20)) class Actor(db.DeferredReflectionModel): __table_args__ = ( db.Index('name', 'lastname', 'firstname'), db.UniqueConstraint('firstname', 'lastname', name='uix_1') ) id = db.Column(db.Integer, primary_key=True) firstname = db.Column(db.String(20)) lastname = db.Column(db.String(20)) birthday = db.Column(db.DateTime, nullable=False, default=datetime.datetime.utcnow) movies = db.relationship("Movie", secondary=association_table, backref="actors") return db def test_sqlite_schema(db): engine_listener = EngineListener(db.engine) db.create_all() expected_schemas = """ CREATE TABLE movie ( id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, title VARCHAR(20) NOT NULL, CONSTRAINT uix_1 UNIQUE (title) ); CREATE TABLE actor ( id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, firstname VARCHAR(20) NOT NULL, lastname VARCHAR(20) NOT NULL, birthday DATETIME NOT NULL, CONSTRAINT uix_1 UNIQUE (firstname, lastname) ); CREATE INDEX name ON actor (lastname, firstname); CREATE TABLE association ( movie_id INTEGER NOT NULL, actor_id INTEGER NOT NULL, FOREIGN KEY(movie_id) REFERENCES movie (id), FOREIGN KEY(actor_id) REFERENCES actor (id) );""" for schema in expected_schemas.split(';'): assert schema.strip() in engine_listener.raw_sql def test_model_args(db): db.create_all() assert db['actor'].name == "actor" index_columns = list(list(db['actor'].indexes)[0].columns) assert index_columns[0].name == "lastname" assert index_columns[1].name == "firstname" def test_collected_tables_and_models(db): db.create_all() model_names = ('Actor', 'Movie') table_names = ('actor', 'movie', 'association') for model_name in model_names: assert model_name in db assert isinstance(db[model_name], DeclarativeMeta) for table_name in table_names: assert table_name in db assert isinstance(db[table_name], Table) with assert_raises(KeyError, "totototo"): db['totototo'] def test_deferred_reflection(db): db.create_all() db.op.add_column('actor', db.Column('salary', db.Integer, 
nullable=True, default=1000000)) db.reflect() db['Actor'].create(firstname="Ben", lastname="Affleck", salary=12000000) affleck = db['Actor'].query.first() keys = list(OrderedDict(affleck).keys()) assert affleck.salary == 12000000 assert ['id', 'firstname', 'lastname', 'birthday', 'salary'] == keys def test_db_api_create_and_delete_all(db): db.create_all() db.reflect() dicaprio = db['Actor'].create(firstname="Leonardo", lastname="DiCaprio") ruffalo = db['Actor'].create(firstname="Mark", lastname="Ruffalo") shutter_island = db['Movie'].create(title="Shutter Island") shutter_island.actors.append(dicaprio) shutter_island.actors.append(ruffalo) dicaprio = db['Actor'].query.filter_by(firstname="Leonardo").first() assert dicaprio.lastname == "DiCaprio" assert dicaprio.movies[0].actors[0] is dicaprio assert dicaprio.movies[0].actors[1] is ruffalo with assert_raises(IntegrityError): db['Actor'].create(firstname="Leonardo", lastname="DiCaprio") db.delete_all() assert db['Actor'].query.count() == 0 assert db['Movie'].query.count() == 0 assert len(db.session.execute(db['association'].select()).fetchall()) == 0 def test_db_api_to_dict_json(db): db.create_all() db.reflect() Actor, Movie = db['Actor'], db['Movie'] dt = datetime.datetime(2015, 7, 19, 9, 14, 22, 140921) a1 = Actor.create(firstname="Leonardo", lastname="DiCaprio", birthday=dt) a2 = Actor.create(firstname="Mark", lastname="Ruffalo") m1 = Movie.create(title="Shutter Island") m1.actors.append(a1) m1.actors.append(a2) item = Actor.query.filter_by(firstname="Leonardo").first() item_dict = OrderedDict([('id', 1), ('firstname', 'Leonardo'), ('lastname', 'DiCaprio'), ('birthday', dt)]) assert item.to_dict() == item_dict expected_json = """ { "id": 1, "firstname": "Leonardo", "lastname": "DiCaprio", "birthday": "2015-07-19T09:14:22.140921" }""".strip() assert item.to_json() == expected_json assert to_json(item) == expected_json def test_db_api_others(db): assert repr(db) == "<Database engine=None>" db.create_all() assert repr(db) == "<Database engine=Engine(sqlite://)>" assert db.metadata == db.Model.metadata movie = db['Movie'].create(title="Mad Max") assert repr(movie) == "<Movie (1,)>" assert db.query(db['Movie']).first().title == "Mad Max" assert db.dialect == "sqlite" def test_db_api_add(db): db.create_all() movie = db['Movie'](title="Mad Max") db.add(movie) assert db['Movie'].query.first().title == "Mad Max" def test_db_api_rollback(db): db.create_all() movie = db['Movie'](title="Mad Max") db.add(movie) db.rollback() assert db['Movie'].query.first() is None def test_db_api_flush(db): db.create_all() movie = db['Movie'](title="Mad Max") db.add(movie) assert object_state(movie).pending is True db.flush() assert object_state(movie).persistent is True db.commit() def test_db_api_close(db): assert db.connector is None db.create_all() db['Movie'].create(title="Mad Max") db.add(db['Movie'](title="Mad Max")) assert db.connector is not None session = db.session assert session.new db.close() assert db.connector is None assert not session.new def test_internal_operations(db): assert isinstance(Database.session, SessionProperty) assert Database.Model is None assert Database.query_class is None assert Database.query_collection_class is None assert isinstance(db.Model.query, QueryProperty) def test_load_fixtures(db, tmpdir): ts = int(time.time()) db.create_all() db.op.add_column( table_name='actor', column=db.Column('start_time', db.Integer, nullable=True), ) db.reflect() db.__time_columns__ = ["start_time"] Actor, Movie = db['Actor'], db['Movie'] a1 = 
Actor.create(firstname="Leonardo", lastname="DiCaprio", start_time=ts) a2 = Actor.create(firstname="Mark", lastname="Ruffalo", start_time=ts) m1 = Movie.create(title="Shutter Island") m1.actors.append(a1) m1.actors.append(a2) assert Actor.query.order_by(Actor.id).first().start_time == ts filepath = tmpdir.join('fixtures.json').strpath fixture.dump_fixtures(db, filepath, ref_time=ts) data = {} with open(filepath, 'r', encoding='utf-8') as fd: data = json.load(fd) assert data['metadata']['ref_time'] == ts fixture.load_fixtures(db, filepath, clear=True, ref_time=None) assert Actor.query.order_by(Actor.id).first().start_time == ts fixture.load_fixtures(db, filepath, clear=True, ref_time=(ts - 10)) assert Actor.query.order_by(Actor.id).first().start_time == (ts - 10) with assert_raises(IntegrityError): fixture.load_fixtures(db, filepath) @pytest.mark.skipif("os.environ.get('DB_TYPE', '') == 'memory'", reason="need persistent database") def test_read_only_session(): from oar.lib import db lenght = len(db['Resource'].query.all()) if db.dialect == "sqlite": exception = OperationalError else: exception = ProgrammingError with assert_raises(exception): with db.session(read_only=True): assert len(db['Resource'].query.all()) == lenght db['Resource'].create() len(db['Resource'].query.all()) == lenght
bsd-3-clause
1,590,199,073,109,283,300
30.662379
79
0.622728
false
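Editor's note: the EngineListener in the test module above captures the SQL that SQLAlchemy is about to execute by hooking the "before_cursor_execute" event. A minimal standalone sketch of the same pattern, assuming only SQLAlchemy itself (no oar.lib wrappers):

from io import StringIO

from sqlalchemy import create_engine, event, text

engine = create_engine("sqlite://")
buf = StringIO()

@event.listens_for(engine, "before_cursor_execute")
def log_sql(conn, cursor, statement, parameters, context, executemany):
    # Record every statement the engine is about to send to the cursor.
    buf.write(statement.rstrip("\n") + ";\n")

with engine.connect() as conn:
    conn.execute(text("CREATE TABLE actor (id INTEGER PRIMARY KEY, name VARCHAR(20))"))

print(buf.getvalue())  # the captured CREATE TABLE statement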
marscher/PyEMMA
devtools/conda-recipe/run_test.py
1
1141
import os
import sys

import pytest

test_pkg = 'pyemma'
cover_pkg = test_pkg

# where to write junit xml
junit_xml = os.path.join(os.getenv('CIRCLE_TEST_REPORTS', os.path.expanduser('~')),
                         'reports', 'junit.xml')
target_dir = os.path.dirname(junit_xml)
if not os.path.exists(target_dir):
    os.makedirs(target_dir)
print('junit destination:', junit_xml)

njobs_args = '-p no:xdist' if os.getenv('TRAVIS') else '-n2'

pytest_args = ("-v --pyargs {test_pkg} "
               "--cov={cover_pkg} "
               "--cov-report=xml:{dest_report} "
               "--doctest-modules "
               "{njobs_args} "
               "--junit-xml={junit_xml} "
               "-c {pytest_cfg}"
               #"--durations=20 "
               .format(test_pkg=test_pkg, cover_pkg=cover_pkg,
                       junit_xml=junit_xml, pytest_cfg='setup.cfg',
                       dest_report=os.path.join(os.path.expanduser('~/'), 'coverage.xml'),
                       njobs_args=njobs_args,
                       )
               .split(' '))
print("args:", pytest_args)

res = pytest.main(pytest_args)
sys.exit(res)
lgpl-3.0
4,347,374,351,833,799,700
30.694444
90
0.527607
false
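Editor's note: run_test.py drives pytest programmatically through pytest.main(list_of_args) and forwards the exit code to the shell. A stripped-down sketch of the same pattern, with a hypothetical package name and no coverage/xdist plugins assumed:

import sys

import pytest

# "mypackage" is a placeholder; swap in the real installed package name.
args = ["-v", "--pyargs", "mypackage", "--junit-xml=junit.xml"]
sys.exit(pytest.main(args))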
mnpiozhang/DocumentSearch
backend/tests.py
1
1877
#!/usr/bin/env python # -*- coding:utf-8 -*- from django.test import TestCase ''' from pdfminer.pdfpage import PDFPage from pdfminer.layout import LAParams from pdfminer.pdfinterp import PDFResourceManager,PDFPageInterpreter from pdfminer.converter import TextConverter from cStringIO import StringIO # Create your tests here. retstr = StringIO() rsrcmgr = PDFResourceManager() laparams = LAParams() codec = 'utf-8' device = TextConverter(rsrcmgr,retstr,codec=codec,laparams=laparams) with open('C:\\Users\\lenovo\\Downloads\\ReferenceCard.pdf'.decode("utf-8"), 'rb') as f: interpreter = PDFPageInterpreter(rsrcmgr, device) for page in PDFPage.get_pages(f): interpreter.process_page(page) device.close() str = retstr.getvalue() retstr.close() print str ''' import json #s={u'docname': u'\u6697\u5ba4\u9022\u706f', u'content': u'\n192.168.187.143 root\xc3\xdc\xc2\xeb\xa3\xbatysxwg\n\xb6\xcb\xbf\xda 6022\n\n\n192.168.23.62 root fNFRNmWy6LD3wddeNQprxHMb$qHB\n\n192.168.73.59 6022 logview logview\n\n192.168.50.235 6022 logfiles logfiles#TYsx2012\n\n\n\n182.138.27.204 54077\n182.138.27.205 54077 1.\xcc\xec\xd2\xed\xca\xd3\xd1\xb6\xcd\xf8\xb9\xdc\n\ntysxwg\nTy_wg1q2w3e4r', u'description': u'\u6492\u5730\u65b9'} #b = json.dumps(s) #print b bb = u'\u6492\u5730\u65b9' #print bb.encode('utf-8') ss = '\n192.168.187.143 root\xc3\xdc\xc2\xeb\xa3\xbatysxwg\n\xb6\xcb\xbf\xda 6022\n\n\n192.168.23.62 root fNFRNmWy6LD3wddeNQprxHMb$qHB\n\n192.168.73.59 6022 logview logview\n\n192.168.50.235 6022 logfiles logfiles#TYsx2012\n\n\n\n182.138.27.204 54077\n182.138.27.205 54077 1.\xcc\xec\xd2\xed\xca\xd3\xd1\xb6\xcd\xf8\xb9\xdc\n\ntysxwg\nTy_wg1q2w3e4r' #print unicode(ss.decode('gbk')) #print ss.decode('gbk').encode('utf-8') ccc=u'/home/hu/ds/DocumentSearch/media/2017/03/15/\u8bdd\u5355\u76d1\u63a7.docx' print ccc.decode("utf-8")
mit
1,547,040,591,628,544,300
43.714286
454
0.73788
false
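Editor's note: the commented-out block in backend/tests.py is the classic pdfminer recipe for extracting plain text from a PDF. A Python 3 / pdfminer.six flavored sketch of the same flow is below (io.StringIO replaces cStringIO; the file path is a placeholder):

from io import StringIO

from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager
from pdfminer.pdfpage import PDFPage

def pdf_to_text(path):
    """Extract plain text from every page of a PDF."""
    retstr = StringIO()
    rsrcmgr = PDFResourceManager()
    device = TextConverter(rsrcmgr, retstr, laparams=LAParams())
    with open(path, 'rb') as f:
        interpreter = PDFPageInterpreter(rsrcmgr, device)
        for page in PDFPage.get_pages(f):
            interpreter.process_page(page)
    device.close()
    text = retstr.getvalue()
    retstr.close()
    return text

print(pdf_to_text('ReferenceCard.pdf'))  # placeholder path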
magahet/peon
peon/entity.py
1
3494
from types import (MobTypes, ObjectTypes)
import numpy as np
import utils


class BaseEntity(object):
    '''Prototype class with position methods'''

    def __init__(self, eid, x, y, z, pitch, yaw):
        self.eid = eid
        self.x = x
        self.y = y
        self.z = z
        self.pitch = pitch
        self.yaw = yaw

    @property
    def position(self):
        return (self.x, self.y, self.z)

    def get_position(self, dx=0, dy=0, dz=0, floor=False):
        if self.x is None:
            return (None, None, None)
        position = np.add((self.x, self.y, self.z), (dx, dy, dz))
        if floor:
            return tuple([int(i) for i in np.floor(position)])
        else:
            return tuple(position)

    def move(self, dx, dy, dz):
        self.x += dx
        self.y += dy
        self.z += dz

    def look(self, yaw, pitch):
        self.yaw = yaw
        self.pitch = pitch

    def teleport(self, x, y, z, yaw, pitch):
        self.x = x
        self.y = y
        self.z = z
        self.yaw = yaw
        self.pitch = pitch


class Object(BaseEntity):
    '''Represents objects/dropped items'''

    def __init__(self, eid, _type, x, y, z, pitch, yaw, data):
        self._type = _type if isinstance(_type, int) else ObjectTypes.get_id(_type)
        self.data = data
        self.metadata = {}
        super(Object, self).__init__(eid, x, y, z, pitch, yaw)

    def __repr__(self):
        return 'Object(eid={}, _type={}, xyz={})'.format(
            self.eid, ObjectTypes.get_name(self._type),
            self.get_position(floor=True))


class PlayerEntity(BaseEntity):
    '''Represents other players'''

    def __init__(self, eid, uuid, name, x, y, z, yaw, pitch, current_item,
                 metadata):
        self.uuid = uuid
        self.name = name
        self.current_item = current_item
        self.metadata = metadata
        self.name = None
        super(PlayerEntity, self).__init__(eid, x, y, z, pitch, yaw)

    def __repr__(self):
        return 'PlayerEntity(eid={}, name={}, xyz={})'.format(
            self.eid, self.name, self.get_position(floor=True))


class Entity(BaseEntity):
    '''Represents mobs'''

    def __init__(self, eid, _type, x, y, z, pitch, head_pitch, yaw,
                 velocity_x, velocity_y, velocity_z, metadata):
        self._type = _type if isinstance(_type, int) else MobTypes.get_id(_type)
        self.head_pitch = head_pitch
        self.velocity_x = velocity_x
        self.velocity_y = velocity_y
        self.velocity_z = velocity_z
        self.metadata = metadata
        super(Entity, self).__init__(eid, x, y, z, pitch, yaw)

    def __repr__(self):
        return 'Entity(eid={}, _type={}, xyz={})'.format(
            self.eid, MobTypes.get_name(self._type),
            self.get_position(floor=True))


class BlockEntity(object):

    def __init__(self, location, data):
        self.location = location
        self.data = utils.unpack_nbt(data[1])
        self._type = self.data.get('id')

    def __repr__(self):
        return 'BlockEntity(location={}, type={}, data={})'.format(
            self.location, self._type, self.data)

    def __contains__(self, other):
        return other in self.data.keys()

    def get(self, key):
        return self.data.get(key)

    def keys(self):
        return self.data.keys()

    def iteritems(self):
        for key in self.data.keys():
            yield (key, self.data.get(key))

    def items(self):
        # Materialise the (key, value) pairs produced by the iteritems() generator.
        return [t for t in self.iteritems()]
mit
2,511,374,138,964,423,000
27.876033
83
0.554379
false
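Editor's note: BaseEntity.get_position offsets the stored coordinates with numpy and optionally floors them to integer block coordinates. A tiny usage sketch, assuming the BaseEntity class from peon/entity.py is importable (the coordinates are made up):

# Hypothetical entity at fractional world coordinates.
e = BaseEntity(eid=1, x=10.7, y=64.0, z=-3.2, pitch=0.0, yaw=90.0)

print(e.position)                  # (10.7, 64.0, -3.2)
print(e.get_position(dy=1))        # offset position, returned as a tuple of floats
print(e.get_position(floor=True))  # (10, 64, -4) - integer block coordinates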
stefanfoulis/django-filer-test
filer/tests/models.py
1
5743
import os from django.test import TestCase from django.core.files import File as DjangoFile from filer.models.foldermodels import Folder from filer.models.imagemodels import Image from filer.models.clipboardmodels import Clipboard from filer.admin.clipboardadmin import UploadImageFileForm from filer.tests.helpers import (create_superuser, create_folder_structure, create_image, create_clipboard_item) from filer import settings as filer_settings class FilerApiTests(TestCase): def setUp(self): self.superuser = create_superuser() self.client.login(username='admin', password='secret') self.img = create_image() self.image_name = 'test_file.jpg' self.filename = os.path.join(os.path.dirname(__file__), self.image_name) self.img.save(self.filename, 'JPEG') def tearDown(self): self.client.logout() os.remove(self.filename) for img in Image.objects.all(): img.delete() def create_filer_image(self): file = DjangoFile(open(self.filename), name=self.image_name) image = Image.objects.create(owner=self.superuser, original_filename=self.image_name, file=file) return image def test_create_folder_structure(self): create_folder_structure(depth=3, sibling=2, parent=None) self.assertEqual(Folder.objects.count(), 26) def test_create_and_delete_image(self): self.assertEqual(Image.objects.count(), 0) image = self.create_filer_image() image.save() self.assertEqual(Image.objects.count(), 1) image = Image.objects.all()[0] image.delete() self.assertEqual(Image.objects.count(), 0) def test_upload_image_form(self): self.assertEqual(Image.objects.count(), 0) file = DjangoFile(open(self.filename), name=self.image_name) upoad_image_form = UploadImageFileForm({'original_filename':self.image_name, 'owner': self.superuser.pk}, {'file':file}) if upoad_image_form.is_valid(): image = upoad_image_form.save() self.assertEqual(Image.objects.count(), 1) def test_create_clipboard_item(self): image = self.create_filer_image() image.save() # Get the clipboard of the current user clipboard_item = create_clipboard_item(user=self.superuser, file=image) clipboard_item.save() self.assertEqual(Clipboard.objects.count(), 1) def test_create_icons(self): image = self.create_filer_image() image.save() icons = image.icons file_basename = os.path.basename(image.file.path) self.assertEqual(len(icons), 3) self.assertEqual(os.path.basename(icons['32']), file_basename + u'.32x32_q85_crop_upscale.jpg') self.assertEqual(os.path.basename(icons['48']), file_basename + u'.48x48_q85_crop_upscale.jpg') self.assertEqual(os.path.basename(icons['64']), file_basename + u'.64x64_q85_crop_upscale.jpg') def test_file_upload_public_destination(self): """ Test where an image `is_public` == True is uploaded. """ image = self.create_filer_image() image.is_public = True image.save() self.assertTrue(image.file.path.startswith(filer_settings.FILER_PUBLICMEDIA_ROOT)) def test_file_upload_private_destination(self): """ Test where an image `is_public` == False is uploaded. 
""" image = self.create_filer_image() image.is_public = False image.save() self.assertTrue(image.file.path.startswith(filer_settings.FILER_PRIVATEMEDIA_ROOT)) def test_file_move_location(self): """ Test the method that move a file between filer_public, filer_private and vice et versa """ image = self.create_filer_image() image.is_public = False image.save() self.assertTrue(image.file.path.startswith(filer_settings.FILER_PRIVATEMEDIA_ROOT)) image._move_file(filer_settings.FILER_PRIVATEMEDIA_PREFIX, filer_settings.FILER_PUBLICMEDIA_PREFIX) image.save() self.assertTrue(image.file.path.startswith(filer_settings.FILER_PUBLICMEDIA_ROOT)) def test_file_change_upload_to_destination(self): """ Test that the file is actualy move from the private to the public directory when the is_public is checked on an existing private file. """ file = DjangoFile(open(self.filename), name=self.image_name) image = Image.objects.create(owner=self.superuser, is_public=False, original_filename=self.image_name, file=file) image.save() self.assertTrue(image.file.path.startswith(filer_settings.FILER_PRIVATEMEDIA_ROOT)) image.is_public = True image.save() self.assertTrue(image.file.path.startswith(filer_settings.FILER_PUBLICMEDIA_ROOT)) self.assertEqual(len(image.icons), 3) image.is_public = False image.save() self.assertTrue(image.file.path.startswith(filer_settings.FILER_PRIVATEMEDIA_ROOT)) self.assertEqual(len(image.icons), 3)
mit
-1,759,177,181,968,339,200
39.737589
91
0.590284
false
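Editor's note: the filer tests build Image records by wrapping an on-disk file in django.core.files.File and passing it to the model manager, exactly as create_filer_image does above. A condensed sketch of that pattern, assuming a configured Django project with this version of django-filer installed (path, owner and name are placeholders):

from django.core.files import File as DjangoFile
from filer.models.imagemodels import Image

def make_filer_image(path, owner, name="test_file.jpg"):
    """Wrap an on-disk image in a Django File and store it as a filer Image."""
    with open(path, "rb") as fh:
        return Image.objects.create(owner=owner,
                                     original_filename=name,
                                     file=DjangoFile(fh, name=name))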
ForestClaw/forestclaw
applications/geoclaw/tohoku/setrun.py
1
17915
""" Module to set up run time parameters for Clawpack. The values set in the function setrun are then written out to data files that will be read in by the Fortran code. """ import os import numpy as np from pdb import * # import topotools try: FCLAW = os.environ['FCLAW'] except: raise Exception("*** Must First set FCLAW environment variable") topodir = os.path.join(FCLAW, 'applications','geoclaw', 'scratch') if not os.path.isdir(topodir): raise Exception("*** Missing topo directory: %s" % topodir) minlevel = 3 maxlevel = 7 #------------------------------ def setrun(claw_pkg='geoclaw'): #------------------------------ """ Define the parameters used for running Clawpack. INPUT: claw_pkg expected to be "geoclaw" for this setrun. OUTPUT: rundata - object of class ClawRunData """ from clawpack.clawutil import data assert claw_pkg.lower() == 'geoclaw', "Expected claw_pkg = 'geoclaw'" num_dim = 2 rundata = data.ClawRunData(claw_pkg, num_dim) #------------------------------------------------------------------ # GeoClaw specific parameters: #------------------------------------------------------------------ rundata = setgeo(rundata) #------------------------------------------------------------------ # Standard Clawpack parameters to be written to claw.data: # (or to amr2ez.data for AMR) #------------------------------------------------------------------ clawdata = rundata.clawdata # initialized when rundata instantiated # Set single grid parameters first. # See below for AMR parameters. # --------------- # Parameters # --------------- mx = 32 # Ignored my = 32 # --------------- # Spatial domain: # --------------- # Number of space dimensions: clawdata.num_dim = num_dim # Lower and upper edge of computational domain: clawdata.lower[0] = 132.0 # xlower clawdata.upper[0] = 210.0 # xupper clawdata.lower[1] = 9.0 # ylower clawdata.upper[1] = 53.0 # yupper # Number of grid cells (ignored by ForestClaw) clawdata.num_cells[0] = mx # mx clawdata.num_cells[1] = my # my # Number of space dimensions: clawdata.num_dim = num_dim # --------------- # Gauges: # --------------- gauges = rundata.gaugedata.gauges # for gauges append lines of the form [gaugeno, x, y, t1, t2] (25200,inf) gauges.append([1123, 203.52825, 20.9021333, 7.0*3600., 1.e9]) #Kahului #gauges.append([5680, 203.52333, 20.895, 7.0*3600., 1.e9]) #TG Kahului # more accurate coordinates from Yong Wei at PMEL: (25200,inf) gauges.append([5680, 203.530944, 20.895, 7.0*3600., 1.e9]) #TG Kahului ## Add gauges for comparison with GeoClaw gauges.append([1, 143.214480, 38.011905, 0, 1.e9]) gauges.append([2, 144.851543, 33.090886, 0, 1.e9]) gauges.append([3, 170.233553, 29.893284, 0, 1.e9]) gauges.append([4, 196.417438, 20.561113, 0, 1.e9]) # --------------- # Size of system: # --------------- # Number of equations in the system: clawdata.num_eqn = 3 # Number of auxiliary variables in the aux array (initialized in setaux) clawdata.num_aux = 3 # Index of aux array corresponding to capacity function, if there is one: clawdata.capa_index = 2 # ------------- # Initial time: # ------------- clawdata.t0 = 0.0 # Restart from checkpoint file of a previous run? # Note: If restarting, you must also change the Makefile to set: # RESTART = True # If restarting, t0 above should be from original run, and the # restart_file 'fort.chkNNNNN' specified below should be in # the OUTDIR indicated in Makefile. 
clawdata.restart = False # True to restart from prior results clawdata.restart_file = 'fort.chk00006' # File to use for restart data # ------------- # Output times: #-------------- # Specify at what times the results should be written to fort.q files. # Note that the time integration stops after the final output time. # The solution at initial time t0 is always written in addition. clawdata.output_style = 1 if clawdata.output_style==1: # Output ntimes frames at equally spaced times up to tfinal: # Can specify num_output_times = 0 for no output clawdata.num_output_times = 26 clawdata.tfinal = 13*3600. clawdata.output_t0 = True # output at initial (or restart) time? elif clawdata.output_style == 2: # Specify a list or numpy array of output times: # Include t0 if you want output at the initial time. clawdata.output_times = np.linspace(7,13,4)*3600. elif clawdata.output_style == 3: # Output every step_interval timesteps over total_steps timesteps: clawdata.output_step_interval = 1 clawdata.total_steps = 1 clawdata.output_t0 = False # output at initial (or restart) time? clawdata.output_format = 'ascii' # 'ascii' or 'netcdf' clawdata.output_q_components = 'all' # could be list such as [True,True] clawdata.output_aux_components = 'none' # could be list clawdata.output_aux_onlyonce = True # output aux arrays only at t0 # --------------------------------------------------- # Verbosity of messages to screen during integration: # --------------------------------------------------- # The current t, dt, and cfl will be printed every time step # at AMR levels <= verbosity. Set verbosity = 0 for no printing. # (E.g. verbosity == 2 means print only on levels 1 and 2.) clawdata.verbosity = 1 # -------------- # Time stepping: # -------------- # if dt_variable==1: variable time steps used based on cfl_desired, # if dt_variable==0: fixed time steps dt = dt_initial will always be used. clawdata.dt_variable = True # Initial time step for variable dt. # If dt_variable==0 then dt=dt_initial for all steps: clawdata.dt_initial = 0.016 # Max time step to be allowed if variable dt used: clawdata.dt_max = 1e+99 # Desired Courant number if variable dt used, and max to allow without # retaking step with a smaller dt: clawdata.cfl_desired = 0.75 clawdata.cfl_max = 1.0 # Maximum number of time steps to allow between output times: clawdata.steps_max = 50000 # ------------------ # Method to be used: # ------------------ # Order of accuracy: 1 => Godunov, 2 => Lax-Wendroff plus limiters clawdata.order = 2 # Use dimensional splitting? (not yet available for AMR) clawdata.dimensional_split = 'unsplit' # For unsplit method, transverse_waves can be # 0 or 'none' ==> donor cell (only normal solver used) # 1 or 'increment' ==> corner transport of waves # 2 or 'all' ==> corner transport of 2nd order corrections too clawdata.transverse_waves = 2 # Number of waves in the Riemann solution: clawdata.num_waves = 3 # List of limiters to use for each wave family: # Required: len(limiter) == num_waves # Some options: # 0 or 'none' ==> no limiter (Lax-Wendroff) # 1 or 'minmod' ==> minmod # 2 or 'superbee' ==> superbee # 3 or 'mc' ==> MC limiter # 4 or 'vanleer' ==> van Leer clawdata.limiter = ['mc', 'mc', 'mc'] clawdata.use_fwaves = True # True ==> use f-wave version of algorithms # Source terms splitting: # src_split == 0 or 'none' ==> no source term (src routine never called) # src_split == 1 or 'godunov' ==> Godunov (1st order) splitting used, # src_split == 2 or 'strang' ==> Strang (2nd order) splitting used, not recommended. 
clawdata.source_split = 'godunov' # -------------------- # Boundary conditions: # -------------------- # Number of ghost cells (usually 2) clawdata.num_ghost = 2 # Choice of BCs at xlower and xupper: # 0 => user specified (must modify bcN.f to use this option) # 1 => extrapolation (non-reflecting outflow) # 2 => periodic (must specify this at both boundaries) # 3 => solid wall for systems where q(2) is normal velocity clawdata.bc_lower[0] = 'extrap' clawdata.bc_upper[0] = 'extrap' clawdata.bc_lower[1] = 'extrap' clawdata.bc_upper[1] = 'extrap' # Specify when checkpoint files should be created that can be # used to restart a computation. clawdata.checkpt_style = 0 if clawdata.checkpt_style == 0: # Do not checkpoint at all pass elif clawdata.checkpt_style == 1: # Checkpoint only at tfinal. pass elif clawdata.checkpt_style == 2: # Specify a list of checkpoint times. clawdata.checkpt_times = [0.1,0.15] elif clawdata.checkpt_style == 3: # Checkpoint every checkpt_interval timesteps (on Level 1) # and at the final time. clawdata.checkpt_interval = 5 # ----------------------------------------------- # AMR parameters: # ----------------------------------------------- amrdata = rundata.amrdata maxlevel = 15 # List of refinement ratios at each level (length at least amr_level_max-1) # 2 degree, 24', 4', 1', 10", 1/3" amrdata.amr_levels_max = maxlevel # Set to 3 for best results # Not used in ForestClaw amrdata.refinement_ratios_x = [2]*(maxlevel-1) amrdata.refinement_ratios_y = [2]*(maxlevel-1) amrdata.refinement_ratios_t = [2]*(maxlevel-1) # Specify type of each aux variable in amrdata.auxtype. # This must be a list of length maux, each element of which is one of: # 'center', 'capacity', 'xleft', or 'yleft' (see documentation). amrdata.aux_type = ['center','capacity','yleft'] # Flag using refinement routine flag2refine rather than richardson error amrdata.flag_richardson = False # use Richardson? amrdata.flag2refine = True amrdata.flag2refine_tol = 0.5 # tolerance used in this routine amrdata.regrid_interval = 3 amrdata.regrid_buffer_width = 2 amrdata.clustering_cutoff = 0.700000 amrdata.verbosity_regrid = 0 # --------------- # Regions: # --------------- regions = rundata.regiondata.regions # ------------------------------ # Regions # ------------------------------ inf = 1e9 # Not sure why these are included : # regions.append([minlevel, minlevel+3, 0., 5.*3600., 132., 220., 5., 40.]) # Region describing topo region # Data on topo extent extracted from Fujii.txydz # # ------------------------------------------------------------------ # To match Geoclaw levels (N0 = mx = 16) # # GeoClaw --> ForestClaw # # Levels RR RR.cumprod() Levels RR RR.cumprod() # 1 (1) (1) --> 0 (1) (1) # 2 (5) (5) --> 2 (4) (4) # 3 (6) (30) --> 5 (8) (32) # 4 (4) (120) --> 7 (4) (128) # 5 (6) (720) --> 9 (4) (512) # 6 (30) (21600) --> 14 (32) (16384) # ------------------------------------------------------------------ # Region 0 : Encompasses entire domain (limits refinement # in upper 1/4 portion of domain) (0, inf) regions.append([0, 1, 0., 1e9, 0, 360, -90, 90]) # Region 1 : Topo map at initial earthquake site; Refine long enough to # resolve initial disturbance. 
(0,3600) regions.append([5, 5, 0., 1, 135., 150., 30., 45.]) # Region 2 : Large region encompassing most of lower portion of domain (0,18000) regions.append([0, 5, 0., 5.*3600., 132., 220., 5., 40.]) # Region 3 : Large region encompassing Hawaii Islands (18000,28800) regions.append([0, 5, 5.0*3600., 8.0*3600, 180.0, 220.0, 5.0, 40.0]) # Region 4 : Includes Maui and Molokai (23400.0, inf) regions.append([7, 7, 6.5*3600., inf, 202.5, 204.0, 20.4, 21.4]) # Region 5 : Strip including north shore of Maui (25200, inf) regions.append([9, 9, 7.*3600., inf, 203.0, 203.7, 20.88333, 21.]) # Region 6 : Port at Kailua (26100.0, inf) regions.append([14, 14, 7.25*3600., inf, 203.52,203.537,20.89,20.905]) # ------------------------------------------------------------------ # Try to reduce amount of time spent in ghost filling # To match Geoclaw levels (mx = 32) # # GeoClaw --> ForestClaw # # Levels RR RR.cumprod() Levels RR RR.cumprod() # 1 (1) (1) --> 0 (1) (1) # 2 (5) (5) --> 2 (4) (4) # 3 (6) (30) --> 5 (8) (32) # 4 (4) (120) --> 6 (2) (64) < ---- only difference # 5 (6) (720) --> 9 (8) (512) # 6 (30) (21600) --> 14 (32) (16384) # ------------------------------------------------------------------ # # Region 0 : Encompasses entire domain (limits refinement # # in upper 1/4 portion of domain) (0, inf) # regions.append([0, 1, 0., 1.0e9, 0, 360, -90, 90]) # # # Region 1 : Topo map at initial earthquake site; # # Extent of the region (including time component) is taken from the dtopo file. # regions.append([5, 5, 0, 1, 135., 150., 30., 45.]) # # # Region 2 : Large region encompassing most of lower portion of domain (0,18000) # regions.append([0, 5, 0, 5*3600., 132., 220., 5., 40.]) # # # Region 3 : Large region encompassing Hawaii Islands (18000,28800) # regions.append([0, 5, 5.0*3600., 8.0*3600, 180.0, 220.0, 5.0, 40.0]) # # # Region 4 : Includes Maui and Molokai (23400.0, inf) # regions.append([6, 6, 6.5*3600., inf, 202.5, 204.0, 20.4, 21.4]) # # # Region 5 : Strip including north shore of Maui (25200, inf) # regions.append([9, 9, 7.*3600., inf, 203.0, 203.7, 20.88333, 21.]) # # # Region 6 : Port at Kailua (26100.0, inf) # regions.append([14, 14, 7.25*3600., inf, 203.52,203.537,20.89,20.905]) # ------------------------------------------------------- # For developers # -- Toggle debugging print statements: # ------------------------------------------------------- amrdata.dprint = False # print domain flags amrdata.eprint = False # print err est flags amrdata.edebug = False # even more err est flags amrdata.gprint = False # grid bisection/clustering amrdata.nprint = False # proper nesting output amrdata.pprint = False # proj. of tagged points amrdata.rprint = False # print regridding summary amrdata.sprint = False # space/memory output amrdata.tprint = True # time step reporting each level amrdata.uprint = False # update/upbnd reporting return rundata # end of function setrun # ---------------------- #------------------- def setgeo(rundata): #------------------- """ Set GeoClaw specific runtime parameters. For documentation see .... 
""" try: geo_data = rundata.geo_data except: print("*** Error, this rundata has no geo_data attribute") raise AttributeError("Missing geo_data attribute") topofile = 'topos/TetonLarge.topo' # == Physics == geo_data.gravity = 9.81 geo_data.coordinate_system = 2 # LatLong coordinates geo_data.earth_radius = 6367.5e3 # == Forcing Options geo_data.coriolis_forcing = False # == Algorithm and Initial Conditions == geo_data.sea_level = 0.0 geo_data.dry_tolerance = 1.e-3 geo_data.friction_forcing = True geo_data.manning_coefficient = 0.035 geo_data.friction_depth = 500 # Refinement data refinement_data = rundata.refinement_data refinement_data.variable_dt_refinement_ratios = True refinement_data.wave_tolerance = 0.016 # Original setting : 0.016 refinement_data.deep_depth = 200 refinement_data.max_level_deep = 4 # == settopo.data values == topo_data = rundata.topo_data # for topography, append lines of the form # [topotype, minlevel, maxlevel, t1, t2, fname] # == settopo.data values == # Set minlevel=maxlevel=0 topofiles = rundata.topo_data.topofiles topofiles.append([3, 0, 0, 0.0, 1e10, os.path.join(topodir,'etopo1min130E210E0N60N.asc')]) # topofiles.append([3, 0, 0, 0.0, 1e10, os.path.join(topodir,'hawaii_6s.txt')]) topofiles.append([3, 0, 0, 0., 1.e10, os.path.join(topodir,'kahului_1s.txt')]) # == setdtopo.data values == # topo_data = rundata.topo_data # for moving topography, append lines of the form : (<= 1 allowed for now!) # [topotype, minlevel,maxlevel,fname] rundata.dtopo_data.dtopofiles = [[1, 0, 0,os.path.join(topodir,'fujii.txydz')]] # == setqinit.data values == rundata.qinit_data.qinit_type = 0 rundata.qinit_data.qinitfiles = [] # for qinit perturbations, append lines of the form: (<= 1 allowed for now!) # [minlev, maxlev, fname] # == fixedgrids.data values == # for fixed grids append lines of the form # [t1,t2,noutput,x1,x2,y1,y2,xpoints,ypoints,\ # ioutarrivaltimes,ioutsurfacemax] rundata.fixed_grid_data.fixedgrids = [] fixedgrids = rundata.fixed_grid_data.fixedgrids return rundata # end of function setgeo # ---------------------- if __name__ == '__main__': # Set up run-time parameters and write all data files. import sys rundata = setrun(*sys.argv[1:]) rundata.write()
bsd-2-clause
-2,912,893,298,897,029,600
32.674812
94
0.553893
false
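Editor's note: the long comment block in setrun.py maps GeoClaw refinement ratios to ForestClaw levels by comparing cumulative refinement products (RR.cumprod()) with powers of two. That arithmetic can be checked directly; a small sketch using the ratio lists quoted in the comments:

import numpy as np

# GeoClaw per-level refinement ratios quoted in the setrun.py comments.
geoclaw_rr = np.array([1, 5, 6, 4, 6, 30])
print(geoclaw_rr.cumprod())        # [1 5 30 120 720 21600]

# ForestClaw levels chosen in the first mapping (0, 2, 5, 7, 9, 14);
# each level refines by a factor of 2, so the cumulative factor is 2**level.
forestclaw_levels = np.array([0, 2, 5, 7, 9, 14])
print(2 ** forestclaw_levels)      # [1 4 32 128 512 16384]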
BrunoEV/shutdown-dialog
shutdown-menu-deb.py
1
3499
#!/usr/bin/python # -*- coding: utf-8 -*- #Author: Bruno Expósito #Contact: [email protected] #License: GPL 3 #Original design: http://hxmarius.deviantart.com/art/Elementary-Shutdown-Dialog-Mockup-V2-359472999 import sys, os from PyQt4 import QtCore from PyQt4.QtGui import * class HoverButton(QToolButton): def __init__(self, name="", parent=None): super(HoverButton, self).__init__(parent) self.setStyleSheet("background-image: url(" + absolutePath(name + ".png") + ");") self.setObjectName(name) self.setFixedSize(30,30) if name == 'close' else self.setFixedSize(110,104) def enterEvent(self,event): self.setStyleSheet("background-image: url(" + absolutePath(self.objectName() + "2.png") + ");") def leaveEvent(self,event): self.setStyleSheet("background-image: url(" + absolutePath(self.objectName() + ".png") + ");") def mousePressEvent(self, event): if self.objectName() == 'shutdown': run("/usr/bin/dbus-send --system --print-reply --dest='org.freedesktop.ConsoleKit' /org/freedesktop/ConsoleKit/Manager org.freedesktop.ConsoleKit.Manager.Stop") elif self.objectName() == 'restart': run("/usr/bin/dbus-send --system --print-reply --dest='org.freedesktop.ConsoleKit' /org/freedesktop/ConsoleKit/Manager org.freedesktop.ConsoleKit.Manager.Restart") elif self.objectName() == 'logout': run("pkill -u `whoami`") elif self.objectName() == 'lock': run("xset dpms force off") QApplication.quit() else: QApplication.quit() def main(): app = QApplication(sys.argv) w = QWidget() #Basic config w.setWindowTitle('Shutdown') w.resize(525, 148) w.setWindowIcon(QIcon(absolutePath("icon.png"))) #Background background = QLabel(w) background.setGeometry(0, 0, 525, 148) background.setPixmap(QPixmap(absolutePath("background.png"))) #Buttons shutdown=HoverButton("shutdown") restart=HoverButton("restart") logout=HoverButton("logout") lock=HoverButton("lock") #Labels separator1 = QLabel() separator2 = QLabel() separator3 = QLabel() separator1.setPixmap(QPixmap(absolutePath("separator.png"))) separator2.setPixmap(QPixmap(absolutePath("separator.png"))) separator3.setPixmap(QPixmap(absolutePath("separator.png"))) #Layout layout = QHBoxLayout() layout.addStretch() layout.setContentsMargins(23,22,23,22) layout.addWidget(shutdown) layout.addWidget(separator1) layout.addWidget(restart) layout.addWidget(separator2) layout.addWidget(logout) layout.addWidget(separator3) layout.addWidget(lock) w.setLayout(layout) #Forever alone (button version) close = HoverButton("close", w) #Move the window at the center w.move(QApplication.desktop().screen().rect().center()- w.rect().center()) #Remove the borders, buttons, etc w.setWindowFlags(w.windowFlags() | QtCore.Qt.FramelessWindowHint) #Background transparent w.setAttribute(QtCore.Qt.WA_TranslucentBackground) #CSS QApplication.instance().setStyleSheet('QToolButton {border: none;}') w.show() sys.exit(app.exec_()) def absolutePath(myFile): return os.path.dirname(os.path.abspath(__file__)) + "/pictures/" + myFile def run(command): return os.popen(command).read() if __name__ == '__main__': main()
gpl-3.0
829,175,310,360,148,500
31.388889
175
0.658376
false
PatrickLib/captcha_recognize
captcha_model.py
1
4268
from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf import captcha_input import config IMAGE_WIDTH = config.IMAGE_WIDTH IMAGE_HEIGHT = config.IMAGE_HEIGHT CLASSES_NUM = config.CLASSES_NUM CHARS_NUM = config.CHARS_NUM def inputs(train, batch_size): return captcha_input.inputs(train, batch_size=batch_size) def _conv2d(value, weight): """conv2d returns a 2d convolution layer with full stride.""" return tf.nn.conv2d(value, weight, strides=[1, 1, 1, 1], padding='SAME') def _max_pool_2x2(value, name): """max_pool_2x2 downsamples a feature map by 2X.""" return tf.nn.max_pool(value, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name) def _weight_variable(name, shape): """weight_variable generates a weight variable of a given shape.""" with tf.device('/cpu:0'): initializer = tf.truncated_normal_initializer(stddev=0.1) var = tf.get_variable(name,shape,initializer=initializer, dtype=tf.float32) return var def _bias_variable(name, shape): """bias_variable generates a bias variable of a given shape.""" with tf.device('/cpu:0'): initializer = tf.constant_initializer(0.1) var = tf.get_variable(name, shape, initializer=initializer,dtype=tf.float32) return var def inference(images, keep_prob): images = tf.reshape(images, [-1, IMAGE_HEIGHT, IMAGE_WIDTH, 1]) with tf.variable_scope('conv1') as scope: kernel = _weight_variable('weights', shape=[3,3,1,64]) biases = _bias_variable('biases',[64]) pre_activation = tf.nn.bias_add(_conv2d(images, kernel),biases) conv1 = tf.nn.relu(pre_activation, name=scope.name) pool1 = _max_pool_2x2(conv1, name='pool1') with tf.variable_scope('conv2') as scope: kernel = _weight_variable('weights', shape=[3,3,64,64]) biases = _bias_variable('biases',[64]) pre_activation = tf.nn.bias_add(_conv2d(pool1, kernel),biases) conv2 = tf.nn.relu(pre_activation, name=scope.name) pool2 = _max_pool_2x2(conv2, name='pool2') with tf.variable_scope('conv3') as scope: kernel = _weight_variable('weights', shape=[3,3,64,64]) biases = _bias_variable('biases',[64]) pre_activation = tf.nn.bias_add(_conv2d(pool2, kernel),biases) conv3 = tf.nn.relu(pre_activation, name=scope.name) pool3 = _max_pool_2x2(conv3, name='pool3') with tf.variable_scope('conv4') as scope: kernel = _weight_variable('weights', shape=[3,3,64,64]) biases = _bias_variable('biases',[64]) pre_activation = tf.nn.bias_add(_conv2d(pool3, kernel),biases) conv4 = tf.nn.relu(pre_activation, name=scope.name) pool4 = _max_pool_2x2(conv4, name='pool4') with tf.variable_scope('local1') as scope: batch_size = images.get_shape()[0].value reshape = tf.reshape(pool4, [batch_size,-1]) dim = reshape.get_shape()[1].value weights = _weight_variable('weights', shape=[dim,1024]) biases = _bias_variable('biases',[1024]) local1 = tf.nn.relu(tf.matmul(reshape,weights) + biases, name=scope.name) local1_drop = tf.nn.dropout(local1, keep_prob) with tf.variable_scope('softmax_linear') as scope: weights = _weight_variable('weights',shape=[1024,CHARS_NUM*CLASSES_NUM]) biases = _bias_variable('biases',[CHARS_NUM*CLASSES_NUM]) softmax_linear = tf.add(tf.matmul(local1_drop,weights), biases, name=scope.name) return tf.reshape(softmax_linear, [-1, CHARS_NUM, CLASSES_NUM]) def loss(logits, labels): cross_entropy = tf.nn.softmax_cross_entropy_with_logits( labels=labels, logits=logits, name='corss_entropy_per_example') cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy') tf.add_to_collection('losses', cross_entropy_mean) return 
tf.add_n(tf.get_collection('losses'), name='total_loss') def training(loss): optimizer = tf.train.AdamOptimizer(1e-4) train_op = optimizer.minimize(loss) return train_op def evaluation(logits, labels): correct_prediction = tf.equal(tf.argmax(logits,2), tf.argmax(labels,2)) correct_batch = tf.reduce_mean(tf.cast(correct_prediction, tf.int32), 1) return tf.reduce_sum(tf.cast(correct_batch, tf.float32)) def output(logits): return tf.argmax(logits, 2)
apache-2.0
-2,014,430,384,479,142,000
34.865546
84
0.685567
false
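Editor's note: in inference(), four 2x2 max-pool layers shrink each spatial dimension by roughly a factor of 16 before the flattened tensor feeds the 1024-unit dense layer, so the size of the local1 weight matrix follows directly from the configured image size. A quick arithmetic sketch; the 60x160 image size is an assumption for illustration, the real values come from config.py:

# Assumed example dimensions; the real ones live in config.py.
IMAGE_HEIGHT, IMAGE_WIDTH, CHANNELS_OUT = 60, 160, 64

h, w = IMAGE_HEIGHT, IMAGE_WIDTH
for _ in range(4):        # four 2x2 max-pools with stride 2 and 'SAME' padding
    h = (h + 1) // 2      # ceiling division matches 'SAME' pooling output size
    w = (w + 1) // 2

flat_dim = h * w * CHANNELS_OUT
print(h, w, flat_dim)     # 4 10 2560 -> local1 weights would have shape [2560, 1024]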
twilio/twilio-python
twilio/rest/serverless/v1/service/asset/__init__.py
1
15604
# coding=utf-8 r""" This code was generated by \ / _ _ _| _ _ | (_)\/(_)(_|\/| |(/_ v1.0.0 / / """ from twilio.base import deserialize from twilio.base import values from twilio.base.instance_context import InstanceContext from twilio.base.instance_resource import InstanceResource from twilio.base.list_resource import ListResource from twilio.base.page import Page from twilio.rest.serverless.v1.service.asset.asset_version import AssetVersionList class AssetList(ListResource): """ PLEASE NOTE that this class contains beta products that are subject to change. Use them with caution. """ def __init__(self, version, service_sid): """ Initialize the AssetList :param Version version: Version that contains the resource :param service_sid: The SID of the Service that the Asset resource is associated with :returns: twilio.rest.serverless.v1.service.asset.AssetList :rtype: twilio.rest.serverless.v1.service.asset.AssetList """ super(AssetList, self).__init__(version) # Path Solution self._solution = {'service_sid': service_sid, } self._uri = '/Services/{service_sid}/Assets'.format(**self._solution) def stream(self, limit=None, page_size=None): """ Streams AssetInstance records from the API as a generator stream. This operation lazily loads records as efficiently as possible until the limit is reached. The results are returned as a generator, so this operation is memory efficient. :param int limit: Upper limit for the number of records to return. stream() guarantees to never return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, stream() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.serverless.v1.service.asset.AssetInstance] """ limits = self._version.read_limits(limit, page_size) page = self.page(page_size=limits['page_size'], ) return self._version.stream(page, limits['limit']) def list(self, limit=None, page_size=None): """ Lists AssetInstance records from the API as a list. Unlike stream(), this operation is eager and will load `limit` records into memory before returning. :param int limit: Upper limit for the number of records to return. list() guarantees never to return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, list() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.serverless.v1.service.asset.AssetInstance] """ return list(self.stream(limit=limit, page_size=page_size, )) def page(self, page_token=values.unset, page_number=values.unset, page_size=values.unset): """ Retrieve a single page of AssetInstance records from the API. 
Request is executed immediately :param str page_token: PageToken provided by the API :param int page_number: Page Number, this value is simply for client state :param int page_size: Number of records to return, defaults to 50 :returns: Page of AssetInstance :rtype: twilio.rest.serverless.v1.service.asset.AssetPage """ data = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, }) response = self._version.page(method='GET', uri=self._uri, params=data, ) return AssetPage(self._version, response, self._solution) def get_page(self, target_url): """ Retrieve a specific page of AssetInstance records from the API. Request is executed immediately :param str target_url: API-generated URL for the requested results page :returns: Page of AssetInstance :rtype: twilio.rest.serverless.v1.service.asset.AssetPage """ response = self._version.domain.twilio.request( 'GET', target_url, ) return AssetPage(self._version, response, self._solution) def create(self, friendly_name): """ Create the AssetInstance :param unicode friendly_name: A string to describe the Asset resource :returns: The created AssetInstance :rtype: twilio.rest.serverless.v1.service.asset.AssetInstance """ data = values.of({'FriendlyName': friendly_name, }) payload = self._version.create(method='POST', uri=self._uri, data=data, ) return AssetInstance(self._version, payload, service_sid=self._solution['service_sid'], ) def get(self, sid): """ Constructs a AssetContext :param sid: The SID that identifies the Asset resource to fetch :returns: twilio.rest.serverless.v1.service.asset.AssetContext :rtype: twilio.rest.serverless.v1.service.asset.AssetContext """ return AssetContext(self._version, service_sid=self._solution['service_sid'], sid=sid, ) def __call__(self, sid): """ Constructs a AssetContext :param sid: The SID that identifies the Asset resource to fetch :returns: twilio.rest.serverless.v1.service.asset.AssetContext :rtype: twilio.rest.serverless.v1.service.asset.AssetContext """ return AssetContext(self._version, service_sid=self._solution['service_sid'], sid=sid, ) def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ return '<Twilio.Serverless.V1.AssetList>' class AssetPage(Page): """ PLEASE NOTE that this class contains beta products that are subject to change. Use them with caution. """ def __init__(self, version, response, solution): """ Initialize the AssetPage :param Version version: Version that contains the resource :param Response response: Response from the API :param service_sid: The SID of the Service that the Asset resource is associated with :returns: twilio.rest.serverless.v1.service.asset.AssetPage :rtype: twilio.rest.serverless.v1.service.asset.AssetPage """ super(AssetPage, self).__init__(version, response) # Path Solution self._solution = solution def get_instance(self, payload): """ Build an instance of AssetInstance :param dict payload: Payload response from the API :returns: twilio.rest.serverless.v1.service.asset.AssetInstance :rtype: twilio.rest.serverless.v1.service.asset.AssetInstance """ return AssetInstance(self._version, payload, service_sid=self._solution['service_sid'], ) def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ return '<Twilio.Serverless.V1.AssetPage>' class AssetContext(InstanceContext): """ PLEASE NOTE that this class contains beta products that are subject to change. Use them with caution. 
""" def __init__(self, version, service_sid, sid): """ Initialize the AssetContext :param Version version: Version that contains the resource :param service_sid: The SID of the Service to fetch the Asset resource from :param sid: The SID that identifies the Asset resource to fetch :returns: twilio.rest.serverless.v1.service.asset.AssetContext :rtype: twilio.rest.serverless.v1.service.asset.AssetContext """ super(AssetContext, self).__init__(version) # Path Solution self._solution = {'service_sid': service_sid, 'sid': sid, } self._uri = '/Services/{service_sid}/Assets/{sid}'.format(**self._solution) # Dependents self._asset_versions = None def fetch(self): """ Fetch the AssetInstance :returns: The fetched AssetInstance :rtype: twilio.rest.serverless.v1.service.asset.AssetInstance """ payload = self._version.fetch(method='GET', uri=self._uri, ) return AssetInstance( self._version, payload, service_sid=self._solution['service_sid'], sid=self._solution['sid'], ) def delete(self): """ Deletes the AssetInstance :returns: True if delete succeeds, False otherwise :rtype: bool """ return self._version.delete(method='DELETE', uri=self._uri, ) def update(self, friendly_name): """ Update the AssetInstance :param unicode friendly_name: A string to describe the Asset resource :returns: The updated AssetInstance :rtype: twilio.rest.serverless.v1.service.asset.AssetInstance """ data = values.of({'FriendlyName': friendly_name, }) payload = self._version.update(method='POST', uri=self._uri, data=data, ) return AssetInstance( self._version, payload, service_sid=self._solution['service_sid'], sid=self._solution['sid'], ) @property def asset_versions(self): """ Access the asset_versions :returns: twilio.rest.serverless.v1.service.asset.asset_version.AssetVersionList :rtype: twilio.rest.serverless.v1.service.asset.asset_version.AssetVersionList """ if self._asset_versions is None: self._asset_versions = AssetVersionList( self._version, service_sid=self._solution['service_sid'], asset_sid=self._solution['sid'], ) return self._asset_versions def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items()) return '<Twilio.Serverless.V1.AssetContext {}>'.format(context) class AssetInstance(InstanceResource): """ PLEASE NOTE that this class contains beta products that are subject to change. Use them with caution. """ def __init__(self, version, payload, service_sid, sid=None): """ Initialize the AssetInstance :returns: twilio.rest.serverless.v1.service.asset.AssetInstance :rtype: twilio.rest.serverless.v1.service.asset.AssetInstance """ super(AssetInstance, self).__init__(version) # Marshaled Properties self._properties = { 'sid': payload.get('sid'), 'account_sid': payload.get('account_sid'), 'service_sid': payload.get('service_sid'), 'friendly_name': payload.get('friendly_name'), 'date_created': deserialize.iso8601_datetime(payload.get('date_created')), 'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')), 'url': payload.get('url'), 'links': payload.get('links'), } # Context self._context = None self._solution = {'service_sid': service_sid, 'sid': sid or self._properties['sid'], } @property def _proxy(self): """ Generate an instance context for the instance, the context is capable of performing various actions. 
All instance actions are proxied to the context :returns: AssetContext for this AssetInstance :rtype: twilio.rest.serverless.v1.service.asset.AssetContext """ if self._context is None: self._context = AssetContext( self._version, service_sid=self._solution['service_sid'], sid=self._solution['sid'], ) return self._context @property def sid(self): """ :returns: The unique string that identifies the Asset resource :rtype: unicode """ return self._properties['sid'] @property def account_sid(self): """ :returns: The SID of the Account that created the Asset resource :rtype: unicode """ return self._properties['account_sid'] @property def service_sid(self): """ :returns: The SID of the Service that the Asset resource is associated with :rtype: unicode """ return self._properties['service_sid'] @property def friendly_name(self): """ :returns: The string that you assigned to describe the Asset resource :rtype: unicode """ return self._properties['friendly_name'] @property def date_created(self): """ :returns: The ISO 8601 date and time in GMT when the Asset resource was created :rtype: datetime """ return self._properties['date_created'] @property def date_updated(self): """ :returns: The ISO 8601 date and time in GMT when the Asset resource was last updated :rtype: datetime """ return self._properties['date_updated'] @property def url(self): """ :returns: The absolute URL of the Asset resource :rtype: unicode """ return self._properties['url'] @property def links(self): """ :returns: The URLs of the Asset resource's nested resources :rtype: unicode """ return self._properties['links'] def fetch(self): """ Fetch the AssetInstance :returns: The fetched AssetInstance :rtype: twilio.rest.serverless.v1.service.asset.AssetInstance """ return self._proxy.fetch() def delete(self): """ Deletes the AssetInstance :returns: True if delete succeeds, False otherwise :rtype: bool """ return self._proxy.delete() def update(self, friendly_name): """ Update the AssetInstance :param unicode friendly_name: A string to describe the Asset resource :returns: The updated AssetInstance :rtype: twilio.rest.serverless.v1.service.asset.AssetInstance """ return self._proxy.update(friendly_name, ) @property def asset_versions(self): """ Access the asset_versions :returns: twilio.rest.serverless.v1.service.asset.asset_version.AssetVersionList :rtype: twilio.rest.serverless.v1.service.asset.asset_version.AssetVersionList """ return self._proxy.asset_versions def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items()) return '<Twilio.Serverless.V1.AssetInstance {}>'.format(context)
mit
-2,923,306,765,211,523,600
33.219298
97
0.616188
false
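Editor's note: AssetList and AssetContext above are normally reached through the Twilio client rather than instantiated directly. A hedged usage sketch; the service SID and friendly name are placeholders, credentials come from the environment, and depending on the library version you may need to go through client.serverless.v1.services(...) explicitly:

import os

from twilio.rest import Client

# Credentials are read from the environment; the SID below is a placeholder.
client = Client(os.environ["TWILIO_ACCOUNT_SID"], os.environ["TWILIO_AUTH_TOKEN"])
service_sid = "ZSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"

# Create an Asset, then page through the service's assets.
asset = client.serverless.services(service_sid).assets.create(friendly_name="logo")
for a in client.serverless.services(service_sid).assets.list(limit=20):
    print(a.sid, a.friendly_name)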
bluephlavio/latest
test/test_core.py
1
2596
import pytest import yaml from latest.core import Grammar, contextify from latest.exceptions import PyExprSyntaxError, ContextError @pytest.fixture(scope='module') def grammar(): return Grammar() @pytest.fixture(scope='module') def context(data_file): with open(data_file, 'r') as f: data = yaml.load(f, Loader=yaml.FullLoader) return contextify(data) @pytest.fixture(params=[ # expr, result (' 2 * 2 ', 4), (' 2 * data.scalar ', 6), (' sum(data.vector) ', 6), (' data.vector ', [1, 2, 3]), (' data.tensor.true + data.tensor.false ', 1), (' * ', PyExprSyntaxError), (' x ', ContextError), ]) def pyexpr_data(request): return request.param def test_pyexpr(grammar, context, pyexpr_data): pyexpr, result = r'{$' + pyexpr_data[0] + r'$}', pyexpr_data[1] toks = grammar.pyexpr.parseString(pyexpr) handler = toks[0] try: assert handler.eval(context) == result except Exception as e: assert e.__class__ == result @pytest.fixture(params=[ (' 2 * 2 ', '4'), (' data.scalar ', '3'), (' * ', PyExprSyntaxError), (' x ', ContextError), ]) def str_pyexpr_data(request): return request.param def test_str_pyexpr(grammar, context, str_pyexpr_data): str_pyexpr, result = r'\latest{$' + str_pyexpr_data[0] + r'$}', str_pyexpr_data[1] toks = grammar.str_pyexpr.parseString(str_pyexpr) handler = toks[0] try: assert handler.eval(context) == result except Exception as e: assert e.__class__ == result @pytest.fixture(params=[ # context, options, content, result (r'\begin{latest}{$ data $}\latest{$ scalar $}\end{latest}', '3'), (r'\begin{latest}{$ [{"n": n} for n in data.vector] $}\latest{$ n $}\end{latest}', '123'), ]) def env_data(request): return request.param def test_env(grammar, context, env_data): env, result = env_data toks = grammar.env.parseString(env) handler = toks[0] try: assert handler.eval(context) == result except Exception as e: assert e.__class__ == result @pytest.fixture(params=[ (r'scalar = \latest{$ data.scalar $}', 'scalar = 3'), (r'\begin{latest}{$ [{"n": n} for n in data.vector] $}\latest{$ n $}\end{latest}, bla', '123, bla'), ]) def grammar_data(request): return request.param def test_grammar(grammar, context, grammar_data): (g, result) = grammar_data toks = grammar.grammar.parseString(g) handler = toks[0] try: assert handler.eval(context) == result except Exception as e: assert e.__class__ == result
mit
1,981,273,305,531,651,800
25.489796
104
0.611325
false
Ederson77/Inverter
inverter.py
1
6853
#!/usr/bin/env python
#-*- coding: utf-8 -*-

import pygtk
pygtk.require("2.0") #This line defines the pygtk version to be imported
import gtk, gobject, cairo, gio, pango, atk, pangocairo

#Creating the program's class
class InverterApp(object):
    def __init__(self):
        #As mentioned before, we use a function of the gtk.Builder class
        #to load the XML file generated by Glade.
        builder = gtk.Builder() #First we create an instance of the class
        builder.add_from_file("inverter.glade") #Function to load the file

        #Now we can access the widgets from the XML file. If you changed the
        #name of any widget and did not follow the tutorial exactly as I did,
        #pay attention and use the name you defined.
        #If you do not remember the names, open the XML file in Glade again
        #and check the names in the widget tree on the right side.
        #We will use the get_object function, passing the widget name as a parameter.

        #Getting the window1 widget, our main window
        self.window = builder.get_object("window1")
        #Getting the text_entry widget (our program's text area), since we
        #will use it in the URL inversion functions and to insert the
        #already inverted URL
        self.text_area = builder.get_object("text_entry")
        #Getting the about_dialog widget (small window with the program information)
        self.about = builder.get_object("about_dialog")

        #Showing the program window
        self.window.show()

        #Now we connect the signals we defined for each widget in Glade.
        #For that we use the connect_signals() function. If you defined any
        #signal name differently from the ones I used, pay attention to this
        #part and replace it with the name you defined.
        builder.connect_signals({"gtk_main_quit": gtk.main_quit,
                                 #Main window signal, connected to the gtk
                                 #function that closes our program
                                 "on_button_invert_clicked": self.invert_url,
                                 #Button clicked signal; the value is a
                                 #function we will create further down in the code.
                                 "on_text_entry_activate": self.invert_url,
                                 #Text area signal for the Enter key press.
                                 #Note that we use the same function used as
                                 #the button's value, i.e. whether the user
                                 #clicks the button or presses Enter the URL
                                 #will be inverted, since both signals call
                                 #the same inversion function.
                                 "on_copy_activate": self.copy_text,
                                 #Copy item signal of the Edit menu, where we
                                 #call the function that copies the text to
                                 #the system clipboard. We will still create
                                 #this function.
                                 "on_paste_activate": self.paste_text,
                                 #Paste item signal of the Edit menu, with the
                                 #paste_text function we will define later
                                 "on_delete_activate": self.delete_text,
                                 #Delete item signal of the Edit menu; the
                                 #delete_text function will be defined later.
                                 "on_about_activate": self.about_window,
                                 #About item signal of the Help menu; the
                                 #function will still be created.
                                 })

    #Creating the functions I specified as values in the signals dictionary
    def invert_url(self, widget):
        """Main function of the program; it stores the URL the user types in
        the text area and inverts it"""
        #Using the text_area variable, which holds the text area widget,
        #we use the get_text() function to get the text the user typed.
        #This function is available on any instance of a gtk.Entry object.
        #See more about it in the official documentation at the end of the post.
        url = self.text_area.get_text()
        #Inverting the URL that was stored in the variable
        url = url[::-1]
        #Inserting the already inverted URL into the text area; for that we
        #use the set_text() function, also available on gtk.Entry objects
        self.text_area.set_text(url)

    def copy_text(self, widget):
        """Function to copy the value typed in the text area to the system
        clipboard"""
        #Getting access to the system clipboard
        clipboard = gtk.clipboard_get()
        #Getting the URL typed in the text area so we can copy it
        url = self.text_area.get_text()
        #Copying the URL to the system clipboard
        clipboard.set_text(url)
        #So that the URL stays stored in the clipboard even after the
        #program is closed, we use the store() function of gtk.clipboard
        clipboard.store()

    def paste_text(self, widget):
        """Function to paste the text stored in the clipboard into the
        program's text area"""
        #Getting access to the system clipboard
        clipboard = gtk.clipboard_get()
        #Getting the text stored in the clipboard
        url = clipboard.wait_for_text()
        #Inserting the text obtained from the clipboard into the program's text area
        self.text_area.set_text(url)

    def delete_text(self, widget):
        """Function to delete any text currently in the text area"""
        #Since we want to delete any text in the text area,
        #we simply insert an empty string into it
        self.text_area.set_text("")

    def about_window(self, widget):
        """Function to show the program's About window"""
        #Running the About window
        self.about.run()
        #Enabling the close option of the About window
        self.about.hide()

if __name__ == "__main__":
    #Creating an instance of the program
    app = InverterApp()
    #GTK function that keeps our program's main window in a loop so that it
    #stays running, being terminated only when the gtk.main_quit function is
    #called, which is configured in the gtk_main_quit signal tied to the
    #program's close button
    gtk.main()
gpl-2.0
-8,796,580,482,553,545,000
45.784722
79
0.59902
false
emilybache/texttest-runner
src/main/python/storytext/lib/storytext/gtktoolkit/simulator/filechooserevents.py
1
5112
""" Event-handling around gtk.FileChoosers of various sorts """ from baseevents import StateChangeEvent from storytext.definitions import UseCaseScriptError import gtk, os # At least on Windows this doesn't seem to happen immediately, but takes effect some time afterwards # Seems quite capable of generating too many of them also class FileChooserFolderChangeEvent(StateChangeEvent): signalName = "current-folder-changed" def __init__(self, name, widget, *args): self.currentFolder = widget.get_current_folder() StateChangeEvent.__init__(self, name, widget) def setProgrammaticChange(self, val, filename=None): if val: self.programmaticChange = val def shouldRecord(self, *args): hasChanged = self.currentFolder is not None and self.widget.get_current_folder() != self.currentFolder self.currentFolder = self.widget.get_current_folder() if not hasChanged: return False ret = StateChangeEvent.shouldRecord(self, *args) self.programmaticChange = False return ret def getProgrammaticChangeMethods(self): return [ self.widget.set_filename ] def getChangeMethod(self): return self.widget.set_current_folder def getStateDescription(self, *args): return os.path.basename(self.widget.get_current_folder()) def getStateChangeArgument(self, argumentString): for folder in self.widget.list_shortcut_folders(): if os.path.basename(folder) == argumentString: return folder folder = os.path.join(self.widget.get_current_folder(), argumentString) if os.path.isdir(folder): return folder else: raise UseCaseScriptError, "Cannot find folder '" + argumentString + "' to change to!" # Base class for selecting a file or typing a file name class FileChooserFileEvent(StateChangeEvent): def __init__(self, name, widget, *args): StateChangeEvent.__init__(self, name, widget) self.currentName = self.getStateDescription() def eventIsRelevant(self): if self.widget.get_filename() is None: return False return self.currentName != self._getStateDescription() def getStateDescription(self, *args): self.currentName = self._getStateDescription() return self.currentName def _getStateDescription(self): fileName = self.widget.get_filename() if fileName: return os.path.basename(fileName) else: return "" class FileChooserFileSelectEvent(FileChooserFileEvent): signalName = "selection-changed" def getChangeMethod(self): return self.widget.select_filename def connectRecord(self, *args): FileChooserFileEvent.connectRecord(self, *args) self.widget.connect("current-folder-changed", self.getStateDescription) def getProgrammaticChangeMethods(self): return [ self.widget.set_filename, self.widget.set_current_folder ] def setProgrammaticChange(self, val, filename=None): FileChooserFileEvent.setProgrammaticChange(self, val) if val and filename: self.currentName = os.path.basename(filename) def shouldRecord(self, *args): if self.currentName: # once we've got a name, everything is permissible... return FileChooserFileEvent.shouldRecord(self, *args) else: self.getStateDescription() return False def getStateChangeArgument(self, argumentString): path = os.path.join(self.widget.get_current_folder(), argumentString) if os.path.exists(path): return path else: raise UseCaseScriptError, "Cannot select file '" + argumentString + "', no such file in current folder" @classmethod def getAssociatedSignatures(cls, widget): if widget.get_property("action") == gtk.FILE_CHOOSER_ACTION_OPEN: return [ cls.getAssociatedSignal(widget) ] else: return [] class FileChooserEntryEvent(FileChooserFileEvent): # There is no such signal on FileChooser, but we can pretend... 
# We record by waiting for the dialog to be closed, but we don't want to store that signalName = "current-name-changed" @staticmethod def widgetHasSignal(widget, signalName): return widget.isInstanceOf(gtk.FileChooser) # not a real signal, so we fake it def _connectRecord(self, widget, method): # Wait for the dialog to be closed before we record # We must therefore be first among the handlers so we can record # before the dialog close event gets recorded... dialog = widget.get_toplevel() dialog.connect_for_real("response", method, self) def getChangeMethod(self): return self.widget.set_current_name @classmethod def getAssociatedSignatures(cls, widget): if widget.get_property("action") == gtk.FILE_CHOOSER_ACTION_SAVE: return [ cls.getAssociatedSignal(widget) ] else: return []
mit
-6,649,561,579,554,173,000
37.727273
115
0.669797
false
MeirKriheli/statirator
statirator/core/tests.py
1
2131
# -*- coding: utf-8 -*-
import datetime

from django.utils import unittest

from statirator.core.utils import find_readers
from statirator.core.readers import dummy_reader
from statirator.core.parsers import parse_rst

TEST_DOC = """
:slug: some-post-title-slugified
:draft: 1
:datetime: 2012-09-12 16:03:15

This will be ignored in main meta section

.. --

=================
English title
=================

:lang: en
:tags: Tag1, Tag2

The content of the English post

And another paragraph

.. --

====================
כותרת עברית
====================

:lang: he
:tags: פייתון|python, Heb Tag2|slug

The content of the post in Hebrew
""".decode('utf-8')


class CoreTestCase(unittest.TestCase):

    def test_find_readers(self):
        "Correctly find readers"

        readers = find_readers()
        self.assertIn(dummy_reader, readers)

    def test_rst_parser(self):
        """Correctly parse multilingual rst documents"""

        parsed = parse_rst(TEST_DOC)

        generic_metadata, title, content = parsed.next()
        self.assertEqual(generic_metadata, {
            'slug': 'some-post-title-slugified',
            'draft': True,
            'datetime': datetime.datetime(2012, 9, 12, 16, 3, 15),
        })
        self.assertEqual(content.strip(),
                         u'<p>This will be ignored in main meta section</p>')

        en_metadata, en_title, en_content = parsed.next()
        self.assertEqual(en_metadata, {'lang': 'en', 'tags': ['Tag1', 'Tag2']})
        self.assertEqual(en_title, u'English title')
        self.assertEqual(en_content.strip(),
                         u'<p>The content of the English post</p>\n'
                         u'<p>And another paragraph</p>')

        he_metadata, he_title, he_content = parsed.next()
        self.assertEqual(he_metadata, {
            'lang': 'he',
            'tags': ['פייתון|python'.decode('utf-8'), 'Heb Tag2|slug']
        })
        self.assertEqual(he_title, 'כותרת עברית'.decode('utf-8'))
        self.assertEqual(he_content.strip(),
                         u'<p>The content of the post in Hebrew</p>')
mit
-6,562,754,041,819,381,000
26.25974
79
0.58504
false
niavok/perroquet
perroquetlib/gui/gui_sequence_properties_advanced.py
1
18351
# -*- coding: utf-8 -*- # Copyright (C) 2009-2011 Frédéric Bertolus. # # This file is part of Perroquet. # # Perroquet is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Perroquet is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Perroquet. If not, see <http://www.gnu.org/licenses/>. import gettext import os import gtk from perroquetlib.config import config from perroquetlib.model.exercise import Exercise from perroquetlib.model.languages_manager import LanguagesManager from perroquetlib.model.sub_exercise import SubExercise _ = gettext.gettext class GuiSequencePropertiesAdvanced: def __init__(self, core, parent): self.core = core self.config = config self.parent = parent self.builder = gtk.Builder() self.builder.set_translation_domain("perroquet") self.builder.add_from_file(self.config.get("ui_sequence_properties_advanced_path")) self.builder.connect_signals(self) self.dialog = self.builder.get_object("dialogExercisePropertiesAdvanced") self.treeviewPathsList = self.builder.get_object("treeviewPathsList") self.dialog.set_modal(True) self.dialog.set_transient_for(self.parent) self.iterPath = None def run(self): self.load() self.dialog.run() self.dialog.destroy() def load(self): exercise = self.core.get_exercise() if len(exercise.subExercisesList) > 0: self.__load_path(exercise.subExercisesList[0].get_video_path(), exercise.subExercisesList[0].get_exercise_path(), exercise.subExercisesList[0].get_translation_path()) else: self._Load("", "", "") self.pathListStore = gtk.ListStore(str, str, str, str) for subExercise in exercise.subExercisesList: name = os.path.basename(subExercise.get_video_path()) self.pathListStore.append([name, subExercise.get_video_path(), subExercise.get_exercise_path(), subExercise.get_translation_path()]) cell = gtk.CellRendererText() treeviewcolumnPath = gtk.TreeViewColumn(_("Path")) treeviewcolumnPath.pack_start(cell, True) treeviewcolumnPath.add_attribute(cell, 'markup', 0) treeviewcolumnPath.set_expand(True) columns = self.treeviewPathsList.get_columns() for column in columns: self.treeviewPathsList.remove_column(column) self.treeviewPathsList.append_column(treeviewcolumnPath) self.treeviewPathsList.set_model(self.pathListStore) self.treeviewSelectionPathsList = self.treeviewPathsList.get_selection() self.iterPath = self.pathListStore.get_iter_first() self.treeviewSelectionPathsList.select_iter(self.iterPath) checkbuttonRepeatAfterComplete = self.builder.get_object("checkbuttonRepeatAfterComplete") checkbuttonRepeatAfterComplete.set_active(self.core.get_exercise().get_repeat_after_completed()) checkbuttonUseDynamicCorrection = self.builder.get_object("checkbuttonUseDynamicCorrection") checkbuttonUseDynamicCorrection.set_active(self.core.get_exercise().is_use_dynamic_correction()) checkbuttonRandomOrder = self.builder.get_object("checkbuttonRandomOrder") checkbuttonRandomOrder.set_active(self.core.get_exercise().is_random_order()) checkbutton_disable_help = self.builder.get_object("checkbutton_disable_help") checkbutton_disable_help.set_active(self.core.get_exercise().is_lock_help()) self.liststoreLanguage = gtk.ListStore(str, 
str) languageManager = LanguagesManager() languagesList = languageManager.get_languages_list() currentLangId = self.core.get_exercise().get_language_id() for language in languagesList: iter = self.liststoreLanguage.append([language.name, language.id]) if language.id == currentLangId: currentIter = iter comboboxLanguage = self.builder.get_object("comboboxLanguage") cell = gtk.CellRendererText() comboboxLanguage.set_model(self.liststoreLanguage) comboboxLanguage.pack_start(cell, True) comboboxLanguage.add_attribute(cell, 'text', 0) comboboxLanguage.set_active_iter(currentIter) adjustmentTimeBetweenSequence = self.builder.get_object("adjustmentTimeBetweenSequence") adjustmentTimeBetweenSequence.set_value(self.core.get_exercise().get_time_between_sequence()) adjustmentMaximumSequenceTime = self.builder.get_object("adjustmentMaximumSequenceTime") adjustmentMaximumSequenceTime.set_value(self.core.get_exercise().get_max_sequence_length()) adjustmentTimeBeforeSequence = self.builder.get_object("adjustmentTimeBeforeSequence") adjustmentTimeBeforeSequence.set_value(self.core.get_exercise().get_play_margin_before()) adjustmentTimeAfterSequence = self.builder.get_object("adjustmentTimeAfterSequence") adjustmentTimeAfterSequence.set_value(self.core.get_exercise().get_play_margin_after()) entryExerciseName = self.builder.get_object("entryExerciseName") if self.core.get_exercise().get_name(): entryExerciseName.set_text(self.core.get_exercise().get_name()) else: entryExerciseName.set_text("") entryRepeatCountLimit = self.builder.get_object("entryRepeatCountLimit") entryRepeatCountLimit.set_text(str(self.core.get_exercise().get_repeat_count_limit_by_sequence())) #Locks checkbutton_lock_properties = self.builder.get_object("checkbutton_lock_properties") checkbutton_lock_properties.set_active(self.core.get_exercise().is_lock_properties()) checkbutton_lock_correction = self.builder.get_object("checkbutton_lock_correction") checkbutton_lock_correction.set_active(self.core.get_exercise().is_lock_correction()) self._update_path_buttons() def __load_path(self, videoPath, exercisePath, translationPath): if videoPath == "": videoPath = "None" if exercisePath == "": exercisePath = "None" if translationPath == "": translationPath = "None" videoChooser = self.builder.get_object("filechooserbuttonVideoProp") exerciseChooser = self.builder.get_object("filechooserbuttonExerciseProp") translationChooser = self.builder.get_object("filechooserbuttonTranslationProp") videoChooser.set_filename(videoPath) exerciseChooser.set_filename(exercisePath) translationChooser.set_filename(translationPath) if videoPath and os.path.isfile(videoPath): filePath = os.path.dirname(videoPath) if not exercisePath or not os.path.isfile(exercisePath): exerciseChooser.set_current_folder(filePath) if not translationPath or not os.path.isfile(translationPath): translationChooser.set_current_folder(filePath) def on_treeview_paths_list_cursor_changed(self, widget, data=None): (modele, iter) = self.treeviewSelectionPathsList.get_selected() self.__store_path_changes() self.iterPath = iter self._update_path_buttons() if iter == None: return videoPath, exercisePath, translationPath = modele.get(iter, 1, 2, 3) self.__load_path(videoPath, exercisePath, translationPath) def _update_path_buttons(self): if self.iterPath == None: buttonRemovePath = self.builder.get_object("buttonRemovePath") buttonRemovePath.set_sensitive(False) buttonUpPath = self.builder.get_object("buttonUpPath") buttonUpPath.set_sensitive(False) buttonDownPath = 
self.builder.get_object("buttonDownPath") buttonDownPath.set_sensitive(False) else: buttonRemovePath = self.builder.get_object("buttonRemovePath") buttonRemovePath.set_sensitive(True) buttonUpPath = self.builder.get_object("buttonUpPath") if self.previous_iter(self.pathListStore, self.iterPath) == None: buttonUpPath.set_sensitive(False) else: buttonUpPath.set_sensitive(True) buttonDownPath = self.builder.get_object("buttonDownPath") if self.pathListStore.iter_next(self.iterPath) == None: buttonDownPath.set_sensitive(False) else: buttonDownPath.set_sensitive(True) def on_button_exercise_prop_ok_clicked(self, widget, data=None): self.__store_path_changes() checkbuttonRepeatAfterComplete = self.builder.get_object("checkbuttonRepeatAfterComplete") self.core.get_exercise().set_repeat_after_completed(checkbuttonRepeatAfterComplete.get_active()) checkbuttonUseDynamicCorrection = self.builder.get_object("checkbuttonUseDynamicCorrection") self.core.get_exercise().set_use_dynamic_correction(checkbuttonUseDynamicCorrection.get_active()) checkbuttonRandomOrder = self.builder.get_object("checkbuttonRandomOrder") self.core.get_exercise().set_random_order(checkbuttonRandomOrder.get_active()) comboboxLanguage = self.builder.get_object("comboboxLanguage") self.liststoreLanguage.get_iter_first() iter = comboboxLanguage.get_active_iter() langId = self.liststoreLanguage.get_value(iter, 1) self.core.get_exercise().set_language_id(langId) adjustmentTimeBetweenSequence = self.builder.get_object("adjustmentTimeBetweenSequence") self.core.get_exercise().set_time_between_sequence(adjustmentTimeBetweenSequence.get_value()) adjustmentMaximumSequenceTime = self.builder.get_object("adjustmentMaximumSequenceTime") self.core.get_exercise().set_max_sequence_length(adjustmentMaximumSequenceTime.get_value()) adjustmentTimeBeforeSequence = self.builder.get_object("adjustmentTimeBeforeSequence") self.core.get_exercise().set_play_margin_before(int(adjustmentTimeBeforeSequence.get_value())) adjustmentTimeAfterSequence = self.builder.get_object("adjustmentTimeAfterSequence") self.core.get_exercise().set_play_margin_after(int(adjustmentTimeAfterSequence.get_value())) entryExerciseName = self.builder.get_object("entryExerciseName") self.core.get_exercise().set_name(entryExerciseName.get_text()) entryRepeatCountLimit = self.builder.get_object("entryRepeatCountLimit") self.core.get_exercise().set_repeat_count_limit_by_sequence(int(entryRepeatCountLimit.get_text())) entryRepeatCountLimit.set_text(str(self.core.get_exercise().get_repeat_count_limit_by_sequence())) if self.core.get_exercise().get_repeat_count_limit_by_sequence() == 0: self.core.get_exercise().clear_sequence_repeat_count() #Locks checkbutton_disable_help = self.builder.get_object("checkbutton_disable_help") self.core.get_exercise().set_lock_help(checkbutton_disable_help.get_active()) checkbutton_lock_properties = self.builder.get_object("checkbutton_lock_properties") lock_properties = checkbutton_lock_properties.get_active() entry_lock_properties = self.builder.get_object("entry_lock_properties") lock_properties_password = entry_lock_properties.get_text() if len(lock_properties_password) == 0: lock_properties_password = None if lock_properties != self.core.get_exercise().is_lock_properties() or lock_properties_password is not None: self.core.get_exercise().set_lock_properties(lock_properties, lock_properties_password) checkbutton_lock_correction = self.builder.get_object("checkbutton_lock_correction") lock_correction = checkbutton_lock_correction.get_active() 
entry_lock_correction = self.builder.get_object("entry_lock_correction") lock_correction_password = entry_lock_correction.get_text() if len(lock_correction_password) == 0: lock_correction_password = None if lock_correction != self.core.get_exercise().is_lock_correction() or lock_correction_password is not None: self.core.get_exercise().set_lock_correction(lock_correction, lock_correction_password) # Update paths if len(self.pathListStore) != len(self.core.get_exercise().subExercisesList): self.core.get_exercise().subExercisesList = [] for subPath in self.pathListStore: self.core.get_exercise().subExercisesList.append(SubExercise(self.core.get_exercise())) for i, subPath in enumerate(self.pathListStore): self.core.get_exercise().subExercisesList[i].set_video_path(subPath[1]) self.core.get_exercise().subExercisesList[i].set_exercise_path(subPath[2]) self.core.get_exercise().subExercisesList[i].set_translation_path(subPath[3]) self.core.update_properties() self.core.set_can_save(True) self.dialog.response(gtk.RESPONSE_OK) def on_button_exercise_prop_cancel_clicked(self, widget, data=None): self.dialog.response(gtk.RESPONSE_CANCEL) def __store_path_changes(self): if self.iterPath == None: return videoChooser = self.builder.get_object("filechooserbuttonVideoProp") videoPath = videoChooser.get_filename() exerciseChooser = self.builder.get_object("filechooserbuttonExerciseProp") exercisePath = exerciseChooser.get_filename() translationChooser = self.builder.get_object("filechooserbuttonTranslationProp") translationPath = translationChooser.get_filename() if videoPath == "None" or videoPath == None: videoPath = "" if exercisePath == "None" or exercisePath == None: exercisePath = "" if translationPath == "None" or translationPath == None: translationPath = "" if self.iterPath == None: return self.iterPath self.pathListStore.set_value(self.iterPath, 0, os.path.basename(videoPath)) self.pathListStore.set_value(self.iterPath, 1, videoPath) self.pathListStore.set_value(self.iterPath, 2, exercisePath) self.pathListStore.set_value(self.iterPath, 3, translationPath) def on_filechooserbutton_video_prop_file_set(self, widget, data=None): videoChooser = self.builder.get_object("filechooserbuttonVideoProp") exerciseChooser = self.builder.get_object("filechooserbuttonExerciseProp") translationChooser = self.builder.get_object("filechooserbuttonTranslationProp") fileName = videoChooser.get_filename() if fileName and os.path.isfile(fileName): filePath = os.path.dirname(fileName) if not exerciseChooser.get_filename() or not os.path.isfile(exerciseChooser.get_filename()): exerciseChooser.set_current_folder(filePath) if not translationChooser.get_filename() or not os.path.isfile(translationChooser.get_filename()): translationChooser.set_current_folder(filePath) self.__store_path_changes() def previous_iter(self, model, iter): if not iter: return None path = model.get_string_from_iter(iter) if not path: return None prow = int(path) - 1 if prow == -1: return None prev = model.get_iter_from_string("%d" % prow) return prev def on_button_down_path_clicked(self, widget, data=None): self.pathListStore.move_after(self.iterPath, self.pathListStore.iter_next(self.iterPath)) self._update_path_buttons() def on_button_up_path_clicked(self, widget, data=None): self.pathListStore.move_before(self.iterPath, self.previous_iter(self.pathListStore, self.iterPath)) self._update_path_buttons() def on_button_add_path_clicked(self, widget, data=None): self.__store_path_changes() if self.iterPath is None: self.iterPath = 
self.pathListStore.get_iter_first() while self.pathListStore.iter_next(self.iterPath) is not None: self.iterPath = self.pathListStore.iter_next(self.iterPath) iter = self.pathListStore.insert_after(self.iterPath, [self.pathListStore.get_value(self.iterPath, 0), self.pathListStore.get_value(self.iterPath, 1), self.pathListStore.get_value(self.iterPath, 2), self.pathListStore.get_value(self.iterPath, 3)]) self.iterPath = None self.treeviewSelectionPathsList.select_iter(iter) def on_button_remove_path_clicked(self, widget, data=None): self.pathListStore.remove(self.iterPath) self.iterPath = None self._update_path_buttons() def on_button_defaut_time_between_sequences_clicked(self, widget, data=None): adjustmentTimeBetweenSequence = self.builder.get_object("adjustmentTimeBetweenSequence") exercice = Exercise() adjustmentTimeBetweenSequence.set_value(exercice.get_time_between_sequence()) def on_button_defaut_maximum_sequence_time_clicked(self, widget, data=None): adjustmentMaximumSequenceTime = self.builder.get_object("adjustmentMaximumSequenceTime") exercice = Exercise() adjustmentMaximumSequenceTime.set_value(exercice.get_max_sequence_length()) def on_button_defaut_time_before_sequence_clicked(self, widget, data=None): adjustmentTimeBeforeSequence = self.builder.get_object("adjustmentTimeBeforeSequence") exercice = Exercise() adjustmentTimeBeforeSequence.set_value(exercice.get_play_margin_before()) def on_button_defaut_time_after_sequence_clicked(self, widget, data=None): adjustmentTimeAfterSequence = self.builder.get_object("adjustmentTimeAfterSequence") exercice = Exercise() adjustmentTimeAfterSequence.set_value(exercice.get_play_margin_after())
gpl-3.0
-2,187,032,419,098,357,800
44.083538
255
0.695733
false
picrin/PredictiveCollision
src/Numerical.py
1
1775
import math


class Numerical:

    @staticmethod
    def pointAtAngle(coords, angle, distance):
        return (coords[0] + math.sin(angle)*distance, coords[1] + math.cos(angle)*distance)

    @staticmethod
    def solveQuadraticPrune(equation):
        # choose the fastest
        delta = equation[1]**2 - 4*equation[0]*equation[2]
        if delta < 0:
            return float("+inf")
        else:
            solution = [((-equation[1] + delta**(1.0/2.0))/(2*(equation[0]))), ((-equation[1] - delta**(1.0/2.0))/(2*(equation[0])))]
            if min(solution) > 0:
                return min(solution)
            elif max(solution) > 0:
                return max(solution)
            else:
                return float("+inf")

    @staticmethod
    def solveQuadraticPrune_(equation):
        # choose the fastest. Might not be sufficiently tested.
        solution = numpy.roots(equation)
        # well, maybe solution = numpy.real_if_close(numpy.roots(equation))
        return Numerical.solutionPruning(solution)

    @staticmethod
    def solutionPruning(twoSolutions):
        """ @dev-only -- return least real positive from a sequence of two complex numbers (or exceptionally one). If there is no such number, returns one. """
        try:
            if twoSolutions[0].imag == 0:
                if min(twoSolutions) > 0:
                    return min(twoSolutions)
                elif max(twoSolutions) > 0:
                    return max(twoSolutions)
                else:
                    return float("+inf")
            else:
                return float("+inf")
        except IndexError:
            print "unusual stuff going on with solution pruning. Leading coefficient == 0?"
            return twoSolutions[0].real

    @staticmethod
    def solveLinear(equation):
        try:
            solution = -1 * equation[1]/equation[0]
            if solution > 0:
                return solution
            else:
                return float("+inf")
            #a + bt = 0
            #bt = -a
            #t = -a/b
        except ZeroDivisionError:
            print "unusual stuff going on with linear solver. Speed = 0?!"
            return float("+inf")
bsd-2-clause
3,502,172,420,127,385,000
28.583333
145
0.668732
false
impactlab/eemeter
eemeter/ee/derivatives.py
1
4412
from datetime import datetime

import pandas as pd
import pytz


def annualized_weather_normal(formatter, model, weather_normal_source):
    ''' Annualize energy trace values given a model and a source of 'normal'
    weather data, such as Typical Meteorological Year (TMY) 3 data.

    Parameters
    ----------
    formatter : eemeter.modeling.formatter.Formatter
        Formatter that can be used to create a demand fixture. Must supply
        the :code:`.create_demand_fixture(index, weather_source)` method.
    model : eemeter.modeling.models.Model
        Model that can be used to predict out of sample energy trace values.
        Must supply the :code:`.predict(demand_fixture_data)` method.
    weather_normal_source : eemeter.weather.WeatherSource
        WeatherSource providing weather normals.

    Returns
    -------
    out : dict
        Dictionary with the following item:

        - :code:`"annualized_weather_normal"`: 4-tuple with the values
          :code:`(annualized, lower, upper, n)`, where

          - :code:`annualized` is the total annualized (weather normalized)
            value predicted over the course of a 'normal' weather year.
          - :code:`lower` is the number which should be subtracted from
            :code:`annualized` to obtain the 0.025 quantile lower error
            bound.
          - :code:`upper` is the number which should be added to
            :code:`annualized` to obtain the 0.975 quantile upper error
            bound.
          - :code:`n` is the number of samples considered in developing
            the bound - useful for adding other values with errors.

    '''
    normal_index = pd.date_range('2015-01-01', freq='D', periods=365,
                                 tz=pytz.UTC)

    demand_fixture_data = formatter.create_demand_fixture(
        normal_index, weather_normal_source)

    normals = model.predict(demand_fixture_data)

    annualized = normals.sum()
    n = normal_index.shape[0]
    upper = (model.upper**2 * n)**0.5
    lower = (model.lower**2 * n)**0.5

    return {
        "annualized_weather_normal": (annualized, lower, upper, n),
    }


def gross_predicted(formatter, model, weather_source, reporting_period):
    ''' Find gross predicted energy trace values given a model and a source
    of observed weather data.

    Parameters
    ----------
    formatter : eemeter.modeling.formatter.Formatter
        Formatter that can be used to create a demand fixture. Must supply
        the :code:`.create_demand_fixture(index, weather_source)` method.
    model : eemeter.modeling.models.Model
        Model that can be used to predict out of sample energy trace values.
        Must supply the :code:`.predict(demand_fixture_data)` method.
    weather_source : eemeter.weather.WeatherSource
        WeatherSource providing observed weather data.
    baseline_period : eemeter.structures.ModelingPeriod
        Period targetted by baseline model.
    reporting_period : eemeter.structures.ModelingPeriod
        Period targetted by reporting model.

    Returns
    -------
    out : dict
        Dictionary with the following item:

        - :code:`"gross_predicted"`: 4-tuple with the values
          :code:`(annualized, lower, upper, n)`, where

          - :code:`gross_predicted` is the total gross predicted value over
            time period defined by the reporting period.
          - :code:`lower` is the number which should be subtracted from
            :code:`gross_predicted` to obtain the 0.025 quantile lower error
            bound.
          - :code:`upper` is the number which should be added to
            :code:`gross_predicted` to obtain the 0.975 quantile upper error
            bound.
          - :code:`n` is the number of samples considered in developing
            the bound - useful for adding other values with errors.

    '''
    start_date = reporting_period.start_date
    end_date = reporting_period.end_date

    if end_date is None:
        end_date = datetime.utcnow()

    index = pd.date_range(start_date, end_date, freq='D', tz=pytz.UTC)

    demand_fixture_data = formatter.create_demand_fixture(
        index, weather_source)

    values = model.predict(demand_fixture_data)

    gross_predicted = values.sum()
    n = index.shape[0]
    upper = (model.upper**2 * n)**0.5
    lower = (model.lower**2 * n)**0.5

    return {
        "gross_predicted": (gross_predicted, lower, upper, n),
    }
mit
5,515,248,473,266,296,000
38.392857
78
0.653445
false
codelieche/codelieche.com
apps/account/views/page/user.py
1
1944
# -*- coding:utf-8 -*-
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth import authenticate, login, logout
from django.views.generic import View

from account.forms import LoginForm


class LoginPageView(View):
    """ User login """
    # print(request.COOKIES)
    # print(request.user)  # AnonymousUser

    def get(self, request):
        url = request.get_full_path()
        form = LoginForm()
        return render(request, 'account/login.html', {'form': form, 'url': url})

    def post(self, request):
        url = request.get_full_path()
        form = LoginForm(request.POST)

        if form.is_valid():
            form_cleaned = form.cleaned_data
            user = authenticate(username=form_cleaned['username'],
                                password=form_cleaned['password'])
            if user is not None:
                # Check whether the user is active
                if user.is_active:
                    login(request, user)
                    # return HttpResponse('{sucess:true}')
                    next_url = request.GET.get('next', '/article/create')
                    return HttpResponseRedirect(redirect_to=next_url)
                else:
                    message = "User account is not active"
                    # return HttpResponse('{sucess:false}')
            else:
                message = "Wrong username or password"
        else:
            # After cleaning, the data does not meet the requirements
            message = "The submitted content is not valid"

        return render(request, 'account/login.html',
                      {'form': form, 'url': url, 'msg': message})


def user_logout(request):
    """User logout"""
    logout(request)
    next_url = request.GET.get('next', '/account/login')
    return HttpResponseRedirect(redirect_to=next_url)
mit
-1,653,475,500,208,617,000
33.679245
80
0.536453
false
valesi/electrum
gui/text.py
1
17767
import tty, sys import curses, datetime, locale from decimal import Decimal from electrum.util import format_satoshis, set_verbosity from electrum.util import StoreDict from electrum.bitcoin import is_valid, COIN, TYPE_ADDRESS from electrum import Wallet, WalletStorage _ = lambda x:x class ElectrumGui: def __init__(self, config, network, daemon, plugins): self.config = config self.network = network storage = WalletStorage(config.get_wallet_path()) if not storage.file_exists: print "Wallet not found. try 'electrum create'" exit() self.wallet = Wallet(storage) self.wallet.start_threads(self.network) self.contacts = StoreDict(self.config, 'contacts') locale.setlocale(locale.LC_ALL, '') self.encoding = locale.getpreferredencoding() self.stdscr = curses.initscr() curses.noecho() curses.cbreak() curses.start_color() curses.use_default_colors() curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLUE) curses.init_pair(2, curses.COLOR_WHITE, curses.COLOR_CYAN) curses.init_pair(3, curses.COLOR_BLACK, curses.COLOR_WHITE); self.stdscr.keypad(1) self.stdscr.border(0) self.maxy, self.maxx = self.stdscr.getmaxyx() self.set_cursor(0) self.w = curses.newwin(10, 50, 5, 5) set_verbosity(False) self.tab = 0 self.pos = 0 self.popup_pos = 0 self.str_recipient = "" self.str_description = "" self.str_amount = "" self.str_fee = "" self.history = None if self.network: self.network.register_callback(self.update, ['updated']) self.tab_names = [_("History"), _("Send"), _("Receive"), _("Addresses"), _("Contacts"), _("Banner")] self.num_tabs = len(self.tab_names) def set_cursor(self, x): try: curses.curs_set(x) except Exception: pass def restore_or_create(self): pass def verify_seed(self): pass def get_string(self, y, x): self.set_cursor(1) curses.echo() self.stdscr.addstr( y, x, " "*20, curses.A_REVERSE) s = self.stdscr.getstr(y,x) curses.noecho() self.set_cursor(0) return s def update(self, event): self.update_history() if self.tab == 0: self.print_history() self.refresh() def print_history(self): width = [20, 40, 14, 14] delta = (self.maxx - sum(width) - 4)/3 format_str = "%"+"%d"%width[0]+"s"+"%"+"%d"%(width[1]+delta)+"s"+"%"+"%d"%(width[2]+delta)+"s"+"%"+"%d"%(width[3]+delta)+"s" if self.history is None: self.update_history() self.print_list(self.history[::-1], format_str%( _("Date"), _("Description"), _("Amount"), _("Balance"))) def update_history(self): width = [20, 40, 14, 14] delta = (self.maxx - sum(width) - 4)/3 format_str = "%"+"%d"%width[0]+"s"+"%"+"%d"%(width[1]+delta)+"s"+"%"+"%d"%(width[2]+delta)+"s"+"%"+"%d"%(width[3]+delta)+"s" b = 0 self.history = [] for item in self.wallet.get_history(): tx_hash, conf, value, timestamp, balance = item if conf: try: time_str = datetime.datetime.fromtimestamp(timestamp).isoformat(' ')[:-3] except Exception: time_str = "------" else: time_str = 'pending' label = self.wallet.get_label(tx_hash) if len(label) > 40: label = label[0:37] + '...' 
self.history.append( format_str%( time_str, label, format_satoshis(value, whitespaces=True), format_satoshis(balance, whitespaces=True) ) ) def print_balance(self): if not self.network: msg = _("Offline") elif self.network.is_connected(): if not self.wallet.up_to_date: msg = _("Synchronizing...") else: c, u, x = self.wallet.get_balance() msg = _("Balance")+": %f "%(Decimal(c) / COIN) if u: msg += " [%f unconfirmed]"%(Decimal(u) / COIN) if x: msg += " [%f unmatured]"%(Decimal(x) / COIN) else: msg = _("Not connected") self.stdscr.addstr( self.maxy -1, 3, msg) for i in range(self.num_tabs): self.stdscr.addstr( 0, 2 + 2*i + len(''.join(self.tab_names[0:i])), ' '+self.tab_names[i]+' ', curses.A_BOLD if self.tab == i else 0) self.stdscr.addstr(self.maxy -1, self.maxx-30, ' '.join([_("Settings"), _("Network"), _("Quit")])) def print_receive(self): addr = self.wallet.get_unused_address(None) self.stdscr.addstr(2, 1, "Address: "+addr) self.print_qr(addr) def print_contacts(self): messages = map(lambda x: "%20s %45s "%(x[0], x[1][1]), self.contacts.items()) self.print_list(messages, "%19s %15s "%("Key", "Value")) def print_addresses(self): fmt = "%-35s %-30s" messages = map(lambda addr: fmt % (addr, self.wallet.labels.get(addr,"")), self.wallet.addresses()) self.print_list(messages, fmt % ("Address", "Label")) def print_edit_line(self, y, label, text, index, size): text += " "*(size - len(text) ) self.stdscr.addstr( y, 2, label) self.stdscr.addstr( y, 15, text, curses.A_REVERSE if self.pos%6==index else curses.color_pair(1)) def print_send_tab(self): self.stdscr.clear() self.print_edit_line(3, _("Pay to"), self.str_recipient, 0, 40) self.print_edit_line(5, _("Description"), self.str_description, 1, 40) self.print_edit_line(7, _("Amount"), self.str_amount, 2, 15) self.print_edit_line(9, _("Fee"), self.str_fee, 3, 15) self.stdscr.addstr( 12, 15, _("[Send]"), curses.A_REVERSE if self.pos%6==4 else curses.color_pair(2)) self.stdscr.addstr( 12, 25, _("[Clear]"), curses.A_REVERSE if self.pos%6==5 else curses.color_pair(2)) def print_banner(self): if self.network: self.print_list( self.network.banner.split('\n')) def print_qr(self, data): import qrcode, StringIO s = StringIO.StringIO() self.qr = qrcode.QRCode() self.qr.add_data(data) self.qr.print_ascii(out=s, invert=False) msg = s.getvalue() lines = msg.split('\n') for i, l in enumerate(lines): l = l.encode("utf-8") self.stdscr.addstr(i+5, 5, l, curses.color_pair(3)) def print_list(self, list, firstline = None): self.maxpos = len(list) if not self.maxpos: return if firstline: firstline += " "*(self.maxx -2 - len(firstline)) self.stdscr.addstr( 1, 1, firstline ) for i in range(self.maxy-4): msg = list[i] if i < len(list) else "" msg += " "*(self.maxx - 2 - len(msg)) m = msg[0:self.maxx - 2] m = m.encode(self.encoding) self.stdscr.addstr( i+2, 1, m, curses.A_REVERSE if i == (self.pos % self.maxpos) else 0) def refresh(self): if self.tab == -1: return self.stdscr.border(0) self.print_balance() self.stdscr.refresh() def main_command(self): c = self.stdscr.getch() print c if c == curses.KEY_RIGHT: self.tab = (self.tab + 1)%self.num_tabs elif c == curses.KEY_LEFT: self.tab = (self.tab - 1)%self.num_tabs elif c == curses.KEY_DOWN: self.pos +=1 elif c == curses.KEY_UP: self.pos -= 1 elif c == 9: self.pos +=1 # tab elif curses.unctrl(c) in ['^W', '^C', '^X', '^Q']: self.tab = -1 elif curses.unctrl(c) in ['^N']: self.network_dialog() elif curses.unctrl(c) == '^S': self.settings_dialog() else: return c if self.pos<0: self.pos=0 if self.pos>=self.maxpos: 
self.pos=self.maxpos - 1 def run_tab(self, i, print_func, exec_func): while self.tab == i: self.stdscr.clear() print_func() self.refresh() c = self.main_command() if c: exec_func(c) def run_history_tab(self, c): if c == 10: out = self.run_popup('',["blah","foo"]) def edit_str(self, target, c, is_num=False): # detect backspace if c in [8, 127, 263] and target: target = target[:-1] elif not is_num or curses.unctrl(c) in '0123456789.': target += curses.unctrl(c) return target def run_send_tab(self, c): if self.pos%6 == 0: self.str_recipient = self.edit_str(self.str_recipient, c) if self.pos%6 == 1: self.str_description = self.edit_str(self.str_description, c) if self.pos%6 == 2: self.str_amount = self.edit_str(self.str_amount, c, True) elif self.pos%6 == 3: self.str_fee = self.edit_str(self.str_fee, c, True) elif self.pos%6==4: if c == 10: self.do_send() elif self.pos%6==5: if c == 10: self.do_clear() def run_receive_tab(self, c): if c == 10: out = self.run_popup('Address', ["Edit label", "Freeze", "Prioritize"]) def run_contacts_tab(self, c): if c == 10 and self.contacts: out = self.run_popup('Adress', ["Copy", "Pay to", "Edit label", "Delete"]).get('button') key = self.contacts.keys()[self.pos%len(self.contacts.keys())] if out == "Pay to": self.tab = 1 self.str_recipient = key self.pos = 2 elif out == "Edit label": s = self.get_string(6 + self.pos, 18) if s: self.wallet.labels[address] = s def run_banner_tab(self, c): self.show_message(repr(c)) pass def main(self): tty.setraw(sys.stdin) while self.tab != -1: self.run_tab(0, self.print_history, self.run_history_tab) self.run_tab(1, self.print_send_tab, self.run_send_tab) self.run_tab(2, self.print_receive, self.run_receive_tab) self.run_tab(3, self.print_addresses, self.run_banner_tab) self.run_tab(4, self.print_contacts, self.run_contacts_tab) self.run_tab(5, self.print_banner, self.run_banner_tab) tty.setcbreak(sys.stdin) curses.nocbreak() self.stdscr.keypad(0) curses.echo() curses.endwin() def do_clear(self): self.str_amount = '' self.str_recipient = '' self.str_fee = '' self.str_description = '' def do_send(self): if not is_valid(self.str_recipient): self.show_message(_('Invalid Bitcoin address')) return try: amount = int(Decimal(self.str_amount) * COIN) except Exception: self.show_message(_('Invalid Amount')) return try: fee = int(Decimal(self.str_fee) * COIN) except Exception: self.show_message(_('Invalid Fee')) return if self.wallet.use_encryption: password = self.password_dialog() if not password: return else: password = None try: tx = self.wallet.mktx([(TYPE_ADDRESS, self.str_recipient, amount)], password, self.config, fee) except Exception as e: self.show_message(str(e)) return if self.str_description: self.wallet.labels[tx.hash()] = self.str_description h = self.wallet.send_tx(tx) self.show_message(_("Please wait..."), getchar=False) self.wallet.tx_event.wait() status, msg = self.wallet.receive_tx( h, tx ) if status: self.show_message(_('Payment sent.')) self.do_clear() #self.update_contacts_tab() else: self.show_message(_('Error')) def show_message(self, message, getchar = True): w = self.w w.clear() w.border(0) for i, line in enumerate(message.split('\n')): w.addstr(2+i,2,line) w.refresh() if getchar: c = self.stdscr.getch() def run_popup(self, title, items): return self.run_dialog(title, map(lambda x: {'type':'button','label':x}, items), interval=1, y_pos = self.pos+3) def network_dialog(self): if not self.network: return params = self.network.get_parameters() host, port, protocol, proxy_config, auto_connect = params srv = 
'auto-connect' if auto_connect else self.network.default_server out = self.run_dialog('Network', [ {'label':'server', 'type':'str', 'value':srv}, {'label':'proxy', 'type':'str', 'value':self.config.get('proxy', '')}, ], buttons = 1) if out: if out.get('server'): server = out.get('server') auto_connect = server == 'auto-connect' if not auto_connect: try: host, port, protocol = server.split(':') except Exception: self.show_message("Error:" + server + "\nIn doubt, type \"auto-connect\"") return False proxy = self.parse_proxy_options(out.get('proxy')) if out.get('proxy') else None self.network.set_parameters(host, port, protocol, proxy, auto_connect) def settings_dialog(self): fee = str(Decimal(self.wallet.fee_per_kb(self.config)) / COIN) out = self.run_dialog('Settings', [ {'label':'Default fee', 'type':'satoshis', 'value': fee } ], buttons = 1) if out: if out.get('Default fee'): fee = int(Decimal(out['Default fee']) * COIN) self.config.set_key('fee_per_kb', fee, True) def password_dialog(self): out = self.run_dialog('Password', [ {'label':'Password', 'type':'password', 'value':''} ], buttons = 1) return out.get('Password') def run_dialog(self, title, items, interval=2, buttons=None, y_pos=3): self.popup_pos = 0 self.w = curses.newwin( 5 + len(items)*interval + (2 if buttons else 0), 50, y_pos, 5) w = self.w out = {} while True: w.clear() w.border(0) w.addstr( 0, 2, title) num = len(items) numpos = num if buttons: numpos += 2 for i in range(num): item = items[i] label = item.get('label') if item.get('type') == 'list': value = item.get('value','') elif item.get('type') == 'satoshis': value = item.get('value','') elif item.get('type') == 'str': value = item.get('value','') elif item.get('type') == 'password': value = '*'*len(item.get('value','')) else: value = '' if value is None: value = '' if len(value)<20: value += ' '*(20-len(value)) if item.has_key('value'): w.addstr( 2+interval*i, 2, label) w.addstr( 2+interval*i, 15, value, curses.A_REVERSE if self.popup_pos%numpos==i else curses.color_pair(1) ) else: w.addstr( 2+interval*i, 2, label, curses.A_REVERSE if self.popup_pos%numpos==i else 0) if buttons: w.addstr( 5+interval*i, 10, "[ ok ]", curses.A_REVERSE if self.popup_pos%numpos==(numpos-2) else curses.color_pair(2)) w.addstr( 5+interval*i, 25, "[cancel]", curses.A_REVERSE if self.popup_pos%numpos==(numpos-1) else curses.color_pair(2)) w.refresh() c = self.stdscr.getch() if c in [ord('q'), 27]: break elif c in [curses.KEY_LEFT, curses.KEY_UP]: self.popup_pos -= 1 elif c in [curses.KEY_RIGHT, curses.KEY_DOWN]: self.popup_pos +=1 else: i = self.popup_pos%numpos if buttons and c==10: if i == numpos-2: return out elif i == numpos -1: return {} item = items[i] _type = item.get('type') if _type == 'str': item['value'] = self.edit_str(item['value'], c) out[item.get('label')] = item.get('value') elif _type == 'password': item['value'] = self.edit_str(item['value'], c) out[item.get('label')] = item ['value'] elif _type == 'satoshis': item['value'] = self.edit_str(item['value'], c, True) out[item.get('label')] = item.get('value') elif _type == 'list': choices = item.get('choices') try: j = choices.index(item.get('value')) except Exception: j = 0 new_choice = choices[(j + 1)% len(choices)] item['value'] = new_choice out[item.get('label')] = item.get('value') elif _type == 'button': out['button'] = item.get('label') break return out
gpl-3.0
8,968,254,148,008,102,000
35.03854
151
0.511679
false
ellipsis14/dolfin-adjoint
dolfin_adjoint/newton_solver.py
1
1424
import dolfin
import solving
import libadjoint
import adjglobals
import adjlinalg
import utils

class NewtonSolver(dolfin.NewtonSolver):
  '''This object is overloaded so that solves using this class are automatically annotated,
  so that libadjoint can automatically derive the adjoint and tangent linear models.'''
  def solve(self, *args, **kwargs):
    '''To disable the annotation, just pass :py:data:`annotate=False` to this routine, and it acts
    exactly like the Dolfin solve call. This is useful in cases where the solve is known to be
    irrelevant or diagnostic for the purposes of the adjoint computation (such as projecting fields
    to other function spaces for the purposes of visualisation).'''

    to_annotate = utils.to_annotate(kwargs.pop("annotate", None))

    if to_annotate:
      factory = args[0]
      vec = args[1]
      b = dolfin.as_backend_type(vec).__class__()

      factory.F(b=b, x=vec)

      F = b.form
      bcs = b.bcs

      u = vec.function
      var = adjglobals.adj_variables[u]

      solving.annotate(F == 0, u, bcs, solver_parameters={"newton_solver": self.parameters.to_dict()})

    newargs = [self] + list(args)
    out = dolfin.NewtonSolver.solve(*newargs, **kwargs)

    if to_annotate and dolfin.parameters["adjoint"]["record_all"]:
      adjglobals.adjointer.record_variable(adjglobals.adj_variables[u], libadjoint.MemoryStorage(adjlinalg.Vector(u)))

    return out
lgpl-3.0
6,464,112,461,163,999,000
34.6
118
0.706461
false
StefanoD/ComputerVision
Uebung/Uebung2/src/exercise2.py
1
1858
import numpy as np
from scipy.misc import imsave

from libcore import Img
from libcore import DistortionCorrection, DistortionCorrectionPoint

image_path = '../schraegbild_tempelhof.jpg'


def main():
    b1()


def b1():
    image = Img.load_image(image_path)

    target_image_size_height = 900
    target_image_size_witdh = 600

    new_x_size = target_image_size_witdh*3
    new_y_size = target_image_size_height*3

    new_image = np.zeros((new_x_size, new_y_size, 3))

    # top left
    pass_point_1_x = 344.0
    pass_point_1_y = 334.0

    target_point_1_x = 0.0
    target_point_1_y = 0.0

    # bottom left
    pass_point_2_x = 300.0
    pass_point_2_y = 456.0

    target_point_2_x = 0.0
    target_point_2_y = target_image_size_height

    pass_point_3_x = 694.0
    pass_point_3_y = 432.0

    # bottom right
    #pass_point_3_x = 690.0
    #pass_point_3_y = 460.0

    target_point_3_x = target_image_size_witdh
    target_point_3_y = target_image_size_height

    # top right
    pass_point_4_x = 548.0
    pass_point_4_y = 330.0

    target_point_4_x = target_image_size_witdh
    target_point_4_y = 0.0

    translation_factor = 0

    points = [DistortionCorrectionPoint(344.0, 344.0, 0.0+translation_factor, 0.0+translation_factor),  # top left
              DistortionCorrectionPoint(300.0, 456.0, 0.0+translation_factor, target_image_size_height+translation_factor),  # bottom left
              DistortionCorrectionPoint(694.0, 432.0, target_image_size_witdh+translation_factor, target_image_size_height+translation_factor),
              DistortionCorrectionPoint(548.0, 330.0, target_image_size_witdh+translation_factor, 0.0+translation_factor)]  # bottom right

    new_image = DistortionCorrection.distortion_correction(points, image, new_image)

    imsave("../images/test.jpg", new_image)


if __name__ == "__main__":
    main()
apache-2.0
4,719,558,664,374,528,000
28.03125
145
0.664693
false
miacro/pkget
pkget/test/test_basic_config.py
1
4954
import unittest

from pkget import BasicConfig, PkgetError


class BasicConfigTest(unittest.TestCase):
    def setUp(self):
        self.basic_config = BasicConfig(
            attributes={"installprefix": None,
                        "pkginfoprefix": "",
                        "recipepaths": [],
                        "configfiles": ["~/.local"],
                        "globally": False,
                        "install": True,
                        "uninstall": {"value": False},
                        "dict_attr": {"value": {}},
                        "sub_attr": {}})

    def test_getattr(self):
        self.assertEqual(self.basic_config.installprefix, None)
        self.assertEqual(self.basic_config.pkginfoprefix, "")
        self.assertEqual(self.basic_config.recipepaths, [])
        self.assertEqual(self.basic_config.configfiles, ["~/.local"])
        self.assertEqual(self.basic_config.globally, False)
        self.assertEqual(self.basic_config.install, True)
        self.assertEqual(self.basic_config.uninstall, False)
        self.assertEqual(self.basic_config.dict_attr, {})
        self.assertEqual(self.basic_config.sub_attr, None)

    def test_setattr(self):
        with self.assertRaises(PkgetError):
            self.basic_config.installprefix = True
            self.basic_config.recipepaths = {}

        self.basic_config.installprefix = "~/.local"
        self.assertEqual(self.basic_config.installprefix, "~/.local")
        self.basic_config.pkginfoprefix = "/usr/local"
        self.assertEqual(self.basic_config.pkginfoprefix, "/usr/local")

        self.basic_config.recipepaths.append("/usr/local")
        self.assertEqual(self.basic_config.recipepaths, ["/usr/local"])
        self.basic_config.configfiles = ["/usr/local"]
        self.assertEqual(self.basic_config.configfiles, ["/usr/local"])

        self.basic_config.globally = True
        self.assertEqual(self.basic_config.globally, True)
        self.basic_config.install = False
        self.assertEqual(self.basic_config.install, False)
        self.basic_config.uninstall = True
        self.assertEqual(self.basic_config.uninstall, True)

        self.basic_config.dict_attr["a"] = 1
        self.assertEqual(self.basic_config.dict_attr, {"a": 1})
        self.basic_config.dict_attr = {"b": 2}
        self.assertEqual(self.basic_config.dict_attr, {"b": 2})
        self.basic_config.sub_attr = "test"
        self.assertEqual(self.basic_config.sub_attr, "test")

    def test_update_value(self):
        with self.assertRaises(PkgetError):
            self.basic_config.update_value("install", ["usr", "local"])

        self.basic_config.update_value(
            "test_not_exists", {"test_not_exists": True})

        self.basic_config.update_value(
            "installprefix", {"installprefix": "~/.local"})
        self.assertEqual(self.basic_config.installprefix, "~/.local")

        self.basic_config.update_value(
            "configfiles", {"configfiles": ["/usr/local"]})
        self.assertEqual(
            self.basic_config.configfiles, ["~/.local", "/usr/local"])
        self.basic_config.update_value(
            "configfiles", {"configfiles": ["/usr/local"]}, merge_value=False)
        self.assertEqual(
            self.basic_config.configfiles, ["/usr/local"])

        self.basic_config.update_value("globally", {"globally": False})
        self.assertEqual(self.basic_config.globally, False)

    def test_update_config(self):
        self.basic_config.update_config(
            config={"installprefix": "~/",
                    "pkginfoprefix": "~",
                    "recipepaths": ["~/"],
                    "globally": False,
                    "configfiles": ["~/"],
                    "dict_attr": {"a": 1}},
            merge_value=False)
        self.assertEqual(self.basic_config.installprefix, "~/")
        self.assertEqual(self.basic_config.pkginfoprefix, "~")
        self.assertEqual(self.basic_config.recipepaths, ["~/"])
        self.assertEqual(self.basic_config.globally, False)
        self.assertEqual(self.basic_config.configfiles, ["~/"])
        self.assertEqual(self.basic_config.dict_attr, {"a": 1})

        self.basic_config.update_config(
            config={"installprefix": "~/local",
                    "pkginfoprefix": "~/local",
                    "recipepaths": ["/usr", "/usr/bin"],
                    "globally": True,
                    "configfiles": ["~/", "~/abc"],
                    "dict_attr": {"b": 2}},
            merge_value=True)
        self.assertEqual(self.basic_config.installprefix, "~/local")
        self.assertEqual(self.basic_config.pkginfoprefix, "~/local")
        self.assertEqual(
            self.basic_config.recipepaths, ["~/", "/usr", "/usr/bin"])
        self.assertEqual(self.basic_config.globally, True)
        self.assertEqual(self.basic_config.configfiles, ["~/", "~/", "~/abc"])
        self.assertEqual(self.basic_config.dict_attr, {"a": 1, "b": 2})
gpl-3.0
5,959,843,345,818,618,000
47.568627
78
0.582963
false
kalikaneko/leap_mail
src/leap/mail/imap/mailbox.py
1
29101
# *- coding: utf-8 -*- # mailbox.py # Copyright (C) 2013 LEAP # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """ Soledad Mailbox. """ import copy import threading import logging import StringIO import cStringIO import os from collections import defaultdict from twisted.internet import defer from twisted.internet.task import deferLater from twisted.python import log from twisted.mail import imap4 from zope.interface import implements from leap.common import events as leap_events from leap.common.events.events_pb2 import IMAP_UNREAD_MAIL from leap.common.check import leap_assert, leap_assert_type from leap.mail.decorators import deferred_to_thread from leap.mail.utils import empty from leap.mail.imap.fields import WithMsgFields, fields from leap.mail.imap.messages import MessageCollection from leap.mail.imap.messageparts import MessageWrapper from leap.mail.imap.parser import MBoxParser logger = logging.getLogger(__name__) """ If the environment variable `LEAP_SKIPNOTIFY` is set, we avoid notifying clients of new messages. Use during stress tests. """ NOTIFY_NEW = not os.environ.get('LEAP_SKIPNOTIFY', False) class SoledadMailbox(WithMsgFields, MBoxParser): """ A Soledad-backed IMAP mailbox. Implements the high-level method needed for the Mailbox interfaces. The low-level database methods are contained in MessageCollection class, which we instantiate and make accessible in the `messages` attribute. """ implements( imap4.IMailbox, imap4.IMailboxInfo, imap4.ICloseableMailbox, imap4.ISearchableMailbox, imap4.IMessageCopier) # XXX should finish the implementation of IMailboxListener # XXX should completely implement ISearchableMailbox too messages = None _closed = False INIT_FLAGS = (WithMsgFields.SEEN_FLAG, WithMsgFields.ANSWERED_FLAG, WithMsgFields.FLAGGED_FLAG, WithMsgFields.DELETED_FLAG, WithMsgFields.DRAFT_FLAG, WithMsgFields.RECENT_FLAG, WithMsgFields.LIST_FLAG) flags = None CMD_MSG = "MESSAGES" CMD_RECENT = "RECENT" CMD_UIDNEXT = "UIDNEXT" CMD_UIDVALIDITY = "UIDVALIDITY" CMD_UNSEEN = "UNSEEN" # FIXME we should turn this into a datastructure with limited capacity _listeners = defaultdict(set) next_uid_lock = threading.Lock() def __init__(self, mbox, soledad, memstore, rw=1): """ SoledadMailbox constructor. Needs to get passed a name, plus a Soledad instance. :param mbox: the mailbox name :type mbox: str :param soledad: a Soledad instance. 
:type soledad: Soledad :param memstore: a MemoryStore instance :type memstore: MemoryStore :param rw: read-and-write flag for this mailbox :type rw: int """ leap_assert(mbox, "Need a mailbox name to initialize") leap_assert(soledad, "Need a soledad instance to initialize") # XXX should move to wrapper #leap_assert(isinstance(soledad._db, SQLCipherDatabase), #"soledad._db must be an instance of SQLCipherDatabase") self.mbox = self._parse_mailbox_name(mbox) self.rw = rw self._soledad = soledad self._memstore = memstore self.messages = MessageCollection( mbox=mbox, soledad=self._soledad, memstore=self._memstore) if not self.getFlags(): self.setFlags(self.INIT_FLAGS) if self._memstore: self.prime_known_uids_to_memstore() self.prime_last_uid_to_memstore() @property def listeners(self): """ Returns listeners for this mbox. The server itself is a listener to the mailbox. so we can notify it (and should!) after changes in flags and number of messages. :rtype: set """ return self._listeners[self.mbox] # TODO this grows too crazily when many instances are fired, like # during imaptest stress testing. Should have a queue of limited size # instead. def addListener(self, listener): """ Add a listener to the listeners queue. The server adds itself as a listener when there is a SELECT, so it can send EXIST commands. :param listener: listener to add :type listener: an object that implements IMailboxListener """ if not NOTIFY_NEW: return logger.debug('adding mailbox listener: %s' % listener) self.listeners.add(listener) def removeListener(self, listener): """ Remove a listener from the listeners queue. :param listener: listener to remove :type listener: an object that implements IMailboxListener """ self.listeners.remove(listener) # TODO move completely to soledadstore, under memstore reponsibility. def _get_mbox(self): """ Return mailbox document. :return: A SoledadDocument containing this mailbox, or None if the query failed. :rtype: SoledadDocument or None. """ try: query = self._soledad.get_from_index( fields.TYPE_MBOX_IDX, fields.TYPE_MBOX_VAL, self.mbox) if query: return query.pop() except Exception as exc: logger.exception("Unhandled error %r" % exc) def getFlags(self): """ Returns the flags defined for this mailbox. :returns: tuple of flags for this mailbox :rtype: tuple of str """ mbox = self._get_mbox() if not mbox: return None flags = mbox.content.get(self.FLAGS_KEY, []) return map(str, flags) def setFlags(self, flags): """ Sets flags for this mailbox. :param flags: a tuple with the flags :type flags: tuple of str """ leap_assert(isinstance(flags, tuple), "flags expected to be a tuple") mbox = self._get_mbox() if not mbox: return None mbox.content[self.FLAGS_KEY] = map(str, flags) self._soledad.put_doc(mbox) # XXX SHOULD BETTER IMPLEMENT ADD_FLAG, REMOVE_FLAG. def _get_closed(self): """ Return the closed attribute for this mailbox. :return: True if the mailbox is closed :rtype: bool """ mbox = self._get_mbox() return mbox.content.get(self.CLOSED_KEY, False) def _set_closed(self, closed): """ Set the closed attribute for this mailbox. :param closed: the state to be set :type closed: bool """ leap_assert(isinstance(closed, bool), "closed needs to be boolean") mbox = self._get_mbox() mbox.content[self.CLOSED_KEY] = closed self._soledad.put_doc(mbox) closed = property( _get_closed, _set_closed, doc="Closed attribute.") def _get_last_uid(self): """ Return the last uid for this mailbox. 
If we have a memory store, the last UID will be the highest recorded UID in the message store, or a counter cached from the mailbox document in soledad if this is higher. :return: the last uid for messages in this mailbox :rtype: int """ last = self._memstore.get_last_uid(self.mbox) logger.debug("last uid for %s: %s (from memstore)" % ( repr(self.mbox), last)) return last last_uid = property( _get_last_uid, doc="Last_UID attribute.") def prime_last_uid_to_memstore(self): """ Prime memstore with last_uid value """ set_exist = set(self.messages.all_uid_iter()) last = max(set_exist) if set_exist else 0 logger.info("Priming Soledad last_uid to %s" % (last,)) self._memstore.set_last_soledad_uid(self.mbox, last) def prime_known_uids_to_memstore(self): """ Prime memstore with the set of all known uids. We do this to be able to filter the requests efficiently. """ known_uids = self.messages.all_soledad_uid_iter() self._memstore.set_known_uids(self.mbox, known_uids) def getUIDValidity(self): """ Return the unique validity identifier for this mailbox. :return: unique validity identifier :rtype: int """ mbox = self._get_mbox() return mbox.content.get(self.CREATED_KEY, 1) def getUID(self, message): """ Return the UID of a message in the mailbox .. note:: this implementation does not make much sense RIGHT NOW, but in the future will be useful to get absolute UIDs from message sequence numbers. :param message: the message uid :type message: int :rtype: int """ msg = self.messages.get_msg_by_uid(message) return msg.getUID() def getUIDNext(self): """ Return the likely UID for the next message added to this mailbox. Currently it returns the higher UID incremented by one. We increment the next uid *each* time this function gets called. In this way, there will be gaps if the message with the allocated uid cannot be saved. But that is preferable to having race conditions if we get to parallel message adding. :rtype: int """ with self.next_uid_lock: if self._memstore: return self.last_uid + 1 else: # XXX after lock, it should be safe to # return just the increment here, and # have a different method that actually increments # the counter when really adding. self.last_uid += 1 return self.last_uid def getMessageCount(self): """ Returns the total count of messages in this mailbox. :rtype: int """ return self.messages.count() def getUnseenCount(self): """ Returns the number of messages with the 'Unseen' flag. :return: count of messages flagged `unseen` :rtype: int """ return self.messages.count_unseen() def getRecentCount(self): """ Returns the number of messages with the 'Recent' flag. :return: count of messages flagged `recent` :rtype: int """ return self.messages.count_recent() def isWriteable(self): """ Get the read/write status of the mailbox. :return: 1 if mailbox is read-writeable, 0 otherwise. :rtype: int """ return self.rw def getHierarchicalDelimiter(self): """ Returns the character used to delimite hierarchies in mailboxes. :rtype: str """ return '/' def requestStatus(self, names): """ Handles a status request by gathering the output of the different status commands. 
:param names: a list of strings containing the status commands :type names: iter """ r = {} if self.CMD_MSG in names: r[self.CMD_MSG] = self.getMessageCount() if self.CMD_RECENT in names: r[self.CMD_RECENT] = self.getRecentCount() if self.CMD_UIDNEXT in names: r[self.CMD_UIDNEXT] = self.last_uid + 1 if self.CMD_UIDVALIDITY in names: r[self.CMD_UIDVALIDITY] = self.getUIDValidity() if self.CMD_UNSEEN in names: r[self.CMD_UNSEEN] = self.getUnseenCount() return defer.succeed(r) def addMessage(self, message, flags, date=None): """ Adds a message to this mailbox. :param message: the raw message :type message: str :param flags: flag list :type flags: list of str :param date: timestamp :type date: str :return: a deferred that evals to None """ # TODO have a look at the cases for internal date in the rfc if isinstance(message, (cStringIO.OutputType, StringIO.StringIO)): message = message.getvalue() # XXX we could treat the message as an IMessage from here leap_assert_type(message, basestring) if flags is None: flags = tuple() else: flags = tuple(str(flag) for flag in flags) d = self._do_add_message(message, flags=flags, date=date) return d def _do_add_message(self, message, flags, date): """ Calls to the messageCollection add_msg method. Invoked from addMessage. """ d = self.messages.add_msg(message, flags=flags, date=date) # XXX Removing notify temporarily. # This is interfering with imaptest results. I'm not clear if it's # because we clutter the logging or because the set of listeners is # ever-growing. We should come up with some smart way of dealing with # it, or maybe just disabling it using an environmental variable since # we will only have just a few listeners in the regular desktop case. #d.addCallback(self.notify_new) return d def notify_new(self, *args): """ Notify of new messages to all the listeners. :param args: ignored. """ if not NOTIFY_NEW: return exists = self.getMessageCount() recent = self.getRecentCount() logger.debug("NOTIFY: there are %s messages, %s recent" % ( exists, recent)) for l in self.listeners: logger.debug('notifying...') l.newMessages(exists, recent) # commands, do not rename methods def destroy(self): """ Called before this mailbox is permanently deleted. Should cleanup resources, and set the \\Noselect flag on the mailbox. """ self.setFlags((self.NOSELECT_FLAG,)) self.deleteAllDocs() # XXX removing the mailbox in situ for now, # we should postpone the removal # XXX move to memory store?? self._soledad.delete_doc(self._get_mbox()) def _close_cb(self, result): self.closed = True def close(self): """ Expunge and mark as closed """ d = self.expunge() d.addCallback(self._close_cb) return d def expunge(self): """ Remove all messages flagged \\Deleted """ if not self.isWriteable(): raise imap4.ReadOnlyMailbox d = defer.Deferred() self._memstore.expunge(self.mbox, d) return d def _bound_seq(self, messages_asked): """ Put an upper bound to a messages sequence if this is open. :param messages_asked: IDs of the messages. :type messages_asked: MessageSet :rtype: MessageSet """ if not messages_asked.last: try: iter(messages_asked) except TypeError: # looks like we cannot iterate try: messages_asked.last = self.last_uid except ValueError: pass return messages_asked def _filter_msg_seq(self, messages_asked): """ Filter a message sequence returning only the ones that do exist in the collection. :param messages_asked: IDs of the messages. 
:type messages_asked: MessageSet :rtype: set """ set_asked = set(messages_asked) set_exist = set(self.messages.all_uid_iter()) seq_messg = set_asked.intersection(set_exist) return seq_messg @deferred_to_thread #@profile def fetch(self, messages_asked, uid): """ Retrieve one or more messages in this mailbox. from rfc 3501: The data items to be fetched can be either a single atom or a parenthesized list. :param messages_asked: IDs of the messages to retrieve information about :type messages_asked: MessageSet :param uid: If true, the IDs are UIDs. They are message sequence IDs otherwise. :type uid: bool :rtype: A tuple of two-tuples of message sequence numbers and LeapMessage """ # For the moment our UID is sequential, so we # can treat them all the same. # Change this to the flag that twisted expects when we # switch to content-hash based index + local UID table. sequence = False #sequence = True if uid == 0 else False messages_asked = self._bound_seq(messages_asked) seq_messg = self._filter_msg_seq(messages_asked) getmsg = lambda uid: self.messages.get_msg_by_uid(uid) # for sequence numbers (uid = 0) if sequence: logger.debug("Getting msg by index: INEFFICIENT call!") raise NotImplementedError else: result = ((msgid, getmsg(msgid)) for msgid in seq_messg) return result @deferred_to_thread def fetch_flags(self, messages_asked, uid): """ A fast method to fetch all flags, tricking just the needed subset of the MIME interface that's needed to satisfy a generic FLAGS query. Given how LEAP Mail is supposed to work without local cache, this query is going to be quite common, and also we expect it to be in the form 1:* at the beginning of a session, so it's not bad to fetch all the FLAGS docs at once. :param messages_asked: IDs of the messages to retrieve information about :type messages_asked: MessageSet :param uid: If true, the IDs are UIDs. They are message sequence IDs otherwise. :type uid: bool :return: A tuple of two-tuples of message sequence numbers and flagsPart, which is a only a partial implementation of MessagePart. :rtype: tuple """ class flagsPart(object): def __init__(self, uid, flags): self.uid = uid self.flags = flags def getUID(self): return self.uid def getFlags(self): return map(str, self.flags) messages_asked = self._bound_seq(messages_asked) seq_messg = self._filter_msg_seq(messages_asked) all_flags = self.messages.all_flags() result = ((msgid, flagsPart( msgid, all_flags.get(msgid, tuple()))) for msgid in seq_messg) return result @deferred_to_thread def fetch_headers(self, messages_asked, uid): """ A fast method to fetch all headers, tricking just the needed subset of the MIME interface that's needed to satisfy a generic HEADERS query. Given how LEAP Mail is supposed to work without local cache, this query is going to be quite common, and also we expect it to be in the form 1:* at the beginning of a session, so **MAYBE** it's not too bad to fetch all the HEADERS docs at once. :param messages_asked: IDs of the messages to retrieve information about :type messages_asked: MessageSet :param uid: If true, the IDs are UIDs. They are message sequence IDs otherwise. :type uid: bool :return: A tuple of two-tuples of message sequence numbers and headersPart, which is a only a partial implementation of MessagePart. 
:rtype: tuple """ class headersPart(object): def __init__(self, uid, headers): self.uid = uid self.headers = headers def getUID(self): return self.uid def getHeaders(self, _): return dict( (str(key), str(value)) for key, value in self.headers.items()) messages_asked = self._bound_seq(messages_asked) seq_messg = self._filter_msg_seq(messages_asked) all_chash = self.messages.all_flags_chash() all_headers = self.messages.all_headers() result = ((msgid, headersPart( msgid, all_headers.get(all_chash.get(msgid, 'nil'), {}))) for msgid in seq_messg) return result def signal_unread_to_ui(self, *args, **kwargs): """ Sends unread event to ui. :param args: ignored :param kwargs: ignored """ unseen = self.getUnseenCount() leap_events.signal(IMAP_UNREAD_MAIL, str(unseen)) def store(self, messages_asked, flags, mode, uid): """ Sets the flags of one or more messages. :param messages: The identifiers of the messages to set the flags :type messages: A MessageSet object with the list of messages requested :param flags: The flags to set, unset, or add. :type flags: sequence of str :param mode: If mode is -1, these flags should be removed from the specified messages. If mode is 1, these flags should be added to the specified messages. If mode is 0, all existing flags should be cleared and these flags should be added. :type mode: -1, 0, or 1 :param uid: If true, the IDs specified in the query are UIDs; otherwise they are message sequence IDs. :type uid: bool :return: A deferred, that will be called with a dict mapping message sequence numbers to sequences of str representing the flags set on the message after this operation has been performed. :rtype: deferred :raise ReadOnlyMailbox: Raised if this mailbox is not open for read-write. """ from twisted.internet import reactor if not self.isWriteable(): log.msg('read only mailbox!') raise imap4.ReadOnlyMailbox d = defer.Deferred() deferLater(reactor, 0, self._do_store, messages_asked, flags, mode, uid, d) return d def _do_store(self, messages_asked, flags, mode, uid, observer): """ Helper method, invoke set_flags method in the MessageCollection. See the documentation for the `store` method for the parameters. :param observer: a deferred that will be called with the dictionary mapping UIDs to flags after the operation has been done. :type observer: deferred """ # XXX implement also sequence (uid = 0) # XXX we should prevent cclient from setting Recent flag? leap_assert(not isinstance(flags, basestring), "flags cannot be a string") flags = tuple(flags) messages_asked = self._bound_seq(messages_asked) seq_messg = self._filter_msg_seq(messages_asked) self.messages.set_flags(self.mbox, seq_messg, flags, mode, observer) # ISearchableMailbox def search(self, query, uid): """ Search for messages that meet the given query criteria. Warning: this is half-baked, and it might give problems since it offers the SearchableInterface. We'll be implementing it asap. :param query: The search criteria :type query: list :param uid: If true, the IDs specified in the query are UIDs; otherwise they are message sequence IDs. :type uid: bool :return: A list of message sequence numbers or message UIDs which match the search criteria or a C{Deferred} whose callback will be invoked with such a list. :rtype: C{list} or C{Deferred} """ # TODO see if we can raise w/o interrupting flow #:raise IllegalQueryError: Raised when query is not valid. # example query: # ['UNDELETED', 'HEADER', 'Message-ID', # '[email protected]'] # TODO hardcoding for now! 
-- we'll support generic queries later on # but doing a quickfix for avoiding duplicat saves in the draft folder. # See issue #4209 if len(query) > 2: if query[1] == 'HEADER' and query[2].lower() == "message-id": msgid = str(query[3]).strip() d = self.messages._get_uid_from_msgid(str(msgid)) d1 = defer.gatherResults([d]) # we want a list, so return it all the same return d1 # nothing implemented for any other query logger.warning("Cannot process query: %s" % (query,)) return [] # IMessageCopier def copy(self, message): """ Copy the given message object into this mailbox. :param message: an IMessage implementor :type message: LeapMessage :return: a deferred that will be fired with the message uid when the copy succeed. :rtype: Deferred """ from twisted.internet import reactor d = defer.Deferred() # XXX this should not happen ... track it down, # probably to FETCH... if message is None: log.msg("BUG: COPY found a None in passed message") d.callback(None) deferLater(reactor, 0, self._do_copy, message, d) return d def _do_copy(self, message, observer): """ Call invoked from the deferLater in `copy`. This will copy the flags and header documents, and pass them to the `create_message` method in the MemoryStore, together with the observer deferred that we've been passed along. :param message: an IMessage implementor :type message: LeapMessage :param observer: the deferred that will fire with the UID of the message :type observer: Deferred """ # XXX for clarity, this could be delegated to a # MessageCollection mixin that implements copy too, and # moved out of here. msg = message memstore = self._memstore # XXX should use a public api instead fdoc = msg._fdoc hdoc = msg._hdoc if not fdoc: logger.warning("Tried to copy a MSG with no fdoc") return new_fdoc = copy.deepcopy(fdoc.content) fdoc_chash = new_fdoc[fields.CONTENT_HASH_KEY] # XXX is this hitting the db??? --- probably. # We should profile after the pre-fetch. dest_fdoc = memstore.get_fdoc_from_chash( fdoc_chash, self.mbox) exist = dest_fdoc and not empty(dest_fdoc.content) if exist: # Should we signal error on the callback? logger.warning("Destination message already exists!") # XXX I'm still not clear if we should raise the # errback. This actually rases an ugly warning # in some muas like thunderbird. I guess the user does # not deserve that. observer.callback(True) else: mbox = self.mbox uid_next = memstore.increment_last_soledad_uid(mbox) new_fdoc[self.UID_KEY] = uid_next new_fdoc[self.MBOX_KEY] = mbox # FIXME set recent! self._memstore.create_message( self.mbox, uid_next, MessageWrapper( new_fdoc, hdoc.content), observer=observer, notify_on_disk=False) # convenience fun def deleteAllDocs(self): """ Delete all docs in this mailbox """ docs = self.messages.get_all_docs() for doc in docs: self.messages._soledad.delete_doc(doc) def unset_recent_flags(self, uids): """ Unset Recent flag for a sequence of UIDs. """ seq_messg = self._bound_seq(uids) self.messages.unset_recent_flags(seq_messg) def __repr__(self): """ Representation string for this mailbox. """ return u"<SoledadMailbox: mbox '%s' (%s)>" % ( self.mbox, self.messages.count())
gpl-3.0
1,271,742,720,128,985,300
32.069318
79
0.595821
false
maxive/erp
addons/event_sale/models/sale_order.py
1
3358
# -*- coding: utf-8 -*-

from odoo import api, fields, models


class SaleOrder(models.Model):
    _inherit = "sale.order"

    @api.multi
    def action_confirm(self):
        self.ensure_one()
        res = super(SaleOrder, self).action_confirm()
        # confirm registration if it was free (otherwise it will be confirmed once invoice fully paid)
        self.order_line._update_registrations(confirm=self.amount_total == 0, cancel_to_draft=False)
        if any(self.order_line.filtered(lambda line: line.event_id)):
            return self.env['ir.actions.act_window'].with_context(default_sale_order_id=self.id).for_xml_id('event_sale', 'action_sale_order_event_registration')
        return res


class SaleOrderLine(models.Model):
    _inherit = 'sale.order.line'

    event_id = fields.Many2one('event.event', string='Event',
                               help="Choose an event and it will automatically create a registration for this event.")
    event_ticket_id = fields.Many2one('event.event.ticket', string='Event Ticket',
                                      help="Choose "
                                           "an event ticket and it will automatically create a registration for this event ticket.")
    event_ok = fields.Boolean(related='product_id.event_ok', readonly=True)

    @api.multi
    def _prepare_invoice_line(self, qty):
        self.ensure_one()
        res = super(SaleOrderLine, self)._prepare_invoice_line(qty)
        if self.event_id:
            res['name'] = '%s: %s' % (res.get('name', ''), self.event_id.name)
        return res

    @api.multi
    def _update_registrations(self, confirm=True, cancel_to_draft=False, registration_data=None):
        """ Create or update registrations linked to a sales order line. A sale
        order line has a product_uom_qty attribute that will be the number of
        registrations linked to this line. This method updates existing
        registrations and creates new ones for the missing ones. """
        Registration = self.env['event.registration']
        registrations = Registration.search([('sale_order_line_id', 'in', self.ids), ('state', '!=', 'cancel')])
        for so_line in self.filtered('event_id'):
            existing_registrations = registrations.filtered(lambda self: self.sale_order_line_id.id == so_line.id)
            if confirm:
                existing_registrations.filtered(lambda self: self.state != 'open').confirm_registration()
            if cancel_to_draft:
                existing_registrations.filtered(lambda self: self.state == 'cancel').do_draft()

            for count in range(int(so_line.product_uom_qty) - len(existing_registrations)):
                registration = {}
                if registration_data:
                    registration = registration_data.pop()
                # TDE CHECK: auto confirmation
                registration['sale_order_line_id'] = so_line
                Registration.with_context(registration_force_draft=True).create(
                    Registration._prepare_attendee_values(registration))
        return True

    @api.onchange('event_ticket_id')
    def _onchange_event_ticket_id(self):
        company = self.event_id.company_id or self.env.user.company_id
        currency = company.currency_id
        self.price_unit = currency._convert(
            self.event_ticket_id.price, self.order_id.currency_id,
            self.order_id.company_id, self.order_id.date_order or fields.Date.today())
agpl-3.0
-8,398,201,484,374,660,000
48.382353
161
0.650387
false
lem9/weblate
weblate/trans/tests/test_models.py
1
12835
# -*- coding: utf-8 -*- # # Copyright © 2012 - 2017 Michal Čihař <[email protected]> # # This file is part of Weblate <https://weblate.org/> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. # """Test for translation models.""" import shutil import os from django.core.management.color import no_style from django.db import connection from django.test import TestCase, LiveServerTestCase from django.test.utils import override_settings from django.utils import timezone from django.contrib.auth.models import User, Group from django.core.exceptions import ValidationError from weblate.trans.models import ( Project, Source, Unit, WhiteboardMessage, Check, ComponentList, AutoComponentList, get_related_units, ) import weblate.trans.models.subproject from weblate.lang.models import Language from weblate.permissions.helpers import can_access_project from weblate.trans.tests.utils import get_test_file, RepoTestMixin def fixup_languages_seq(): # Reset sequence for Language objects as # we're manipulating with them in FixtureTestCase.setUpTestData # and that seems to affect sequence for other tests as well # on some PostgreSQL versions (probably sequence is not rolled back # in a transaction). 
commands = connection.ops.sequence_reset_sql(no_style(), [Language]) if commands: with connection.cursor() as cursor: for sql in commands: cursor.execute(sql) class BaseTestCase(TestCase): @classmethod def setUpTestData(cls): fixup_languages_seq() class BaseLiveServerTestCase(LiveServerTestCase): @classmethod def setUpTestData(cls): fixup_languages_seq() class RepoTestCase(BaseTestCase, RepoTestMixin): """Generic class for tests working with repositories.""" def setUp(self): self.clone_test_repos() class ProjectTest(RepoTestCase): """Project object testing.""" def test_create(self): project = self.create_project() self.assertTrue(os.path.exists(project.get_path())) self.assertTrue(project.slug in project.get_path()) def test_rename(self): project = self.create_project() old_path = project.get_path() self.assertTrue(os.path.exists(old_path)) project.slug = 'changed' project.save() new_path = project.get_path() self.addCleanup(shutil.rmtree, new_path, True) self.assertFalse(os.path.exists(old_path)) self.assertTrue(os.path.exists(new_path)) def test_delete(self): project = self.create_project() self.assertTrue(os.path.exists(project.get_path())) project.delete() self.assertFalse(os.path.exists(project.get_path())) def test_delete_all(self): project = self.create_project() self.assertTrue(os.path.exists(project.get_path())) Project.objects.all().delete() self.assertFalse(os.path.exists(project.get_path())) def test_wrong_path(self): project = self.create_project() with override_settings(DATA_DIR='/weblate-nonexisting-path'): # Invalidate cache, pylint: disable=W0212 project._dir_path = None self.assertRaisesMessage( ValidationError, 'Could not create project directory', project.full_clean ) def test_acl(self): """Test for ACL handling.""" # Create user to verify ACL user = User.objects.create_user( 'testuser', '[email protected]', 'testpassword' ) # Create project project = self.create_project() # Enable ACL project.enable_acl = True project.save() # Check user does not have access self.assertFalse(can_access_project(user, project)) # Add to ACL group user.groups.add(Group.objects.get(name='Test@Translate')) # Need to fetch user again to clear permission cache user = User.objects.get(username='testuser') # We now should have access self.assertTrue(can_access_project(user, project)) class TranslationTest(RepoTestCase): """Translation testing.""" def test_basic(self): project = self.create_subproject() translation = project.translation_set.get(language_code='cs') self.assertEqual(translation.translated, 0) self.assertEqual(translation.total, 4) self.assertEqual(translation.fuzzy, 0) def test_extra_file(self): """Test extra commit file handling.""" subproject = self.create_subproject() subproject.pre_commit_script = get_test_file('hook-generate-mo') weblate.trans.models.subproject.PRE_COMMIT_SCRIPT_CHOICES.append( (subproject.pre_commit_script, 'hook-generate-mo') ) subproject.pre_commit_script = get_test_file('hook-update-linguas') weblate.trans.models.subproject.PRE_COMMIT_SCRIPT_CHOICES.append( (subproject.pre_commit_script, 'hook-update-linguas') ) subproject.extra_commit_file = 'po/%(language)s.mo\npo/LINGUAS' subproject.save() subproject.full_clean() translation = subproject.translation_set.get(language_code='cs') # change backend file with open(translation.get_filename(), 'a') as handle: handle.write(' ') # Test committing translation.git_commit( None, 'TEST <[email protected]>', timezone.now(), force_commit=True ) self.assertFalse(translation.repo_needs_commit()) linguas = 
os.path.join(subproject.get_path(), 'po', 'LINGUAS') with open(linguas, 'r') as handle: data = handle.read() self.assertIn('\ncs\n', data) self.assertFalse(translation.repo_needs_commit()) def test_validation(self): """Translation validation""" project = self.create_subproject() translation = project.translation_set.get(language_code='cs') translation.full_clean() def test_update_stats(self): """Check update stats with no units.""" project = self.create_subproject() translation = project.translation_set.get(language_code='cs') translation.update_stats() translation.unit_set.all().delete() translation.update_stats() class ComponentListTest(RepoTestCase): """Test(s) for ComponentList model.""" def test_slug(self): """Test ComponentList slug.""" clist = ComponentList() clist.slug = 'slug' self.assertEqual(clist.tab_slug(), 'list-slug') def test_auto(self): self.create_subproject() clist = ComponentList.objects.create( name='Name', slug='slug' ) AutoComponentList.objects.create( project_match='^.*$', component_match='^.*$', componentlist=clist ) self.assertEqual( clist.components.count(), 1 ) def test_auto_create(self): clist = ComponentList.objects.create( name='Name', slug='slug' ) AutoComponentList.objects.create( project_match='^.*$', component_match='^.*$', componentlist=clist ) self.assertEqual( clist.components.count(), 0 ) self.create_subproject() self.assertEqual( clist.components.count(), 1 ) def test_auto_nomatch(self): self.create_subproject() clist = ComponentList.objects.create( name='Name', slug='slug' ) AutoComponentList.objects.create( project_match='^none$', component_match='^.*$', componentlist=clist ) self.assertEqual( clist.components.count(), 0 ) class ModelTestCase(RepoTestCase): def setUp(self): super(ModelTestCase, self).setUp() self.subproject = self.create_subproject() class SourceTest(ModelTestCase): """Source objects testing.""" def test_exists(self): self.assertTrue(Source.objects.exists()) def test_source_info(self): unit = Unit.objects.all()[0] self.assertIsNotNone(unit.source_info) def test_priority(self): unit = Unit.objects.all()[0] self.assertEqual(unit.priority, 100) source = unit.source_info source.priority = 200 source.save() unit2 = Unit.objects.get(pk=unit.pk) self.assertEqual(unit2.priority, 200) def test_check_flags(self): """Setting of Source check_flags changes checks for related units.""" self.assertEqual(Check.objects.count(), 3) check = Check.objects.all()[0] unit = get_related_units(check)[0] source = unit.source_info source.check_flags = 'ignore-{0}'.format(check.check) source.save() self.assertEqual(Check.objects.count(), 0) class UnitTest(ModelTestCase): @override_settings(MT_WEBLATE_LIMIT=15) def test_more_like(self): unit = Unit.objects.all()[0] self.assertEqual(Unit.objects.more_like_this(unit).count(), 0) @override_settings(MT_WEBLATE_LIMIT=0) def test_more_like_timeout(self): unit = Unit.objects.all()[0] self.assertRaisesMessage( Exception, 'Request for more like {0} timed out.'.format(unit.pk), Unit.objects.more_like_this, unit ) @override_settings(MT_WEBLATE_LIMIT=-1) def test_more_like_no_fork(self): unit = Unit.objects.all()[0] self.assertEqual(Unit.objects.more_like_this(unit).count(), 0) class WhiteboardMessageTest(ModelTestCase): """Test(s) for WhiteboardMessage model.""" def setUp(self): super(WhiteboardMessageTest, self).setUp() WhiteboardMessage.objects.create( language=Language.objects.get(code='cs'), message='test cs', ) WhiteboardMessage.objects.create( language=Language.objects.get(code='de'), message='test de', ) 
WhiteboardMessage.objects.create( project=self.subproject.project, message='test project', ) WhiteboardMessage.objects.create( subproject=self.subproject, project=self.subproject.project, message='test subproject', ) WhiteboardMessage.objects.create( message='test global', ) def verify_filter(self, messages, count, message=None): """ Verifies whether messages have given count and first contains string. """ self.assertEqual(len(messages), count) if message is not None: self.assertEqual(messages[0].message, message) def test_contextfilter_global(self): self.verify_filter( WhiteboardMessage.objects.context_filter(), 1, 'test global' ) def test_contextfilter_project(self): self.verify_filter( WhiteboardMessage.objects.context_filter( project=self.subproject.project, ), 1, 'test project' ) def test_contextfilter_subproject(self): self.verify_filter( WhiteboardMessage.objects.context_filter( subproject=self.subproject, ), 2 ) def test_contextfilter_translation(self): self.verify_filter( WhiteboardMessage.objects.context_filter( subproject=self.subproject, language=Language.objects.get(code='cs'), ), 3, ) def test_contextfilter_language(self): self.verify_filter( WhiteboardMessage.objects.context_filter( language=Language.objects.get(code='cs'), ), 1, 'test cs' ) self.verify_filter( WhiteboardMessage.objects.context_filter( language=Language.objects.get(code='de'), ), 1, 'test de' )
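# --- Editorial note (not part of the original repository file) ---
# These are ordinary Django TestCase classes; in a configured Weblate checkout
# they would typically be run through Django's test runner, for example:
#
#   ./manage.py test weblate.trans.tests.test_models
#
# The exact invocation depends on the local setup and is an assumption here.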
gpl-3.0
6,715,203,728,136,511,000
31.160401
77
0.616661
false
yekaylee/capstone
capstoneRepo/SpeakerRecognition/Python/speakerTest.py
2
6969
# SpeakerTest.py # Group 7 ECE 4900 # Edward Reehorst w/ help from # http://minhdo.ece.illinois.edu/teaching/speaker_recognition/ # Text independent speaker recognition system based on mel frequency coeffiecient # features and vector quantization import numpy as np import scipy.fftpack as fft import scipy.io.wavfile as wav import math # DISTEU Pairwise Euclidean distances between columns of two matrices # # Input: # x, y: Two matrices whose each column is an a vector data. # # Output: # d: Element d(i,j) will be the Euclidean distance between two # column vectors X(:,i) and Y(:,j) # # Note: # The Euclidean distance D between two vectors X and Y is: # D = sum((x-y).^2).^0.5 def disteu(x, y): [M, N] = x.shape [M2, P] = y.shape if M != M2: print 'Matrix dimensions do not match.' return -1 d = np.zeros((N, P)) for n in range(0,N): for p in range(0,P): d[n,p] = np.sum(np.power(x[:,n]-y[:,p],2),axis=0) d = np.power(d,0.5) return d # VQLBG Vector quantization using the Linde-Buzo-Gray algorithm # # Inputs: # d contains training data vectors (one per column) # k is number of centroids required # # Outputs: # c contains the result VQ codebook (k columns, one for each centroids) def vqlbg(d, k): # Constants split = 0.1; sigma = 0.001; #Initial Codebook of one entry contains single centroid c = np.mean(d, axis=1); c = c[:,np.newaxis] m = 1; # Continue while m < k : # (Randomly) Split into two codebooks c = np.concatenate((np.multiply(c,(1 + split)), np.multiply(c,(1 - split))),axis=1) m = 2*m Dpast = float("inf") D = 10000 while (Dpast - D)/D > sigma: Dpast = D; # Nearest Neighbor Search z = disteu(d, c); dist = np.amin(z, axis=1); ind = np.argmin(z, axis=1); D = np.mean(dist); # Update Centroids for j in range(0,m): c[:, j] = np.mean(d[:, ind==j], axis=1); return c; # FROM https://github.com/jameslyons/python_speech_features/blob/master/features/base.py def get_filterbanks(nfilt=20,nfft=512,samplerate=16000,lowfreq=0,highfreq=None): """Compute a Mel-filterbank. The filters are stored in the rows, the columns correspond to fft bins. The filters are returned as an array of size nfilt * (nfft/2 + 1) :param nfilt: the number of filters in the filterbank, default 20. :param nfft: the FFT size. Default is 512. :param samplerate: the samplerate of the signal we are working with. Affects mel spacing. :param lowfreq: lowest band edge of mel filters, default 0 Hz :param highfreq: highest band edge of mel filters, default samplerate/2 :returns: A numpy array of size nfilt * (nfft/2 + 1) containing filterbank. Each row holds 1 filter. """ highfreq= highfreq or samplerate/2 assert highfreq <= samplerate/2, "highfreq is greater than samplerate/2" # compute points evenly spaced in mels lowmel = hz2mel(lowfreq) highmel = hz2mel(highfreq) melpoints = np.linspace(lowmel,highmel,nfilt+2) # our points are in Hz, but we use fft bins, so we have to convert # from Hz to fft bin number bin = np.floor((nfft+1)*mel2hz(melpoints)/samplerate) fbank = np.zeros([nfilt,nfft/2+1]) for j in xrange(0,nfilt): for i in xrange(int(bin[j]),int(bin[j+1])): fbank[j,i] = (i - bin[j])/(bin[j+1]-bin[j]) for i in xrange(int(bin[j+1]),int(bin[j+2])): fbank[j,i] = (bin[j+2]-i)/(bin[j+2]-bin[j+1]) return fbank # FROM https://github.com/jameslyons/python_speech_features/blob/master/features/base.py def hz2mel(hz): """Convert a value in Hertz to Mels :param hz: a value in Hz. This can also be a numpy array, conversion proceeds element-wise. :returns: a value in Mels. If an array was passed in, an identical sized array is returned. 
""" return 2595 * np.log10(1+hz/700.0) # FROM https://github.com/jameslyons/python_speech_features/blob/master/features/base.py def mel2hz(mel): """Convert a value in Mels to Hertz :param mel: a value in Mels. This can also be a numpy array, conversion proceeds element-wise. :returns: a value in Hertz. If an array was passed in, an identical sized array is returned. """ return 700*(10**(mel/2595.0)-1) def toMono(sig): if sig.ndim > 1: return sig[:,0] return sig # MFCC Calculate the mel frequencey cepstrum coefficients (MFCC) of a signal # # Inputs: # s : speech signal # fs : sample rate in Hz # # Outputs: # c : MFCC output, each column contains the MFCC's for one speech frame def mfcc(s, fs): #Constants N = 256 M = 100 P = 30 l = int(math.ceil((s.size-N+1)/M)) #Allocate c array c = np.zeros((P,l)); for x in range(0,l-1): #Frame start = x * M; frame = s[start:start+N]; #Window w = np.hamming(N) windFrame = frame * w #FFT frameFFT = np.fft.fft(windFrame) #Mel-Frequency Wrapping m = get_filterbanks(P,N,fs) n2 = math.floor(N/2) ms = np.dot(m , abs(np.power(frameFFT[0:n2+1],2))) #Last step, compute mel-frequency cepstrum coefficients c[:,x] = fft.dct(np.log(ms.clip(min=0.00001))); np.delete(c,0,0) # exclude 0'th order cepstral coefficient return c def train(traindir, n): # Speaker Recognition: Training Stage # # Input: # traindir : string name of directory contains all train sound files # n : number of train files in traindir # # Output: # code : trained VQ codebooks, code{i} for i-th speaker # # Note: # Sound files in traindir is supposed to be: # s1.wav, s2.wav, ..., sn.wav # Example: # >> code = train('C:\data\train\', 8); k = 8; # number of centroids required code = [] for i in range(1,n+1): # train a VQ codebook for each speaker file = "{0}s{1}.wav".format(traindir,i) print file [fs, s] = wav.read(file) s = toMono(s) v = mfcc(s, fs); # Compute MFCC's code.append(vqlbg(v, k)) # Train VQ codebook return code def test(testdir, n, code): # Speaker Recognition: Testing Stage # # Input: # testdir : string name of directory contains all test sound files # n : number of test files in testdir # code : codebooks of all trained speakers # # Note: # Sound files in testdir is supposed to be: # s1.wav, s2.wav, ..., sn.wav # # Example: # >> test('C:\data\test\', 8, code); for k in range(1,n+1): # read test sound file of each speaker file = '{0}s{1}.wav'.format(testdir, k) [fs, s] = wav.read(file) s = toMono(s) v = mfcc(s, fs) # Compute MFCC's distmin = float('inf') k1 = 0; for l in range(0,len(code)): # each trained codebook, compute distortion d = disteu(v, code[l]); dist = sum(np.amin(d,axis=1)) / d.shape[0] print "{0}\t{1}\t{2}".format(k,l+1,dist) print "\n" c = train("data/train/",12) test("data/test/",16,c)
bsd-3-clause
603,974,829,431,584,400
27.329268
104
0.630363
false
mhvis/solar
setup.py
1
1056
#!/usr/bin/env python
from setuptools import setup

with open("README.md", "r") as fh:
    long_description = fh.read()

setup(
    name="samil",
    version="2.1.0",
    author="Maarten Visscher",
    author_email="[email protected]",
    description="Samil Power inverter tool",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/mhvis/solar",
    packages=["samil"],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    # * 3.4 is required for paho-mqtt:tls_set()
    # * 3.5 is required for socket.socketpair() in Windows, only used for test cases
    # * 3.5 is required for socket.listen() optional backlog parameter
    # * CI only tests >=3.5
    python_requires='>=3.5',
    entry_points={
        "console_scripts": [
            "samil = samil.cli:cli"
        ]
    },
    install_requires=[
        "paho-mqtt>=1.5.0",
        "click>=7.1.2",
    ]
)
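# --- Editorial note (not part of the original repository file) ---
# With this setup script, a local install would typically be done with
# `pip install .` in a Python >= 3.5 environment, after which the console
# script declared in entry_points is available on the PATH as `samil`.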
mit
-512,019,954,847,332,600
28.333333
84
0.60322
false
toros-astro/corral
tests/settings.py
1
2497
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (c) 2016-2017, Cabral, Juan; Sanchez, Bruno & Berois, Martín
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this
#   list of conditions and the following disclaimer.

# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.

# * Neither the name of the copyright holder nor the names of its
#   contributors may be used to endorse or promote products derived from
#   this software without specific prior written permission.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.


# =============================================================================
# IMPORTS
# =============================================================================

import logging
import os


# =============================================================================
# CONF
# =============================================================================

PATH = os.path.abspath(os.path.dirname(__file__))

DEBUG_PROCESS = True

LOG_LEVEL = logging.WARNING

PIPELINE_SETUP = "tests.pipeline.TestPipeline"

CONNECTION = 'sqlite:///:memory:'

LOADER = "tests.steps.TestLoader"

STEPS = ["tests.steps.Step1", "tests.steps.Step2"]

ALERTS = ["tests.alerts.Alert1"]

SHELL_LOCALS = {"foo": 1}

EMAIL = {
    "server": "smtp.foo.com:587",
    "tls": True,
    "user": "[email protected]",
    "password": "secret"
}

MIGRATIONS_SETTINGS = os.path.join(PATH, "migrations", "alembic.ini")
bsd-3-clause
-382,187,832,002,934,850
35.173913
79
0.654647
false
evandrix/Splat
code/mypkg/gui/GraphicsView.py
1
2008
import sys

from PyQt4 import QtGui, QtCore


class GraphicsView(QtGui.QGraphicsView):
    def __init__(self, pixmap, scene, parent, *args):
        print >> sys.stderr, "GraphicsView::init()"
        QtGui.QGraphicsView.__init__(self, scene)
        self.scene = scene
        self.pixmap = pixmap
        self.win = parent
        self.zoomLevel = 1.0
        self.setupActions()
        QtCore.QMetaObject.connectSlotsByName(self)

    def setupActions(self):
        """
        1: zoom fit
        2: zoom org
        """
        zoom_fit = QtGui.QAction(self)
        zoom_fit.setShortcuts([QtGui.QKeySequence.fromString('1')])
        zoom_fit.triggered.connect(self.zoom_fit)
        self.addAction(zoom_fit)

        zoom_org = QtGui.QAction(self)
        zoom_org.setShortcuts([QtGui.QKeySequence.fromString('2')])
        zoom_org.triggered.connect(self.zoom_org)
        self.addAction(zoom_org)

    def zoom_fit(self, *ignore):
        print >> sys.stderr, "GraphicsView::zoom_fit(#1)"
        if self.pixmap:
            winSize, imgSize = self.size(), self.pixmap.size()
            hZoom = 1.0*winSize.width()/imgSize.width()
            vZoom = 1.0*winSize.height()/imgSize.height()
            zoomLevel = min(hZoom, vZoom)
            scaleFactor = zoomLevel/self.zoomLevel
            self.scale(scaleFactor, scaleFactor)
            self.centerOn(winSize.width()/2, winSize.height()/2)
            self.zoomLevel = zoomLevel
            print >> sys.stderr, "GraphicsView::zoom_fit(#1, %f)" % self.zoomLevel

    def zoom_org(self, *ignore):
        print >> sys.stderr, "GraphicsView::zoom_org(#2)"
        scaleFactor = 1.0/self.zoomLevel
        self.scale(scaleFactor, scaleFactor)
        self.centerOn(self.size().width()/2, self.size().height()/2)
        self.zoomLevel = 1.0
        print >> sys.stderr, "GraphicsView::zoom_org(#2, %f)" % self.zoomLevel

    def resizeEvent(self, event):
        print >> sys.stderr, "GraphicsView::resizeEvent()"
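# --- Editorial sketch (not part of the original repository file) ---
# One way this view might be wired up in a PyQt4 application; the image path
# and the standalone event loop below are assumptions for illustration only.
#
#   app = QtGui.QApplication([])
#   scene = QtGui.QGraphicsScene()
#   pixmap = QtGui.QPixmap("example.png")   # hypothetical image file
#   scene.addPixmap(pixmap)
#   view = GraphicsView(pixmap, scene, None)
#   view.show()
#   app.exec_()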
mit
1,430,356,780,869,776,100
37.615385
82
0.59512
false
sdlarsen1/CMPUT291
prjcode/Doctor.py
1
9331
import os from LogInOut import logout def doctorCommands(cursor, conn, staff_id): loggedOut = False while not loggedOut: os.system("clear") choice = int(raw_input('''Type integer value of desired task: 1. Get patient chart info. 2. Record a symptom. 3. Record a diagnosis. 4. Prescribe medication. 5. Log out\n''')) if choice == 1: getPatientChartInfo(cursor) raw_input("Press Enter to go back to menu.") # return to menu elif choice == 2: recordSymptom(cursor, conn, staff_id) raw_input("Press Enter to go back to menu.") # return to menu elif choice == 3: recordDiagnosis(cursor, conn, staff_id) raw_input("Press Enter to go back to menu.") # return to menu elif choice == 4: recordMedication(cursor, conn, staff_id) raw_input("Press Enter to go back to menu.") # return to menu else: loggedOut = logout() def getPatientChartInfo(cursor): # doctor 1, nurse 3 while 1: hcno = raw_input("Enter Patient Health Care Number >").lower() cursor.execute(''' select chart_id, edate from charts where hcno = ? order by adate; ''', (hcno,)) charts = cursor.fetchall() if len(charts) < 1: print "Patient #%s Does Not Exist" %hcno else: break print "Patient Charts for %s\nChart ID|Chart Status" %hcno for row in charts: if row[1] == None: status = "Open" else: status = "Closed" print "%-8s|%s" %(row[0],(status),) chartNotSelected = True while chartNotSelected: chart_id = raw_input("Select Chart Number >") for row in charts: if chart_id == row[0]: chartNotSelected = False break if chartNotSelected: print "Patient Chart #%s Does Not Exist" %chart_id cursor.execute(''' select staff_id, obs_date, symptom from symptoms where chart_id = ? order by obs_date; ''', (chart_id,)) symptoms = cursor.fetchall() cursor.execute(''' select staff_id, ddate, diagnosis from diagnoses where chart_id = ? order by ddate; ''', (chart_id,)) diagnoses = cursor.fetchall() cursor.execute(''' select staff_id, mdate, start_med, end_med, amount, drug_name from medications where chart_id = ? 
order by mdate; ''', (chart_id,)) meds = cursor.fetchall() print "Chart #%s for Patient #%s" %(chart_id ,hcno) print "Symptoms\nStaff ID|Observation Date |Symptom" for row in symptoms: print "%-8s|%-19s|%s" %(row[0],row[1],row[2]) print "----------------------------------------------" print "Diagnosis\nStaff ID|Diagnosis Date |Diagnosis" for row in diagnoses: print "%-8s|%-19s|%s" %(row[0],row[1],row[2]) print "----------------------------------------------" print "Medications\nStaff ID|Precsription Date |Med Start Date |Med End Date |Amount per day|Drug Name" for row in meds: print "%-8s|%-19s|%-19s|%-19s|%-14s|%s" %(row[0],row[1],row[2],row[3],row[4],row[5]) print "----------------------------------------------" def recordSymptom(cursor, conn, staff_id): # doctor 2, nurse 4 cursor.execute(''' select hcno, chart_id from charts where edate is Null; ''') patientCharts = cursor.fetchall() chartNotSelected = True patientNotSelected = True while patientNotSelected: hcno = raw_input("Enter Patient Health Care Number >") for row in patientCharts: if hcno == row[0]: patientNotSelected = False break if patientNotSelected: print "Patient #%s does not have an open chart" % hcno choice = raw_input("Enter 'quit' to exit task or enter anything to try another Health care number >").lower() if choice == 'quit': return False while chartNotSelected: chart_id = raw_input("Enter Patients Chart Number >") if (hcno, chart_id) in patientCharts: chartNotSelected = False else: print "Patient #%s does not have a chart #%s that is open" % (hcno, chart_id) choice = raw_input("Enter 'quit' to exit task or enter anything to try another chart number >").lower() if choice == 'quit': return False symptom = raw_input("Enter Patient Symptom >") cursor.execute(''' insert into symptoms values (?,?,?,datetime('now'),?); ''', (hcno, chart_id, staff_id, symptom,)) conn.commit() def recordDiagnosis(cursor, conn, staff_id): # doctor 3 cursor.execute(''' select hcno, chart_id from charts where edate is Null; ''') patientCharts = cursor.fetchall() chartNotSelected = True patientNotSelected = True while patientNotSelected: hcno = raw_input("Enter Patient Health Care Number >") for row in patientCharts: if hcno == row[0]: patientNotSelected = False break if patientNotSelected: print "Patient #%s does not have an open chart" %hcno choice = raw_input("Enter 'quit' to exit task or enter anything to try another Health care number >").lower() if choice == 'quit': return False while chartNotSelected: chart_id = raw_input("Enter Patients Chart Number >") if (hcno,chart_id) in patientCharts: chartNotSelected = False else: print "Patient #%s does not have a chart #%s that is open" %(hcno, chart_id) choice = raw_input("Enter 'quit' to exit task or enter anything to try another chart number >").lower() if choice == 'quit': return False diagnosis = raw_input("Enter Diagnosis >") cursor.execute(''' insert into diagnoses values (?,?,?,datetime('now'),?); ''', (hcno, chart_id, staff_id, diagnosis, )) conn.commit() def recordMedication(cursor, conn, staff_id): # doctor 4 cursor.execute(''' select hcno, chart_id from charts where edate is Null; ''') patientCharts = cursor.fetchall() chartNotSelected = True patientNotSelected = True while patientNotSelected: hcno = raw_input("Enter Patient Health Care Number >") for row in patientCharts: if hcno == row[0]: patientNotSelected = False break if patientNotSelected: print "Patient #%s does not have an open chart" %hcno choice = raw_input("Enter 'quit' to exit task or enter anything to try another Health 
care number >").lower() if choice == 'quit': return False while chartNotSelected: chart_id = raw_input("Enter Patients Chart Number >") if (hcno,chart_id) in patientCharts: chartNotSelected = False else: print "Patient #%s does not have a chart #%s that is open" %(hcno,chart_id) choice = raw_input("Enter 'quit' to exit task or enter anything to try another chart number >").lower() if choice == 'quit': return False medication = raw_input("Enter Drug Name >").lower() cursor.execute(''' select lower(drug_name) from reportedallergies where hcno = ? ; ''', (hcno,)) directAllergies = cursor.fetchall() cursor.execute(''' select lower(r.drug_name), lower(i.canbe_alg) from reportedallergies r, inferredallergies i where r.hcno = ? and r.drug_name = i.alg ''', (hcno,)) inferredAllergies = cursor.fetchall() if medication in directAllergies: print "Warning Patient is allergic to %s" %medication # override = raw_input("Do you wish to procede") for row in inferredAllergies: if medication == row[1]: print "Warning Patient is allergic to %s and therefore could be allergic to %s" %(row[0], row[1]) # maybe select a new med break cursor.execute(''' select d.sug_amount from dosage d, patients p where p.hcno = ? and lower(d.drug_name) = ? and d.age_group = p.age_group; ''', (hcno,medication,)) sugestedDosage = cursor.fetchall() while 1: amount = raw_input("Enter dosage amount >") if amount > str(sugestedDosage[0][0]): print "Amount entered is greater then the recommended dosage of %s for this Patients age group" %sugestedDosage[0][0] procede = raw_input("continue with this dosage(y) or enter new dosage(n) >").lower() if procede == 'y': break else: continue else: break numOfDays = raw_input("Enter the number of day for the medication >") daymodifer = str('+'+str(numOfDays)+' days') # (hcno, chart_id, staff_id, mdate, start_med, end_med, amount, drug_name) cursor.execute(''' insert into medications values (?,?,?,datetime('now'),datetime('now'),datetime('now', ?),?,?); ''', (hcno, chart_id, staff_id, daymodifer, amount, medication, )) conn.commit()
gpl-3.0
7,187,887,561,044,244,000
27.978261
129
0.574429
false
ArdanaCLM/ardana-service
ardana_service/packages.py
1
9927
# (c) Copyright 2018-2019 SUSE LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .playbooks import run_playbook from .plays import get_metadata_file from flask import abort from flask import Blueprint from flask import jsonify from flask import request import itertools import json import os from os.path import dirname from os.path import exists from os.path import join from oslo_config import cfg from oslo_log import log as logging import re import subprocess from . import policy from time import sleep LOG = logging.getLogger(__name__) bp = Blueprint('packages', __name__) PKG_CACHE_FILE = cfg.CONF.paths.packages_cache HOST_PKGS_FILE = cfg.CONF.paths.packages_hosts_data PACKAGES_PLAY = "_ardana-service-get-pkgdata" @bp.route("/api/v2/packages", methods=['GET']) @policy.enforce('lifecycle:list_packages') def get_packages(): """Get installed venv packages and SUSE-Openstack-installed packages This caches the ardana and venv-openstack packages installed on the deployer and returns a list of ardana packages. .. :quickref: Packages; list ardana packages and openstack venv versions **Example Request**: .. sourcecode:: http GET /api/v2/packages HTTP/1.1 Content-Type: application/json **Example Response**: .. sourcecode:: http HTTP/1.1 200 OK { "cloud_installed_packages": [{ "name": "ardana-logging", "versions": ["8.0+git.1531134017.565cede-102.1"] }, { "name": "ardana-nova", "versions": ["8.0+git.1528891405.336a954-103.6"] }, ... <and so on>], "openstack_venv_packages": [{ "available": "2.2.1-19.116", "installed": ["2.2.1-9.1", "2.2.1-19.116"], "name": "monasca" }, { "available": "9.0.2-19.124", "installed": ["9.0.2-19.124"], "name": "ceilometer" }, ... <and so on>] } """ if cfg.CONF.testing.use_mock: mock_json = "tools/packages.json" json_file = join(dirname(dirname(__file__)), mock_json) with open(json_file) as f: return jsonify(json.load(f)) installed_os_pkgs, os_pkg_cache = update_openstack_pkg_cache() # Run the playbook to get package data from all the hosts in the model proc_info = {} try: vars = { "extra-vars": { "host_pkgs_file": HOST_PKGS_FILE } } # encrypt is needed to run playbook if cloud config is encrypted. # It is passed in as a header because there is no body in HTTP GET # API. encrypt = request.headers.get('encrypt') if encrypt: vars['extra-vars']['encrypt'] = encrypt play_id = run_playbook(PACKAGES_PLAY, vars)["id"] # Poll for "code" and ignore its value because some hosts may be down. while 'code' not in proc_info: with open(get_metadata_file(play_id)) as f: proc_info = json.load(f) if 'code' not in proc_info: sleep(1) except Exception as e: LOG.error("Could not get remote package information: %s" % e) abort(404, "Remote package information unavailable") # host_pkgs example structure created by PACKAGES_PLAY playbook run: # { # "host1": { # # list of installed timestamped openstack venv packages on host1 # "ts_os_pkgs": [ # "barbican-20180820T201055Z", # "cinder-20180820T190043Z", ... 
# ], # # list of SUSE-Openstack-cloud packages installed on host1 # "zypper_cloud_pkgs": { # "python-PasteDeploy": "1.5.2-1.52", # "python-pymongo": "3.1.1-1.55", ... # } # }, # "host2": { ... } # } try: with open(HOST_PKGS_FILE) as f: host_pkgs = json.load(f) except Exception as e: LOG.error("Could not retrieve remote host pkg data from %s: %s" % (HOST_PKGS_FILE, e)) abort(404, "Remote package information unavailable") finally: if exists(HOST_PKGS_FILE): os.remove(HOST_PKGS_FILE) # Reconcile openstack timestamps to versions installed on each system all_ts_os_pkgs = [host['ts_os_pkgs'] for host in host_pkgs.values()] uniq_ts_pkgs = set(itertools.chain.from_iterable(all_ts_os_pkgs)) re_name_ts = re.compile(r'(?P<name>[\w-]+)-\d+T\d+Z') for pkg in uniq_ts_pkgs: pkg_match = re_name_ts.match(pkg) if not pkg_match: LOG.warning('Unrecognized package format: %s' % pkg) continue name = pkg_match.group('name') if not installed_os_pkgs.get(name): LOG.warning('Unrecognized service name: %s' % name) continue version = os_pkg_cache.get(pkg) if version: installed_os_pkgs[name]['installed'].append(version) else: # We don't know what version this is, so we'll just add # the timestamped package name in there (should never happen) installed_os_pkgs[name]['installed'].append(pkg) ovp = [ { 'name': k, 'installed': v['installed'], 'available': v['available'] } for k, v in installed_os_pkgs.items()] # Create a list of unique SUSE-Openstack installed packages across all # systems pkgs_dict = {} for host in host_pkgs.values(): for name, version in host['zypper_cloud_pkgs'].iteritems(): if name not in pkgs_dict: pkgs_dict[name] = [version] elif version not in pkgs_dict[name]: # this case might only occur during upgrade or partial upgrade pkgs_dict[name].append(version) cip = [ { 'name': name, 'versions': versions } for name, versions in pkgs_dict.items() ] response = { 'openstack_venv_packages': ovp, 'cloud_installed_packages': cip } return jsonify(response) def update_openstack_pkg_cache(): re_openstack = re.compile(r'venv-openstack-(?P<name>[\w-]+)-') # contains current AND OLD openstack packages where # k: timestamped package (i.e. monasca-20180820T190346Z) # v: version (i.e. 2.2.1-19.155) # This will build up over time with patches and upgrades os_pkg_cache = {} # contains current/available openstack packages installed on the deployer # k: openstack name (i.e. monasca) # v: version (i.e. 2.2.1-19.155) installed_os_pkgs = {} # Load package cache try: with open(PKG_CACHE_FILE) as f: os_pkg_cache = json.load(f) except Exception as e: LOG.info("Could not load %s: %s." % (PKG_CACHE_FILE, e)) # TODO(choyj): The code below could be simplified by using the zypper data # from the output of PACKAGES_PLAY. But we do not know which model host is # the deployer other than via educated guess (only deployer has venv pkgs # installed). 
So, for now: # See what openstack packages are installed on this deployer try: p = subprocess.Popen(['zypper', '--terse', 'packages', '--installed'], stdout=subprocess.PIPE) zyp_lines = p.communicate()[0].decode('utf-8').split('\n') except OSError: LOG.error("zypper unavailable or not working on this system") abort(503, 'zypper unavailable on this host') for line in zyp_lines: fields = line.split('|') # if this is a valid line and the package is installed if len(fields) == 5 and 'i' in fields[0]: name = fields[2].strip() vers = fields[3].strip() os_match = re_openstack.match(name) if os_match: # a venv-openstack package, therefore figure out timestamped # package to update/add to os_pkg_cache name_vers = "%s-%s" % (name, vers) try: p = subprocess.Popen( ['rpm', '--query', '--list', name_vers], stdout=subprocess.PIPE) rpm_lines = p.communicate()[0].split('\n') project = os_match.group('name') re_ts_pkg = \ re.compile(r"/(?P<name_ts>%s-\d+T\d+Z).tgz$" % project) for rpm_line in rpm_lines: ts_pkg_match = re_ts_pkg.search(rpm_line) if ts_pkg_match: os_pkg_cache[ts_pkg_match.group('name_ts')] = vers installed_os_pkgs[project] = { 'available': vers, 'installed': [] } break except OSError as e: LOG.warning("Could not determine timestamped package for" " %s: %s" % (name_vers, e)) # Save package cache try: with open(PKG_CACHE_FILE, 'w') as f: json.dump(os_pkg_cache, f, indent=4, sort_keys=True) except Exception as e: LOG.info("Could not save %s: %s." % (PKG_CACHE_FILE, e)) return installed_os_pkgs, os_pkg_cache
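

# ---------------------------------------------------------------------------
# Illustrative helper (not part of the original module): a minimal sketch of
# how a client could call the GET /api/v2/packages endpoint documented above
# using the `requests` library.  The default base URL/port, the `requests`
# dependency and the way the optional `encrypt` value is obtained are
# assumptions for this example, not values defined by the service itself.
# ---------------------------------------------------------------------------
def example_list_packages(base_url="http://localhost:9085", encrypt=None):
    """Fetch the package report served by get_packages() and return it."""
    import requests  # imported lazily so the example adds no module dependency

    # The encrypt key travels as a header because a GET request has no body.
    headers = {"encrypt": encrypt} if encrypt else {}
    resp = requests.get(base_url + "/api/v2/packages", headers=headers)
    resp.raise_for_status()
    report = resp.json()
    for pkg in report.get("openstack_venv_packages", []):
        print("%s: available=%s installed=%s"
              % (pkg["name"], pkg["available"], pkg["installed"]))
    return report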
apache-2.0
-1,367,693,469,405,626,400
35.362637
79
0.56573
false
singingwolfboy/flask-dance
flask_dance/consumer/storage/session.py
1
1085
from flask_dance.consumer.storage import BaseStorage
import flask


class SessionStorage(BaseStorage):
    """
    The default storage backend. Stores and retrieves OAuth tokens using the
    :ref:`Flask session <flask:sessions>`.
    """

    def __init__(self, key="{bp.name}_oauth_token"):
        """
        Args:
            key (str): The name to use as a key for storing the OAuth token
                in the Flask session. This string will have
                ``.format(bp=self.blueprint)`` called on it before it is
                used, so you can refer to information on the blueprint as
                part of the key. For example, ``{bp.name}`` will be replaced
                with the name of the blueprint.
        """
        self.key = key

    def get(self, blueprint):
        key = self.key.format(bp=blueprint)
        return flask.session.get(key)

    def set(self, blueprint, token):
        key = self.key.format(bp=blueprint)
        flask.session[key] = token

    def delete(self, blueprint):
        key = self.key.format(bp=blueprint)
        del flask.session[key]
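

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module).  It assumes a
# Flask-Dance release in which the provider blueprints accept a ``storage``
# argument; the GitHub provider and the client id/secret values are
# placeholders chosen for the example.  Run as a module
# (``python -m flask_dance.consumer.storage.session``) to build the app
# without starting a server.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from flask import Flask
    from flask_dance.contrib.github import make_github_blueprint

    app = Flask(__name__)
    app.secret_key = "change-me"  # Flask sessions need a secret key

    blueprint = make_github_blueprint(
        client_id="placeholder-client-id",
        client_secret="placeholder-client-secret",
        # Store the token under a custom session key instead of the default
        # "{bp.name}_oauth_token".
        storage=SessionStorage(key="custom_{bp.name}_token"),
    )
    app.register_blueprint(blueprint, url_prefix="/login")
    print(app.url_map)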
mit
3,112,623,158,917,967,400
32.90625
83
0.611982
false
silenteddie/Landsat8LST_SWA
__init__.py
1
1512
# -*- coding: utf-8 -*- """ /*************************************************************************** Landsat 8 LST Retriever (SWA) A QGIS plugin This module allows to retrieve Land Surface Temperature (LST) from Landsat 8 data (TIRS channels) using Split-Windows Algorithm. Water Vapor content and Land Surface Emissivity needed, but both could be recieved automatically in module (Water vapor via MODIS MOD09, LSE via NDVI). ------------------- begin : 2016-01-16 copyright : (C) 2016 by Eduard Kazakov email : [email protected] homepage : http://ekazakov.info ***************************************************************************/ /*************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ """ def classFactory(iface): from l8_lst_swa import L8_lst_swa return L8_lst_swa(iface)
gpl-2.0
8,634,993,758,528,174,000
51.137931
97
0.409392
false
gcrahay/python-wer
src/wer/helpers.py
1
1869
# -*- coding: utf8 -*-
from __future__ import unicode_literals

import time
from datetime import datetime

from eulxml import xmlmap


def windows_to_unix_timestamp(windows_timestamp):
    """
    Converts a Windows timestamp to a Unix timestamp

    :param windows_timestamp: Windows timestamp
    :type windows_timestamp: int
    :return: Unix timestamp
    :rtype: int
    """
    magic_number = 11644473600
    return int((windows_timestamp / 10000000) - magic_number)


def unix_to_windows_timestamp(unix_timestamp):
    """
    Converts a Unix timestamp to a Windows timestamp

    :param unix_timestamp: Unix timestamp
    :type unix_timestamp: int
    :return: Windows timestamp
    :rtype: int
    """
    magic_number = 116444736000000000
    return (unix_timestamp * 10000000) + magic_number


class DateMapper(xmlmap.fields.DateTimeMapper):
    """
    Custom mapper for WER date

    Converts XML timestamp to python :class:`datetime.datetime`
    """

    def to_python(self, node):
        """
        Converts internal Windows timestamp to Python :class:`datetime.datetime`

        :param node: XML node value
        :type node: basestring
        :return: Python datetime
        :rtype: :class:`datetime.datetime`
        """
        return datetime.utcfromtimestamp(windows_to_unix_timestamp(int(node)))

    def to_xml(self, dt):
        """
        Converts a Python datetime to a Windows timestamp

        :param dt: date and time to convert
        :return: Windows timestamp
        :rtype: int
        """
        return unix_to_windows_timestamp(int(time.mktime(dt.timetuple())))


class DateField(xmlmap.fields.Field):
    """
    Custom date field

    Uses the custom date mapper
    """

    def __init__(self, xpath):
        super(DateField, self).__init__(xpath,
                                        manager=xmlmap.fields.SingleNodeManager(),
                                        mapper=DateMapper())
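

if __name__ == "__main__":
    # Quick self-check sketch (not part of the original module): round-trips a
    # Unix timestamp through the two converters defined above.  The sample
    # date is arbitrary.
    sample = datetime(2016, 1, 1, 12, 0, 0)
    unix_ts = int(time.mktime(sample.timetuple()))
    windows_ts = unix_to_windows_timestamp(unix_ts)
    assert windows_to_unix_timestamp(windows_ts) == unix_ts
    print("unix %d <-> windows %d" % (unix_ts, windows_ts))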
bsd-2-clause
-5,375,953,554,604,258,000
25.323944
84
0.637239
false
DBrianKimmel/PyHouse
Project/src/Modules/House/Family/Reolink/reolink_device.py
1
1201
""" @name: /home/briank/workspace/PyHouse/Project/src/Modules/House/Family/Reolink/reolink_device.py @author: D. Brian Kimmel @contact: [email protected] @copyright: (c) 2013-2019 by D. Brian Kimmel @license: MIT License @note: Created on Jan 26, 2020 @summary: """ __updated__ = '2020-01-26' __version_info__ = (20, 1, 26) __version__ = '.'.join(map(str, __version_info__)) # Import system type stuff # Import PyMh files from Modules.Core.Utilities.debug_tools import PrettyFormatAny from Modules.Core import logging_pyh as Logger LOG = Logger.getLogger('PyHouse.reolink_device ') class Api: """ These are the public methods available to use Devices from any family. """ m_plm_list = [] m_hub_list = [] m_pyhouse_obj = None def __init__(self, p_pyhouse_obj): # p_pyhouse_obj.House._Commands['insteon'] = {} self.m_pyhouse_obj = p_pyhouse_obj LOG.info('Initialized') def LoadConfig(self): """ """ def Start(self): """ """ def SaveConfig(self): """ """ def Stop(self): _x = PrettyFormatAny.form(self.m_pyhouse_obj, 'pyhouse') # ## END DBK
mit
-2,093,959,987,469,830,700
21.240741
101
0.606994
false
perkinsbt/cse-6242
get word lists.py
1
1354
# -*- coding: utf-8 -*- """ Created on Tue Apr 12 14:41:27 2016 @author: Perk """ import requests import json from progressbar import ProgressBar pbar = ProgressBar() with open('sunlight API.txt','r') as api_file: api_key = api_file.read() dem_word_lst = [] rep_word_lst = [] phrase_url='http://capitolwords.org/api/1/phrases/party.json' phrase_param = {'apikey':api_key,'mincount':700} with open('total_phrases.txt') as phrases: phrase_set = phrases.read().split('\n') for ngram in pbar(phrase_set): phrase_param['phrase'] = ngram ngram_req = requests.get(phrase_url,params=phrase_param) ngram_req.raise_for_status() ngram_json = ngram_req.json() dem = 0.0 rep = 0.0 for line in ngram_json['results']: count = line['count'] party = line['party'] if party in ('R','Republican'): rep += count elif party in ('D','Democrat'): dem += count if (rep and dem) == 0.0: continue elif rep/(rep+dem) > 0.70: rep_word_lst.append((ngram,rep/(rep+dem))) elif dem/(dem+rep) > 0.70: dem_word_lst.append((ngram,dem/(dem+rep))) with open('rep_word_count.json','w') as rep_file: json.dump(rep_word_lst,rep_file) with open('dem_word_count.json','w') as dem_file: json.dump(dem_word_lst,dem_file)
mit
9,195,303,648,708,260,000
30.285714
61
0.598227
false
aswolf/xmeos
xmeos/models/old/eoslib_old.py
1
11165
import numpy as np import scipy as sp #==================================================================== # EOSMod: Equation of State Model # eoslib- library of common equation of state models #==================================================================== # mgd_ref_model() # ref_model: BM3, BM4, VIN, LOG # ref_path: S, T, (P), # gamma_model: powlaw, shift_powlaw # # # set_therm_model( 'MGD', eos_d ) # set_ref_model( 'MGD', FixS=True, eos_d ) # set_ref_model( 'MGD', FixT=True, eos_d ) # # energy_therm_f = eos_d['func_d']['energy_therm_f'] # gamma_f = eos_d['func_d']['gamma_ref_f'] # temp_debye_f = eos_d['func_d']['temp_scale_ref_f'] # temp_ref_f = eos_d['func_d']['temp_ref_f'] #==================================================================== # SECT 0: Reference Compression Profiles #==================================================================== #==================================================================== def set_param( name_l, val_l, eos_d ): if 'param_d' in eos_d.keys(): param_d = eos_d['param_d'] else: param_d = {} eos_d['param_d'] = param_d for name, val in zip( name_l, val_l ): param_d[name] = val #==================================================================== def set_const( name_l, val_l, eos_d ): if 'const_d' in eos_d.keys(): const_d = eos_d['const_d'] else: const_d = init_const() eos_d['const_d'] = const_d for name, val in zip( name_l, val_l ): const_d[name] = val #==================================================================== def set_func( name_l, val_l, eos_d ): if 'param_d' in eos_d.keys(): param_d = eos_d['param_d'] else: param_d = {} eos_d['param_d'] = param_d for name, val in zip( name_l, val_l ): param_d[name] = val #==================================================================== def init_const(): const_d = {} const_d['eVperHa'] = 27.211 # eV/Ha const_d['JperHa'] = 4.35974434e-18 # J/Ha const_d['JperCal'] = 4.184 # J/Cal const_d['Nmol'] = 6.0221413e+23 # atoms/mol const_d['R'] = 8.314462 # J/K/mol const_d['kboltz'] = 8.617332e-5 # eV/K const_d['ang3percc'] = 1e24 # ang^3/cm^3 const_d['PV_ratio'] = 160.2176487 # (GPa*ang^3)/eV const_d['TS_ratio'] = const_d['R']/const_d['kboltz'] # (J/mol)/eV return const_d #==================================================================== #==================================================================== # SECT 1: Reference Compression Profiles #==================================================================== #==================================================================== # BM3- Birch Murnaghan 3rd Order #==================================================================== def press_BM3( V_a, eos_d ): # Retrieve parameter values param_d = eos_d['param_d'] V0 = param_d['V0'] K0 = param_d['K0'] KP0 = param_d['KP0'] vratio_a = V_a/V0 press_a = 3.0/2*K0 * (vratio_a**(-7.0/3) - vratio_a**(-5.0/3)) * \ (1 + 3.0/4*(KP0-4)*(vratio_a**(-2.0/3)-1)) return press_a #==================================================================== def energy_BM3( V_a, eos_d ): # Retrieve parameter values param_d = eos_d['param_d'] V0 = param_d['V0'] K0 = param_d['K0'] KP0 = param_d['KP0'] E0 = param_d['E0'] # Retrieve unit conversion ratio PV_ratio = eos_d['const_d']['PV_ratio'] vratio_a = V_a/V0 fstrain_a = 0.5*(vratio_a**(-2.0/3) - 1) energy_a = E0 + 9.0/2*(V0*K0/PV_ratio)*\ ( KP0*fstrain_a**3 + fstrain_a**2*(1-4*fstrain_a) ) return energy_a #==================================================================== #==================================================================== # SECT 2: Thermal EOS 
#==================================================================== #==================================================================== # Gruneisen Model #==================================================================== def press_mie_grun( V_a, T_a, eos_d ): V_a, T_a = fill_array( V_a, T_a ) # units const PV_ratio = eos_d['const_d']['PV_ratio'] # Needed functions energy_therm_f = eos_d['func_d']['energy_therm_f'] gamma_f = eos_d['func_d']['gamma_ref_f'] energy_therm_a = energy_therm_f( V_a, T_a, eos_d ) gamma_a = gamma_f( V_a, func_d ) press_therm_a = PV_ratio*(gamma_a/V_a)*energy_therm_a return press_therm_a #==================================================================== def gamma_powlaw( V_a, eos_d ): # get parameter values param_d = eos_d['param_d'] V0 = param_d['V0'] gamma0 = param_d['gamma0'] q = param_d['q'] gamma_a = gamma0 *(V_a/V0)**q return gamma_a #==================================================================== def temp_powlaw( V_a, T0, eos_d ): """ Return temperature for debye model V_a: sample volume array T0: temperature at V=V0 """ # get parameter values param_d = eos_d['param_d'] V0 = param_d['V0'] gamma0 = param_d['gamma0'] q = param_d['q'] gamma_a = gamma_powlaw( V_a, eos_d ) T_a = T0*np.exp( -(gamma_a - gamma0)/q ) return T_a #==================================================================== #==================================================================== # Debye Model #==================================================================== def energy_debye( V_a, T_a, eos_d ): ''' Thermal Energy for Debye model Relies on reference profile properties stored in eos_d defined by: * debye_temp_f( V_a, T_a ) * ref_temp_f( V_a, T_a ) ''' V_a, T_a = fill_array( V_a, T_a ) # get parameter values Cvmax = eos_d['param_d']['Cvmax'] TS_ratio = eos_d['const_d']['TS_ratio'] # get eos funcs temp_debye_f = eos_d['func_d']['temp_scale_ref_f'] temp_ref_f = eos_d['func_d']['temp_ref_f'] theta_a = temp_debye_f( V_a, eos_d ) Tref_a = temp_ref_f( V_a, eos_d ) energy_therm_a = (Cvmax/TS_ratio) \ *( T_a*debye_func( theta_a/T_a ) - Tref_a*debye_func( theta_a/Tref_a ) ) return energy_therm_a #==================================================================== def entropy_debye( V_a, T_a, eos_d ): V_a, T_a = fill_array( V_a, T_a ) # get parameter values param_d = eos_d['param_d'] T0 = param_d['T0'] theta0 = param_d['theta0'] Cvmax = param_d['Cvmax'] TS_ratio = eos_d['const_d']['TS_ratio'] theta_f = eos_d['func_d']['temp_scale_ref_f'] theta_a = theta_f( V_a, eos_d ) x_a = theta_a/T_a # entropy_a = Cvmax*Cv_const/3. 
\ # *(4*debye_func( x_a )-3*np.log( 1-np.exp( -x_a ) ) ) entropy_a = 1.0/3*(Cvmax/TS_ratio) \ *(4*debye_func( x_a )-3*np.log( np.exp( x_a ) - 1 ) + 3*x_a) return entropy_a #==================================================================== def heat_capacity_V_debye( V_a, T_a, eos_d ): V_a, T_a = fill_array( V_a, T_a ) # get parameter values Cvmax = eos_d['param_d']['Cvmax'] TS_ratio = eos_d['const_d']['TS_ratio'] # get funcs temp_debye_f = eos_d['func_d']['temp_scale_ref_f'] theta_a = temp_debye_f( V_a, eos_d ) # The reference adiabat terms in the internal energy are temperature # independent, and thus play no role in heat capacity x_a = theta_a/T_a heat_capacity_a = (Cvmax/TS_ratio)*(4*debye_func( x_a )-3*x_a/(np.exp(x_a)-1)) return heat_capacity_a #==================================================================== def debye_func( x_a ): """ Return debye integral value - calculation done using interpolation in a lookup table - interpolation done in log-space where behavior is close to linear - linear extrapolation is implemented manually """ if np.isscalar( x_a ): assert x_a >= 0, 'x_a values must be greater than zero.' else: assert all( x_a >= 0 ), 'x_a values must be greater than zero.' # Lookup table # interpolate in log space where behavior is nearly linear debyex_a = np.array( [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.6, 1.8, 2.0, 2.2, 2.4, 2.6, 2.8, 3.0, 3.2, 3.4, 3.6, 3.8, 4.0, 4.2, 4.4, 4.6, 4.8, 5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0] ) debyelogf_a = np.array( [ 0.0, -0.03770187, -0.07580279, -0.11429475, -0.15316866, -0.19241674, -0.2320279 , -0.27199378, -0.31230405, -0.35294619, -0.39390815, -0.43518026, -0.47674953, -0.51860413, -0.56072866, -0.64573892, -0.73167389, -0.81841793, -0.90586032, -0.99388207, -1.08236598, -1.17119911, -1.26026101, -1.34944183, -1.43863241, -1.52771969, -1.61660856, -1.70519469, -1.79338479, -1.88108917, -1.96822938, -2.05471771, -2.14049175, -2.35134476, -2.55643273, -2.75507892, -2.94682783, -3.13143746, -3.30880053, -3.47894273, -3.64199587, -3.79820337, -3.94785746] ) # Create interpolation function logdeb_func = interpolate.interp1d( debyex_a, debyelogf_a, kind='cubic', bounds_error=False, fill_value=np.nan ) logfval_a = logdeb_func( x_a ) # Check for extrapolated values indicated by NaN # - replace with linear extrapolation logfextrap_a = debyelogf_a[-1] + (x_a - debyex_a[-1]) \ *(debyelogf_a[-1]-debyelogf_a[-2])/(debyex_a[-1]-debyex_a[-2]) logfval_a = np.where( x_a > debyex_a[-1], logfextrap_a, logfval_a ) # exponentiate to get integral value return np.exp( logfval_a ) #==================================================================== #==================================================================== # SECT N: Code Utility Functions #==================================================================== def fill_array( var1, var2 ): """ fix fill_array such that it returns two numpy arrays of equal size use numpy.full_like """ var1_a = np.asarray( var1 ) var2_a = np.asarray( var2 ) if var1_a.shape==(): var1_a = np.asarray( [var1] ) if var2_a.shape==(): var2_a = np.asarray( [var2] ) # Begin try/except block to handle all cases for filling an array while True: try: assert var1_a.shape == var2_a.shape break except: pass try: var1_a = np.full_like( var2_a, var1_a ) break except: pass try: var2_a = np.full_like( var1_a, var2_a ) break except: pass # If none of the cases properly handle it, throw error assert False, 'var1 and var2 must both be equal shape or size=1' return var1_a, var2_a 
#====================================================================
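

if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): evaluates the
    # third-order Birch-Murnaghan expressions defined above on a small
    # compression grid.  The V0/K0/KP0/E0 values are round, illustrative
    # numbers (volume in Ang^3, modulus in GPa), not fitted parameters.
    eos_d = {}
    set_const([], [], eos_d)  # installs the default constants (PV_ratio, etc.)
    set_param(['V0', 'K0', 'KP0', 'E0'], [74.7, 160.0, 4.0, 0.0], eos_d)

    V_a = eos_d['param_d']['V0'] * np.array([1.0, 0.9, 0.8])
    print('P [GPa]: %s' % press_BM3(V_a, eos_d))
    print('E [eV]:  %s' % energy_BM3(V_a, eos_d))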
mit
6,840,345,582,503,291,000
32.93617
82
0.43708
false
webclub/CFI
Django/food/views.py
1
10085
import datetime import json import traceback from django.shortcuts import render from django.views.decorators.http import require_POST from django.views.decorators.csrf import csrf_exempt from django.utils import timezone from django.http import HttpResponse from django.contrib.auth.models import User # from models import School, Kitchen, Manager from models import * ACC_HEADERS = {'Access-Control-Allow-Origin': '*', 'Access-Control-Allow-Methods': 'GET, POST, OPTIONS', 'Access-Control-Max-Age': 1000, 'Access-Control-Allow-Headers': '*'} def modify(response): for key, value in ACC_HEADERS.iteritems(): response[key] = value return response @require_POST @csrf_exempt def register_kitchen(request): name = request.POST.get('name', '') kitchen_type = request.POST.get('type', 'centralised') kitchen = Kitchen.objects.create(name=name, kitchen_type=kitchen_type) return modify(HttpResponse(json.dumps({'message': 'Kitchen done', 'kitchen_id': kitchen.id, }) )) @require_POST @csrf_exempt def register_school(request): name = request.POST.get('name', '') kitchen_id = request.POST.get('kitchen_id', 1) school = School.objects.create(name=name, kitchen_id=kitchen_id) return modify(HttpResponse(json.dumps({'message': 'School Registration Successful', 'school_id': school.id, }))) @require_POST @csrf_exempt def register_teacher(request): username = request.POST.get('username', '') firstname = request.POST.get('firstname', '') lastname = request.POST.get('lastname', '') school_id = request.POST.get('school', 1) role = request.POST.get('role', 1) school = School.objects.get(id=school_id) user = User.objects.create(username=username, first_name=firstname, last_name=lastname) teacher = Teacher.objects.create(user_id=user.id, school_id=school.id, role=role) return modify(HttpResponse(json.dumps({'message': 'Teacher Registration Successful'}))) @require_POST @csrf_exempt def register_manager(request): username = request.POST.get('username', '') firstname = request.POST.get('firstname', '') lastname = request.POST.get('lastname', '') kitchen_id = request.POST.get('kitchen', 1) role = request.POST.get('role', 1) kitchen = Kitchen.objects.get(id=kitchen_id) user = User.objects.create(username=username, first_name=firstname, last_name=lastname) manager = Manager.objects.create(user_id=user.id, kitchen_id=kitchen.id, role=role) return modify(HttpResponse(json.dumps({'message': 'Manager Registration Successful'}))) @require_POST @csrf_exempt def update_attendance(request): teacher_id = request.POST.get('teacher_id', 1) attendance_count = int(request.POST.get('attendance_count', 0)) teacher = Teacher.objects.get(id=teacher_id) try: school_attendance = Attendance.objects.get(school_id=teacher.school_id, date=timezone.now().date()) except Attendance.DoesNotExist: school_attendance = Attendance.objects.create(school_id=teacher.school_id, date=timezone.now().date()) if teacher.role: school_attendance.secondary = school_attendance.secondary + attendance_count else: school_attendance.primary += school_attendance.primary + attendance_count school_attendance.save() return modify(HttpResponse(json.dumps({'message': 'Attendance Updation Successful'}))) @require_POST @csrf_exempt def add_comment(request): teacher_id = request.POST.get('teacher_id', 1) comment = request.POST.get('comment', '') teacher = Teacher.objects.get(id=teacher_id) comment_for_school = Comments.objects.create(school_id=teacher.school_id, comment=comment) return modify(HttpResponse(json.dumps({ 'message': 'Comment Added', 'comment_id': comment_for_school.id, }) 
)) @csrf_exempt @require_POST def get_comments(request): manager_id = request.POST.get('manager_id', '') kitchen_id = Manager.objects.get(id=manager_id).kitchen_id schools = School.objects.filter(kitchen_id=kitchen_id) comments = [] for school in schools: comment = {} comment['school'] = school.name comment['messages'] = Comments.objects.filter(school_id=school.id, created=timezone.now().date()).values_list('comment', flat=True) print len(comment['messages']) comments.append(comment) return modify(HttpResponse(json.dumps(comments))) @require_POST @csrf_exempt def add_feedback(request): teacher_id = request.POST.get('teacher_id', 1) feedback = request.POST.get('feedback', '') teacher = Teacher.objects.get(id=teacher_id) feedback_for_school = Feedback.objects.create(school_id=teacher.school_id, feedback=feedback) return modify(HttpResponse(json.dumps({ 'message': 'Comment Added', 'feedback_id': feedback_for_school.id, }) )) @csrf_exempt @require_POST def get_feedback(request): manager_id = request.POST.get('manager_id', '') kitchen_id = Manager.objects.get(id=manager_id).kitchen_id schools = School.objects.filter(kitchen_id=kitchen_id) comments = [] for school in schools: comment = {} comment['school'] = school.name comment['messages'] = Feedback.objects.filter(school_id=school.id, created=timezone.now().date()).values_list('feedback', flat=True) print len(comment['messages']) comments.append(comment) return modify(HttpResponse(json.dumps(comments))) @require_POST @csrf_exempt def add_units(request): teacher_id = request.POST.get('teacher_id', 1) teacher = Teacher.objects.get(id=teacher_id) school_id = teacher.school_id data = request.POST.get('data', '') data = json.loads(data) try: for key, value in data.iteritems(): key = int(key) consumption = SchoolConsumption.objects.create(school_id=school_id, item_id=key) consumption.unit_consumed = value['consumed'] consumption.unit_left = value['left'] consumption.save() return modify(HttpResponse(json.dumps({ 'message': 'Units updated', }) )) except: print traceback.format_exc() return modify(HttpResponse('')) @require_POST def last_seven_days(request): manager_id = request.POST.get('manager_id', 1) kitchen_id = Manager.objects.get(id=manager_id).kitchen_id schools = School.objects.filter(kitchen_id=kitchen_id) for school in schools: school_attendance = Attendance.objects.filter(school_id=school.id, date__range=[ str(datetime.date.today() - datetime.timedelta(days=7)), str(datetime.date.today()) ]) school_consumption = SchoolConsumption.objects.filter(school_id=school.id, date__range=[ str(datetime.date.today() - datetime.timedelta( days=7 )), str(datetime.date.today()) ]) expected_attendance = ExpectedAttendance.objects.filter(school_id=school.id, date__range=[ str(datetime.date.today() - datetime.timedelta( days=7 )), str(datetime.date.today()) ]) expected_consumption = ExpectedConsumption.objects.filter(school_id=school.id, date__range=[ str(datetime.date.today() - datetime.timedelta( days=7 )), str(datetime.date.today()) ]) result_data = [] vals_past = zip(school_attendance, school_consumption) vals_expected = zip(expected_attendance, expected_consumption) for data in vals_past: result_data.append([data[0].primary, data[0].consumption]) for data in vals_expected: result_data.append([data[0].primary, data[0].consumption]) return modify(HttpResponse(json.dumps({'Rice': result_data})))
apache-2.0
-817,404,798,504,317,700
37.200758
117
0.521864
false
seneubert/LHCbPsi2spipi
DaVinci/psi2spipi.py
1
3152
from GaudiConf import IOHelper from Configurables import DaVinci, DecayTreeTuple from DecayTreeTuple.Configuration import * # Stream and stripping line we want to use stream = 'Dimuon' line = 'FullDSTDiMuonPsi2MuMuDetachedLine' rootInTES = '/Event/{0}'.format(stream) tesLoc = '/Event/{0}/Phys/{1}/Particles'.format(stream,line) # Build the decay tree by adding two pions to the psi(2s) # 1) Get the psi(2s) # get the selection(s) created by the stripping from PhysSelPython.Wrappers import Selection from PhysSelPython.Wrappers import SelectionSequence from PhysSelPython.Wrappers import DataOnDemand from StandardParticles import StdLooseMergedPi0, StdLooseResolvedPi0, StdLooseKaons, StdLoosePions psi2sSel = DataOnDemand(Location=tesLoc) #pionSel = DataOnDemand(Location = '/Event/Phys/StdAllNoPIDsPions/Particles') # 2) Get pions #from CommonParticles.StdAllNoPIDsPions import StdAllNoPIDsPions as Pions B_daughters = {'pi+' : '(PT > 250*MeV) & (P > 4000*MeV) & (MIPCHI2DV(PRIMARY) > 3) & (TRCHI2DOF < 5 ) & (TRGHP < 0.47)', 'pi-' : '(PT > 250*MeV) & (P > 4000*MeV) & (MIPCHI2DV(PRIMARY) > 3) & (TRCHI2DOF < 5 ) & (TRGHP < 0.47)', 'J/psi(1S)' : 'ALL'} # 3) Combine into B from Configurables import CombineParticles combCut = "(AM<7000*MeV) & (AM>4750*MeV)" combB = CombineParticles('Combine_B', Inputs = [tesLoc,"Phys/StdLoosePions/Particles"], DecayDescriptor = '[B0]CC -> J/psi(1S) pi+ pi-', DaughtersCuts = B_daughters, CombinationCut = combCut, MotherCut = '(VFASPF(VCHI2PDOF) < 9) & (BPVDIRA>0.999) & (PT>500*MeV) & (BPVLTIME()>0.2*ps) & (BPVIPCHI2()<25) ') Bsel = Selection('Sel_B', Algorithm=combB, RequiredSelections=[psi2sSel, StdLoosePions]) Bseq = SelectionSequence('Seq_B',TopSelection=Bsel) # Create an ntuple to capture D*+ decays from the StrippingLine line dtt = DecayTreeTuple('b2Psi2sPiPi') dtt.Inputs = [Bseq.outputLocation()] dtt.Decay = '[B0]CC -> ^(J/psi(1S) -> ^mu+ ^mu-) ^pi+ ^pi-' dtt.addBranches({'B0' : '[B0]CC -> (J/psi(1S) -> mu+ mu-) pi+ pi-'}) dtt.B0.addTupleTool('TupleToolDecayTreeFitter/DTF') dtt.B0.DTF.constrainToOriginVertex = True dtt.B0.DTF.Verbose = True bssubs = { 'B0 -> ^J/psi(1S) pi+ pi-':'J/psi(2S)', 'B~0 -> ^J/psi(1S) pi+ pi-':'J/psi(2S)', } dtt.B0.DTF.Substitutions = bssubs dtt.B0.DTF.daughtersToConstrain = [ "J/psi(2S)"] # Configure DaVinci seq = GaudiSequencer('MyTupleSeq') seq.Members += [Bseq.sequence()] seq.Members += [dtt] DaVinci().appendToMainSequence([seq]) DaVinci().InputType = 'DST' #DaVinci().RootInTES = rootInTES DaVinci().TupleFile = 'b2psi2spipi.root' DaVinci().DDDBtag = 'dddb-20130111' DaVinci().CondDBtag = 'cond-20130114' DaVinci().PrintFreq = 1000 DaVinci().DataType = '2012' DaVinci().Simulation = False # Only ask for luminosity information when not using simulated data DaVinci().Lumi = not DaVinci().Simulation DaVinci().EvtMax = -1 # Use the local input data #IOHelper().inputFiles([ # './00035742_00000002_1.allstreams.dst' #], clear=True)
gpl-2.0
-8,127,897,114,153,845,000
36.975904
138
0.668782
false
bluesquall/rotpy
rotpy/random.py
1
2344
#!/usr/bin/env python """ Random submodule for `rotpy` ============================ This module provides methods for generating random rotations. note: Methods defined here _may_ be moved to separate modules corresponding to each rotation parametrization in the future. That will depend on where we take the overall package architecture. """ import numpy as np def rotation_matrix(p=3, N=1, algorithm='Cayley'): """Random matrix in SO(p). Parameters ---------- p : int Dimension of random rotation matrix. N : int Number of matrices to generate. Returns ------- R : ndarray (N-by-N) Random matrix in SO(p) .. note:: The current code does not make matrices _uniformly_ random. """ # TODO support returning multiple matrices if algorithm == 'Cayley': Q = skew_matrix(p) I = np.eye(p) R = np.linalg.solve(I - Q, I + Q) else: err = "Requested algorithm: {0} is not implemented." raise NotImplementedError(err.format(algorithm)) return R def skew_matrix(p=3, algorithm='axiom'): """Uniformly random skew-symmetric matrix. Parameters ---------- p : int Dimension of random skew-symmetric matrix. Returns ------- Q : ndarray (p-by-p) Uniformly random skew-symmetric matrix. """ if algorithm == "axiom": A = np.random.uniform(size=[p, p]) Q = A - A.T elif algorithm == "vector": Q = vector_to_skew_matrix(np.random.uniform(size=p)) else: err = "Requested algorithm: {0} is not implemented." raise NotImplementedError(err.format(algorithm)) return Q def vector_to_skew_matrix(v): """Skew-symmetric matrix. Parameters ---------- v : array_like Vector to skew-symmetrize. Returns ------- Q : ndarray Skew-symmetric matrix. .. note:: This method is useful beyond generating random rotations, so it may be moved to a different module in the future. """ if len(v) != 3: raise NotImplementedError('only 3D is implemented') else: Q = np.zeros([len(v), len(v)]) Q[1, 2] = -v[0] Q[2, 1] = v[0] Q[0, 2] = -v[1] Q[2, 0] = v[1] Q[0, 1] = -v[2] Q[1, 0] = v[2] return Q
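

if __name__ == "__main__":
    # Quick self-check sketch (not part of the original module): draws one
    # random rotation with the default Cayley construction and verifies the
    # two defining properties of SO(3), orthogonality and determinant +1,
    # up to floating-point tolerance.
    R = rotation_matrix(p=3)
    assert np.allclose(R.dot(R.T), np.eye(3))
    assert np.isclose(np.linalg.det(R), 1.0)
    print(R)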
mit
-5,706,193,940,483,044,000
22.918367
76
0.570819
false
mirrorcoloured/slcypi
MA/draw_matches.py
1
2544
import cv2
import matplotlib.pyplot as plt
import numpy as np


def draw_matches(img1, kp1, img2, kp2, matches, color=None):
    """Draws lines between matching keypoints of two images.
    Keypoints not in a matching pair are not drawn.
    Places the images side by side in a new image and draws circles
    around each keypoint, with line segments connecting matching pairs.
    You can tweak the r, thickness, and figsize values as needed.
    Args:
        img1: An openCV image ndarray in a grayscale or color format.
        kp1: A list of cv2.KeyPoint objects for img1.
        img2: An openCV image ndarray of the same format and with the same
        element type as img1.
        kp2: A list of cv2.KeyPoint objects for img2.
        matches: A list of DMatch objects whose trainIdx attribute refers to
        img1 keypoints and whose queryIdx attribute refers to img2 keypoints.
        color: The color of the circles and connecting lines drawn on the images.
        A 3-tuple for color images, a scalar for grayscale images. If None, these
        values are randomly generated.
    """
    # We're drawing them side by side. Get dimensions accordingly.
    # Handle both color and grayscale images.
    if len(img1.shape) == 3:
        new_shape = (max(img1.shape[0], img2.shape[0]), img1.shape[1]+img2.shape[1], img1.shape[2])
    elif len(img1.shape) == 2:
        new_shape = (max(img1.shape[0], img2.shape[0]), img1.shape[1]+img2.shape[1])
    new_img = np.zeros(new_shape, type(img1.flat[0]))
    # Place images onto the new image.
    new_img[0:img1.shape[0],0:img1.shape[1]] = img1
    new_img[0:img2.shape[0],img1.shape[1]:img1.shape[1]+img2.shape[1]] = img2

    # Draw lines between matches. Make sure to offset kp coords in second image appropriately.
    r = 15
    thickness = 2
    if color:
        c = color
    for m in matches:
        # Generate random color for RGB/BGR and grayscale images as needed.
        if not color:
            c = np.random.randint(0,256,3) if len(img1.shape) == 3 else np.random.randint(0,256)
        # So the keypoint locs are stored as a tuple of floats. cv2.line(), like most other things,
        # wants locs as a tuple of ints.
        end1 = tuple(np.round(kp1[m.trainIdx].pt).astype(int))
        end2 = tuple(np.round(kp2[m.queryIdx].pt).astype(int) + np.array([img1.shape[1], 0]))
        cv2.line(new_img, end1, end2, c, thickness)
        cv2.circle(new_img, end1, r, c, thickness)
        cv2.circle(new_img, end2, r, c, thickness)

    plt.figure(figsize=(15,15))
    plt.imshow(new_img)
    plt.show()
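

if __name__ == "__main__":
    # Usage sketch (not part of the original snippet): matches ORB features
    # between two images and hands the result to draw_matches().  The image
    # paths are placeholders, and cv2.ORB_create assumes an OpenCV 3+ build.
    # Note the argument order to bf.match(): draw_matches() expects
    # DMatch.trainIdx to index kp1/img1 and DMatch.queryIdx to index
    # kp2/img2, so img2's descriptors go in as the query set.
    img1 = cv2.imread("left.png", cv2.IMREAD_GRAYSCALE)
    img2 = cv2.imread("right.png", cv2.IMREAD_GRAYSCALE)

    orb = cv2.ORB_create()
    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)

    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = sorted(bf.match(des2, des1), key=lambda m: m.distance)[:30]

    draw_matches(img1, kp1, img2, kp2, matches)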
mit
-8,938,002,548,867,191,000
48.901961
100
0.65173
false
openstack/senlin
senlin/common/service.py
1
2623
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from oslo_service import service from oslo_service import sslutils from oslo_service import wsgi from oslo_utils import netutils import senlin.conf from senlin import version CONF = senlin.conf.CONF LOG = logging.getLogger(__name__) class Service(service.Service): def __init__(self, name, host, topic, threads=None): threads = threads or 1000 super(Service, self).__init__(threads) self.name = name self.host = host self.topic = topic def start(self): LOG.info('Starting %(name)s service (version: %(version)s)', { 'name': self.name, 'version': version.version_info.version_string() }) super(Service, self).start() def stop(self, graceful=True): LOG.info('Stopping %(name)s service', {'name': self.name}) super(Service, self).stop(graceful) class WSGIService(service.Service): def __init__(self, app, name, listen, max_url_len=None): super(WSGIService, self).__init__(CONF.senlin_api.threads) self.app = app self.name = name self.listen = listen self.servers = [] for address in self.listen: host, port = netutils.parse_host_port(address) server = wsgi.Server( CONF, name, app, host=host, port=port, pool_size=CONF.senlin_api.threads, use_ssl=sslutils.is_enabled(CONF), max_url_len=max_url_len ) self.servers.append(server) def start(self): for server in self.servers: server.start() super(WSGIService, self).start() def stop(self, graceful=True): for server in self.servers: server.stop() super(WSGIService, self).stop(graceful) def wait(self): for server in self.servers: server.wait() super(WSGIService, self).wait()
apache-2.0
5,670,647,009,074,642,000
29.5
78
0.60732
false
eHanlin/csv-map-converter
csv_map_converter/converter.py
1
2150
# -*- coding: utf-8 -*- from .parsers.parser import CsvMapParser from .models.fields.base import ListField from .models.fields import ModelDescriptor class ConvertResult(object): def __init__(self, map_result, models = None): self.__map_result = map_result self.__models = models @property def models(self): return self.__models @property def map_result(self): return self.__map_result class Converter(object): """ This is csv converter. """ def __init__(self): self.__csv_map_parser = CsvMapParser() def __convert_parsing_result_to_models(self, csv_map_result, Model): titles = csv_map_result.get_titles() data_rows = csv_map_result.get_content() model_descriptor = ModelDescriptor(Model) models = [] for data_row in data_rows: model = Model() for title in titles: field = model_descriptor.get(title) if field: cell_data = data_row.get(title) if not(isinstance(field, ListField)) and cell_data and len(cell_data): cell_data = cell_data[0] if not(cell_data) and field.default: field_value = field.default else: field_value = field.to_python(cell_data) #setattr(model, title, field_value) model_descriptor.set(model, title, field_value) models.append(model) return models def convert(self, lines, Model = None, start_row = 1, title_row = 0): """ Parse lines of titles and data to array map. :Args - lines - two dimensional array of string. - start_row - start parsing data row index. - title_row - title row index. """ csv_map_result = self.__csv_map_parser.parse(lines, start_row = start_row, title_row = title_row) if Model: models = self.__convert_parsing_result_to_models(csv_map_result, Model) else: models = None return ConvertResult(csv_map_result, models)
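

if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): converts a small
    # two-dimensional array of strings without supplying a Model class, so the
    # result carries only the parsed map (models stays None).  The sample rows
    # are made up for the example.  Because this module uses relative imports,
    # run it as a module: ``python -m csv_map_converter.converter``.
    lines = [
        ["name", "score"],
        ["alice", "90"],
        ["bob", "82"],
    ]
    result = Converter().convert(lines)
    print(result.models)      # None - no Model was supplied
    print(result.map_result)  # CsvMapParser result wrapping the data rows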
mit
5,686,770,578,526,899,000
28.054054
115
0.568837
false
Pyfa-fit/EVE_Gnosis
examples/Vengeance.py
1
2031
# noinspection PyPackageRequirements from EVE_Gnosis.simulations.capacitor import Capacitor from EVE_Gnosis.formulas.formulas import Formulas from datetime import datetime module_list = [] capacitor_amount = 375 capacitor_recharge = 105468.75 print("Start time: ", datetime.now().time()) module_list.append( { 'Amount': 10, 'CycleTime': 2500, 'Charges': False, 'ReloadTime': False, } ) # Small T2 Nos module_list.append( { 'Amount': -1.5, 'CycleTime': 5000, 'Charges': False, 'ReloadTime': False, } ) # J5b Enduring Warp Scrambler module_list.append( { 'Amount': -1.5, 'CycleTime': 5000, 'Charges': False, 'ReloadTime': False, } ) # X5 Enduring Statis Webifier module_list.append( { 'Amount': -40, 'CycleTime': 4500, 'Charges': False, 'ReloadTime': False, } ) # Small Ancilliary Armor Repairer module_list.append( { 'Amount': -10.5, 'CycleTime': 5000, 'Charges': False, 'ReloadTime': False, } ) # Reactive Armor Hardener return_value = Capacitor.capacitor_time_simulator(module_list, capacitor_amount, capacitor_recharge) return_matrix = Formulas.capacitor_shield_regen_matrix(capacitor_amount, capacitor_recharge) pass # Add break here if you want to see anything. print("End time: ", datetime.now().time()) ''' Note that not all modules effect cap. Even though the full fit is below,most of the modules have no impact on cap. [Vengeance, Heavy Tackle] Energized Adaptive Nano Membrane II Small Ancillary Armor Repairer Reactive Armor Hardener True Sansha Adaptive Nano Plating 5MN Quad LiF Restrained Microwarpdrive J5b Enduring Warp Scrambler X5 Enduring Stasis Webifier Rocket Launcher II, Nova Rage Rocket Rocket Launcher II, Nova Rage Rocket Rocket Launcher II, Nova Rage Rocket Rocket Launcher II, Nova Rage Rocket Small Energy Nosferatu II Small Anti-Thermal Pump II Small Auxiliary Nano Pump II '''
gpl-3.0
-3,939,051,601,150,690,300
22.616279
115
0.677006
false
geomagpy/MARTAS
libmqtt/mysqlprotocol.py
1
15238
from __future__ import print_function from __future__ import absolute_import # ################################################################### # Import packages # ################################################################### import re # for interpretation of lines import struct # for binary representation import socket # for hostname identification import string # for ascii selection import numpy as np from datetime import datetime, timedelta from twisted.protocols.basic import LineReceiver from twisted.python import log from core import acquisitionsupport as acs from magpy.stream import KEYLIST import magpy.opt.cred as mpcred import magpy.database as mdb ## MySQL protocol ## -------------------- class MySQLProtocol(object): """ Protocol to read SQL data (usually from ttyACM0) MySQL protocol reads data from a MagPy database. All Sensors which receive continuous data updates are identified and added to the sensors.cfg list (marked as inactive). Here data can be selected and deselected. Update requires the removal of all data of a specific database from sensors.cfg. MySQL is an active protocol, requesting data at defined periods. """ ## need a reference to our WS-MCU gateway factory to dispatch PubSub events ## def __init__(self, client, sensordict, confdict): self.client = client #self.wsMcuFactory = wsMcuFactory self.sensordict = sensordict self.confdict = confdict self.count = 0 ## counter for sending header information self.sensor = sensordict.get('sensorid') self.hostname = socket.gethostname() self.printable = set(string.printable) self.datalst = [] self.datacnt = 0 self.metacnt = 10 self.sensorlist = [] self.revision = self.sensordict.get('revision','') try: self.requestrate = int(self.sensordict.get('rate','-')) except: self.requestrate = 30 self.deltathreshold = confdict.get('timedelta') # debug mode debugtest = confdict.get('debug') self.debug = False if debugtest == 'True': log.msg(' DEBUG - {}: Debug mode activated.'.format(self.sensordict.get('protocol'))) self.debug = True # prints many test messages else: log.msg(' -> Debug mode = {}'.format(debugtest)) # QOS self.qos=int(confdict.get('mqttqos',0)) if not self.qos in [0,1,2]: self.qos = 0 log.msg(" -> setting QOS:", self.qos) # Database specific self.db = self.sensor # get existing sensors for the relevant board log.msg(" -> IMPORTANT: MySQL assumes that database credentials ") log.msg(" are saved locally using magpy.opt.cred with the same name as database") try: self.db = mdb.mysql.connect(host=mpcred.lc(self.sensor,'host'),user=mpcred.lc(self.sensor,'user'),passwd=mpcred.lc(self.sensor,'passwd'),db=self.sensor) self.connectionMade(self.sensor) except: self.connectionLost(self.sensor,"Database could not be connected - check existance/credentials") return sensorlist = self.GetDBSensorList(self.db, searchsql='') self.sensor = '' existinglist = acs.GetSensors(confdict.get('sensorsconf'),identifier='$') # if there is a sensor in existinglist which is not an active sensor, then drop it for sensdict in existinglist: if sensdict.get('sensorid','') in sensorlist: self.sensorlist.append(sensdict) self.lastt = [None]*len(self.sensorlist) #print ("Existinglist") #print ("----------------------------------------------------------------") #print (self.sensorlist) def connectionMade(self, dbname): log.msg(' -> Database {} connected.'.format(dbname)) def connectionLost(self, dbname, reason=''): log.msg(' -> Database {} lost/not connectect. 
({})'.format(dbname,reason)) # implement counter and add three reconnection events here def GetDBSensorList(self, db, searchsql=''): """ DESCRIPTION: Will connect to data base and download all data id's satisfying searchsql and containing data less then 5*sampling rate old. PARAMETER: existinglist: [list] [[1,2,...],['BM35_xxx_0001','SH75_xxx_0001',...]] idnum is stored in sensordict['path'] (like ow) """ now = datetime.utcnow() senslist1, senslist2, senslist3 = [],[],[] # 1. Get search criteria (group and dataid): searchdataid = 'DataID LIKE "%{}"'.format(self.sensordict.get('revision','')) searchgroup = 'SensorGroup LIKE "%{}%"'.format(self.sensordict.get('sensorgroup','')) # 2. Perfom search for DataID: senslist1 = mdb.dbselect(db, 'SensorID', 'DATAINFO', searchdataid) if self.debug: log.msg(" -> DEBUG - Search DATAID {}: Found {} tables".format(self.sensordict.get('revision',''),len(senslist1))) # 3. Perfom search for group: senslist2 = mdb.dbselect(db, 'SensorID', 'SENSORS', searchgroup) if self.debug: log.msg(" -> DEBUG - Searching for GROUP {}: Found {} tables".format(self.sensordict.get('sensorgroup',''),len(senslist2))) # 4. Combine searchlists senslist = list(set(senslist1).intersection(senslist2)) if self.debug: log.msg(" -> DEBUG - Fullfilling both search criteria: Found {} tables".format(len(senslist))) # 5. Check tables with above search criteria for recent data: for sens in senslist: datatable = sens + "_" + self.sensordict.get('revision','') lasttime = mdb.dbselect(db,'time',datatable,expert="ORDER BY time DESC LIMIT 1") try: lt = datetime.strptime(lasttime[0],"%Y-%m-%d %H:%M:%S.%f") delta = now-lt if self.debug: log.msg(" -> DEBUG - Sensor {}: Timediff = {} sec from now".format(sens, delta.total_seconds())) if delta.total_seconds() < self.deltathreshold: senslist3.append(sens) except: if self.debug: log.msg(" -> DEBUG - No data table?") pass # 6. 
Obtaining relevant sensor data for each table log.msg(" -> Appending sensor information to sensors.cfg") for sens in senslist3: values = {} values['sensorid'] = sens values['protocol'] = 'MySQL' values['port'] = '-' cond = 'SensorID = "{}"'.format(sens) vals = mdb.dbselect(db,'SensorName,SensorID,SensorSerialNum,SensorRevision,SensorGroup,SensorDescription,SensorTime','SENSORS',condition=cond)[0] vals = ['-' if el==None else el for el in vals] values['serialnumber'] = vals[2] values['name'] = vals[0] values['revision'] = vals[3] values['mode'] = 'active' pier = mdb.dbselect(db,'DataPier','DATAINFO',condition=cond)[0] values['pierid'] = pier values['ptime'] = vals[6] values['sensorgroup'] = vals[4] values['sensordesc'] = vals[5].replace(',',';') success = acs.AddSensor(self.confdict.get('sensorsconf'), values, block='SQL') return senslist3 def sendRequest(self): """ source:mysql: Method to obtain data from table """ t1 = datetime.utcnow() outdate = datetime.strftime(t1, "%Y-%m-%d") filename = outdate if self.debug: log.msg(" -> DEBUG - Sending periodic request ...") def getList(sql): cursor = self.db.cursor() try: cursor.execute(sql) except mysql.IntegrityError as message: return message except mysql.Error as message: return message except: return 'dbgetlines: unkown error' head = cursor.fetchall() keys = list(np.transpose(np.asarray(head))[0]) return keys # get self.sensorlist # get last timestamps # read all data for each sensor since last timestamp # send that and store last timestamp for index,sensdict in enumerate(self.sensorlist): sensorid = sensdict.get('sensorid') if self.debug: log.msg(" -> DEBUG - dealing with sensor {}".format(sensorid)) # 1. Getting header # ----------------- # load keys, elements and units #header = "# MagPyBin %s %s %s %s %s %s %d" % (sensorid, key, ele, unit, multplier, packcode, struct.calcsize('<'+packcode)) dataid = sensorid+'_'+self.revision keyssql = 'SHOW COLUMNS FROM %s' % (dataid) keystab = getList(keyssql) if 'time' in keystab: keystab.remove('time') if 'flag' in keystab: keystab.remove('flag') if 'typ' in keystab: keystab.remove('typ') if 'comment' in keystab: keystab.remove('comment') keys = ','.join(keystab) if self.debug: log.msg(" -> DEBUG - requesting header {}".format(sensorid)) sql1 = 'SELECT SensorElements FROM SENSORS WHERE SensorID LIKE "{}"'.format(sensorid) sql2 = 'SELECT Sensorkeys FROM SENSORS WHERE SensorID LIKE "{}"'.format(sensorid) sql3 = 'SELECT ColumnUnits FROM DATAINFO WHERE SensorID LIKE "{}"'.format(sensorid) sql4 = 'SELECT ColumnContents FROM DATAINFO WHERE SensorID LIKE "{}"'.format(sensorid) try: elem = getList(sql1)[0].split(',') except: elem =[] try: keyssens = getList(sql2)[0].split(',') except: keyssens =[] try: unit = getList(sql3)[0].split(',') except: unit =[] try: cont = getList(sql4)[0].split(',') except: cont =[] units, elems = [], [] for key in keystab: try: pos1 = keyssens.index(key) ele = elem[pos1] except: ele = key elems.append(ele) try: pos2 = cont.index(ele) units.append(unit[pos2]) except: units.append('None') if self.debug: log.msg(" -> DEBUG - creating head line {}".format(sensorid)) multplier = '['+','.join(map(str, [10000]*len(keystab)))+']' packcode = '6HL'+''.join(['q']*len(keystab)) header = ("# MagPyBin {} {} {} {} {} {} {}".format(sensorid, '['+','.join(keystab)+']', '['+','.join(elems)+']', '['+','.join(units)+']', multplier, packcode, struct.calcsize('<'+packcode))) # 2. 
Getting dict sql = 'SELECT DataSamplingRate FROM DATAINFO WHERE SensorID LIKE "{}"'.format(sensorid) sr = float(getList(sql)[0]) coverage = int(self.requestrate/sr)+120 # 3. Getting data # get data and create typical message topic # based on sampling rate and collection rate -> define coverage li = sorted(mdb.dbselect(self.db, 'time,'+keys, dataid, expert='ORDER BY time DESC LIMIT {}'.format(int(coverage)))) if not self.lastt[index]: self.lastt[index]=li[0][0] # drop newdat = False newli = [] for elem in li: if elem[0] == self.lastt[index]: newdat = True if newdat: newli.append(elem) if not len(newli) > 0: # if last time not included in li then newli will be empty # in this case just add the list for elem in li: newli.append(elem) for dataline in newli: timestamp = dataline[0] data_bin = None datearray = '' try: datearray = acs.timeToArray(timestamp) for i,para in enumerate(keystab): try: val=int(float(dataline[i+1])*10000) except: val=999990000 datearray.append(val) data_bin = struct.pack('<'+packcode,*datearray) # little endian except: log.msg('Error while packing binary data') if not self.confdict.get('bufferdirectory','') == '' and data_bin: acs.dataToFile(self.confdict.get('bufferdirectory'), sensorid, filename, data_bin, header) if self.debug: log.msg(" -> DEBUG - sending ... {}".format(','.join(list(map(str,datearray))), header)) self.sendData(sensorid,','.join(list(map(str,datearray))),header,len(newli)-1) self.lastt[index]=li[-1][0] t2 = datetime.utcnow() if self.debug: log.msg(" -> DEBUG - Needed {}".format(t2-t1)) def sendData(self, sensorid, data, head, stack=None): topic = self.confdict.get('station') + '/' + sensorid senddata = False if not stack: stack = int(self.sensordict.get('stack')) coll = stack if coll > 1: self.metacnt = 1 # send meta data with every block if self.datacnt < coll: self.datalst.append(data) self.datacnt += 1 else: senddata = True data = ';'.join(self.datalst) self.datalst = [] self.datacnt = 0 else: senddata = True if senddata: if self.count == 0: # get all values initially from the database #add = "SensoriD:{},StationID:{},DataPier:{},SensorModule:{},SensorGroup:{},SensorDecription:{},DataTimeProtocol:{}".format( sensorid, self.confdict.get('station',''),self.sensordict.get('pierid',''), self.sensordict.get('protocol',''),self.sensordict.get('sensorgroup',''),self.sensordict.get('sensordesc',''), self.sensordict.get('ptime','') ) #self.client.publish(topic+"/dict", add, qos=self.qos) self.client.publish(topic+"/meta", head, qos=self.qos) if self.debug: log.msg(" -> DEBUG - Publishing meta --", topic, head) self.client.publish(topic+"/data", data, qos=self.qos) if self.debug: log.msg(" -> DEBUG - Publishing data") self.count += 1 if self.count >= self.metacnt: self.count = 0
gpl-3.0
1,586,417,300,568,176,400
40.747945
365
0.533863
false
praekelt/seed-identity-store
identities/serializers.py
1
1946
from django.contrib.auth.models import Group, User from rest_framework import serializers from rest_hooks.models import Hook from .models import Identity, OptIn, OptOut class UserSerializer(serializers.ModelSerializer): class Meta: model = User fields = ("url", "username", "email", "groups") class GroupSerializer(serializers.ModelSerializer): class Meta: model = Group fields = ("url", "name") class CreateUserSerializer(serializers.Serializer): email = serializers.EmailField() class IdentitySerializer(serializers.ModelSerializer): class Meta: model = Identity read_only_fields = ("created_at", "updated_at") fields = ( "id", "version", "details", "communicate_through", "operator", "created_at", "created_by", "updated_at", "updated_by", ) class OptOutSerializer(serializers.ModelSerializer): class Meta: model = OptOut fields = ( "id", "optout_type", "identity", "address_type", "address", "request_source", "requestor_source_id", "reason", "created_at", ) read_only_fields = ("created_by",) class OptInSerializer(serializers.ModelSerializer): class Meta: model = OptIn fields = ( "id", "identity", "address_type", "address", "request_source", "requestor_source_id", "created_at", ) read_only_fields = ("created_by",) class HookSerializer(serializers.ModelSerializer): class Meta: model = Hook read_only_fields = ("user",) fields = "__all__" class AddressSerializer(serializers.Serializer): address = serializers.CharField(max_length=500)
bsd-3-clause
-5,285,397,995,805,764,000
23.024691
55
0.554985
false
khchine5/lino-welfare
lino_welfare/modlib/debts/models.py
1
25056
# -*- coding: UTF-8 -*- # Copyright 2012-2017 Luc Saffre # This file is part of Lino Welfare. # # Lino Welfare is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # Lino Welfare is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public # License along with Lino Welfare. If not, see # <http://www.gnu.org/licenses/>. """ Database models for `lino_welfare.modlib.debts`. """ from __future__ import unicode_literals from builtins import str import logging logger = logging.getLogger(__name__) import decimal from django.db import models from django.conf import settings from django.core.exceptions import ValidationError from django.utils.encoding import force_text from lino.api import dd, rt, _, pgettext from etgen.html import E from lino import mixins from lino_xl.lib.accounts.choicelists import AccountTypes from lino_xl.lib.excerpts.mixins import Certifiable from lino.modlib.users.mixins import UserAuthored from .fields import PeriodsField from .mixins import SequencedBudgetComponent, ActorBase, MainActor from .choicelists import TableLayouts from django.db import transaction if False: @transaction.commit_on_success def bulk_create_with_manual_ids(model, obj_list): """ Originally copied from http://stackoverflow.com/a/13143062/407239 """ last = model.objects.all().aggregate(models.Max('id'))['id__max'] if last is None: id_start = 1 else: id_start = last + 1 for i, obj in enumerate(obj_list): obj.id = id_start + i # print 20130508, [dd.obj2str(o) for o in obj_list] return model.objects.bulk_create(obj_list) class Group(mixins.BabelNamed): "A group of accounts." class Meta: verbose_name = _("Account Group") verbose_name_plural = _("Account Groups") # ref = dd.NullCharField( # max_length=settings.SITE.plugins.debts.ref_length, unique=True) ref = models.CharField( max_length=settings.SITE.plugins.debts.ref_length, blank=True, null=True, unique=True) account_type = AccountTypes.field(blank=True) entries_layout = TableLayouts.field(_("Budget entries layout"), blank=True) @dd.python_2_unicode_compatible class Account(mixins.BabelNamed, mixins.Sequenced, mixins.Referrable): """An **account** is an item of an account chart used to collect ledger transactions or other accountable items. .. attribute:: name The multilingual designation of this account, as the users see it. .. attribute:: group The *account group* to which this account belongs. This must point to an instance of :class:`Group`. .. attribute:: seqno The sequence number of this account within its :attr:`group`. .. attribute:: ref An optional unique name which can be used to reference a given account. .. attribute:: type The *account type* of this account. This must point to an item of :class:`lino_xl.lib.accounts.AccountTypes`. 
""" ref_max_length = settings.SITE.plugins.debts.ref_length class Meta: verbose_name = _("Account") verbose_name_plural = _("Accounts") ordering = ['ref'] group = dd.ForeignKey('debts.Group') type = AccountTypes.field() required_for_household = models.BooleanField( _("Required for Households"), default=False) required_for_person = models.BooleanField( _("Required for Persons"), default=False) periods = PeriodsField(_("Periods")) default_amount = dd.PriceField(_("Default amount"), blank=True, null=True) def full_clean(self, *args, **kw): if self.group_id is not None: if not self.ref: qs = rt.models.debts.Account.objects.all() self.ref = str(qs.count() + 1) if not self.name: self.name = self.group.name self.type = self.group.account_type super(Account, self).full_clean(*args, **kw) def __str__(self): return "(%(ref)s) %(title)s" % dict( ref=self.ref, title=settings.SITE.babelattr(self, 'name')) @dd.python_2_unicode_compatible class Budget(UserAuthored, Certifiable, mixins.Duplicable): """A document which expresses the financial situation of a partner at a given date. """ class Meta: verbose_name = _("Budget") verbose_name_plural = _("Budgets") quick_search_fields = 'partner__name' date = models.DateField( _("Date"), blank=True, default=dd.today) partner = dd.ForeignKey('contacts.Partner') print_todos = models.BooleanField( _("Print to-do list"), default=False, help_text=_("""\ Einträge im Feld "To-do" werden nur ausgedruckt, wenn die Option "To-dos drucken" des Budgets angekreuzt ist. Diese Option wird aber momentan noch ignoriert (d.h. To-do-Liste wird gar nicht ausgedruckt), weil wir noch überlegen müssen, *wie* sie ausgedruckt werden sollen. Vielleicht mit Fußnoten?""")) print_empty_rows = models.BooleanField( _("Print empty rows"), default=False, help_text=_("Check this to print also empty rows " "for later completion.")) include_yearly_incomes = models.BooleanField( _("Include yearly incomes"), default=False, help_text=_("Check this to include yearly incomes in the " "Debts Overview table of this Budget.")) intro = dd.RichTextField(_("Introduction"), format="html", blank=True) conclusion = dd.RichTextField(_("Conclusion"), format="html", blank=True) dist_amount = dd.PriceField( _("Distributable amount"), default=120, help_text=_("The total monthly amount available " "for debts distribution.")) def __str__(self): if self.pk is None: return str(_("New")) + ' ' + str(self._meta.verbose_name) return force_text( _("Budget %(pk)d for %(partner)s") % dict(pk=self.pk, partner=self.partner)) @classmethod def get_certifiable_fields(cls): return """date partner user intro dist_amount include_yearly_incomes print_empty_rows print_todos""" def get_actors(self): """Return a list of the actors of this budget.""" attname = "_cached_actors" if hasattr(self, attname): return getattr(self, attname) l = list(self.actor_set.all()) if len(l) > 0: main_header = _("Common") else: main_header = _("Amount") l.insert(0, MainActor(self, main_header)) setattr(self, attname, l) return l def get_actor_index(self, actor): for i, a in enumerate(self.get_actors()): if actor == a: return i raise Exception("No actor '%s' in %s" % (actor, self)) def get_actor(self, n): l = self.get_actors() if len(l) > n: return l[n] else: return None def get_print_language(self): if self.partner: return self.partner.language return super(Budget, self).get_print_language() @property def actor1(self): return self.get_actor(0) @property def actor2(self): return self.get_actor(1) @property def actor3(self): return self.get_actor(2) def 
entry_groups(self, ar, types=None, **kw): """Yield the **entry groups** for this budget, i.e. one item for each account group for which this budget has some data. :types: an optional string specifying a set of one-letter account type names. See :class:`AccountTypes <lino_xl.lib.accounts.AccountTypes>`. Each entry group is encapsulated as a volatile helper object :class:`lino_welfare.modlib.debts.ui.EntryGroup`. """ Group = rt.models.debts.Group kw.update(entries_layout__gt='') if types is not None: kw.update( account_type__in=[AccountTypes.items_dict[t] for t in types]) for g in Group.objects.filter(**kw).order_by('ref'): eg = EntryGroup(self, g, ar) if eg.has_data(): yield eg def unused_account_groups(self, types=None, **kw): """Yield all AccountGroups which have at least one entry in this Budget. Parameters: types: an optional string specifying a set of one-letter account type names. See :class: `AccountTypes <lino_xl.lib.accounts.AccountTypes>`. """ if types is not None: kw.update(account_type__in=[AccountTypes.items_dict[t] for t in types]) Group = rt.models.debts.Group for g in Group.objects.filter(**kw).order_by('ref'): if Entry.objects.filter(budget=self, account__group=g).count(): yield g def unused_entries_by_group(self, ar, group, **kw): """Return a TableRequest showing the entries of this budget for the given `group`, using the table layout depending on AccountType. Parameters: ar: the ActionRequest group: an instance of :class:`debts.Group <lino_welfare.modlib.debts.models.Group>`. """ t = entries_table_for_group(group) # print '20130327 entries_by_group', self, t if t is None: return None ar = ar.spawn(t, master_instance=self, title=str(group), filter=models.Q(account__group=group), **kw) # print 20120606, sar return ar def sum(self, fldname, types=None, exclude=None, *args, **kw): """Compute and return the sum of `fldname` (either ``amount`` or `monthly_rate` """ fldnames = [fldname] if types is not None: kw.update(account_type__in=[AccountTypes.items_dict[t] for t in types]) rv = decimal.Decimal(0) kw.update(budget=self) qs = Entry.objects.filter(*args, **kw) if exclude is not None: qs = qs.exclude(**exclude) for e in qs.annotate(models.Sum(fldname)): amount = decimal.Decimal(0) for n in fldnames: a = getattr(e, n + '__sum', None) if a is not None: amount += a if e.periods != 1: amount = amount / decimal.Decimal(e.periods) rv += amount return rv def after_ui_save(self, ar, cw): """ Called after successful save() """ self.fill_defaults(ar) def fill_defaults(self, ar=None): """ If the budget is empty, fill it with default entries by copying the master_budget. 
""" Entry = rt.models.debts.Entry Actor = rt.models.debts.Actor Account = rt.models.debts.Account if not self.partner_id or self.printed_by is not None: return if self.entry_set.all().count() > 0: return self.save() entries = [] master_budget = settings.SITE.site_config.master_budget if master_budget is None: flt = models.Q(required_for_household=True) flt = flt | models.Q(required_for_person=True) seqno = 0 for acc in Account.objects.filter(flt).order_by('ref'): seqno += 1 e = Entry(account=acc, budget=self, seqno=seqno, account_type=acc.type) e.account_changed(ar) # e.periods = e.account.periods if e.account.default_amount: e.amount = e.account.default_amount entries.append(e) else: for me in master_budget.entry_set.order_by( 'seqno').select_related(): e = Entry(account=me.account, budget=self, account_type=me.account_type, seqno=me.seqno, periods=me.periods, amount=me.amount) e.account_changed(ar) entries.append(e) if False: # fails in Django 1.6 bulk_create_with_manual_ids(Entry, entries) else: for e in entries: e.full_clean() e.save() if self.actor_set.all().count() == 0: household = self.partner.get_mti_child('household') if household: mr = False mrs = False for m in household.member_set.filter(person__isnull=False): # for m in household.member_set.all(): # if m.role and m.role.header: # header = m.role.header if m.person.gender == dd.Genders.male and not mr: header = str(_("Mr.")) mr = True elif m.person.gender == dd.Genders.female and not mrs: header = str(_("Mrs.")) mrs = True else: header = '' a = Actor(budget=self, partner=m.person, header=header) a.full_clean() a.save() @dd.virtualfield(dd.PriceField(_("Total debt"))) def total_debt(self, ar): if ar is None: return None return self.sum('amount', 'L') @dd.htmlbox(_("Entered data")) def data_box(self, ar): if ar is None: return '' # return E.div(*tuple(ar.story2html(self.data_story(ar)))) return ar.story2html(self.data_story(ar)) @dd.htmlbox(pgettext("debts", "Summary")) def summary_box(self, ar): if ar is None: return '' # return E.div(*tuple(ar.story2html(self.summary_story(ar)))) return ar.story2html(self.summary_story(ar)) def data_story(self, ar): """Yield a sequence of story items about the entered data.""" # logger.info("20141211 insert_story") def render(sar): if sar.renderer is None: raise Exception("%s has no renderer", sar) if sar.get_total_count(): yield E.h3(sar.get_title()) yield sar for eg in self.entry_groups(ar): yield render(eg.action_request) def summary_story(self, ar): def render(t): sar = ar.spawn(t, master_instance=self) if sar.get_total_count(): yield E.h2(str(sar.get_title())) yield sar yield render(ResultByBudget) yield render(DebtsByBudget) yield render(AssetsByBudget) yield render(DistByBudget) class Actor(ActorBase, SequencedBudgetComponent): """An **actor** of a budget is a partner who is part of the household for which the budget has been established. """ class Meta: verbose_name = _("Budget Actor") verbose_name_plural = _("Budget Actors") allow_cascaded_delete = ['budget'] partner = dd.ForeignKey('contacts.Partner', blank=True) header = models.CharField(_("Header"), max_length=20, blank=True) remark = dd.RichTextField(_("Remark"), format="html", blank=True) def save(self, *args, **kw): if not self.header: self.header = _("Actor") + " " + str(self.seqno) super(Actor, self).save(*args, **kw) class Entry(SequencedBudgetComponent): """A detail row of a :class:`Budget`. .. attribute:: budget The :class:`Budget` who contains this entry. .. attribute:: amount The amount of money. 
An empty amount is different from a zero amount in that the latter will be printed while the former not. .. attribute:: account The related :class:`Account`. """ class Meta: verbose_name = _("Budget Entry") verbose_name_plural = _("Budget Entries") # unique_together = ['budget','account','name'] # unique_together = ['actor','account'] allow_cascaded_delete = ['budget'] # group = dd.ForeignKey(AccountGroup) account_type = AccountTypes.field(blank=True) account = dd.ForeignKey('debts.Account') partner = dd.ForeignKey('contacts.Partner', blank=True, null=True) # name = models.CharField(_("Remark"),max_length=200,blank=True) # amount = dd.PriceField(_("Amount"),default=0) amount = dd.PriceField(_("Amount"), blank=True, null=True) actor = dd.ForeignKey(Actor, blank=True, null=True, help_text="""\ Hier optional einen Akteur angeben, wenn der Eintrag sich nicht auf den Gesamthaushalt bezieht.""") # amount = dd.PriceField(_("Amount"),default=0) circa = models.BooleanField(_("Circa"), default=False) distribute = models.BooleanField( _("Distribute"), default=False, help_text=u"""\ Ob diese Schuld in die Schuldenverteilung aufgenommen wird oder nicht.""" ) todo = models.CharField( verbose_name=_("To Do"), max_length=200, blank=True) remark = models.CharField(_("Remark"), max_length=200, blank=True, help_text=u"Bemerkungen sind intern und werden nie ausgedruckt.") description = models.CharField(_("Description"), max_length=200, blank=True, help_text=u"""\ Beschreibung wird automatisch mit der Kontobezeichung ausgefüllt. Kann man aber manuell ändern. Wenn man das Konto ändert, gehen manuelle Änderungen in diesem Feld verloren. Beim Ausdruck steht in Kolonne "Beschreibung" lediglich der Inhalt dieses Feldes, der eventuellen Bemerkung sowie (falls angegeben bei Schulden) der Partner.""") periods = PeriodsField(_("Periods"), help_text=u"""\ Gibt an, für wieviele Monate dieser Betrag sich versteht. Also bei monatlichen Ausgaben steht hier 1, bei jährlichen Ausgaben 12.""") monthly_rate = dd.PriceField(_("Monthly rate"), default=0, help_text=u""" Eventueller Betrag monatlicher Rückzahlungen, über deren Zahlung nicht verhandelt wird. Wenn hier ein Betrag steht, darf "Verteilen" nicht angekreuzt sein. """) bailiff = dd.ForeignKey( 'contacts.Company', verbose_name=_("Debt collection agency"), help_text=_("Leave empty for simple debts, otherwise select \ here the responsible bailiff or collection agency"), related_name='bailiff_debts_set', null=True, blank=True) # duplicated_fields = """ # account_type account partner actor distribute # circa todo remark description periods monthly_rate # """.split() def get_siblings(self): """ Like super(), but adds account_type. E.g. the Up/Down methods should work only within a given account_type. 
""" # return super(Entry,self).get_siblings().filter(account_type=self.account_type) return self.__class__.objects.filter( budget=self.budget, account_type=self.account_type) @dd.chooser() def account_choices(cls, account_type): # print '20120918 account_choices', account_type return rt.models.debts.Account.objects.filter(type=account_type) @dd.chooser() def bailiff_choices(self): qs = rt.models.contacts.Companies.request().data_iterator qs = qs.filter(client_contact_type__is_bailiff=True) return qs # @dd.chooser(simple_values=True) # def amount_choices(cls,account): # return [decimal.Decimal("0"),decimal.Decimal("2.34"),decimal.Decimal("12.34")] @dd.chooser() def actor_choices(cls, budget): return Actor.objects.filter(budget=budget).order_by('seqno') @dd.displayfield(_("Description")) def summary_description(row, ar): # chunks = [row.account] if row.description: desc = row.description # if row.partner: # chunks.append(row.partner) # return "%s/%s" join_words(unicode(row.account),unicode(row.partner),row.name) # return '/'.join([unicode(x) for x in words if x]) # return join_words(unicode(row.account),row.name) else: parts = [] if row.account_id: parts.append(str(row.account)) if row.partner_id: parts.append(str(row.partner)) desc = ' / '.join(parts) if row.todo: desc += " [%s]" % row.todo return desc def account_changed(self, ar): if self.account_id: self.periods = self.account.periods self.description = dd.babelattr( self.account, 'name', language=self.budget.partner.language) def full_clean(self, *args, **kw): if self.periods <= 0: raise ValidationError(_("Periods must be > 0")) if self.distribute and self.monthly_rate: raise ValidationError( # _("Cannot set both 'Distribute' and 'Monthly rate'")) _("Cannot set 'Distribute' when 'Monthly rate' is %r") % self.monthly_rate) # self.account_type = self.account.type # if not self.account_type: # ~ raise ValidationError(_("Budget entry #%d has no account_type") % obj2unicode(self)) super(Entry, self).full_clean(*args, **kw) def save(self, *args, **kw): # if not self.name: # if self.partner: # self.name = unicode(self.partner.name) # else: # self.name = self.account.name self.account_type = self.account.type if not self.description: self.description = dd.babelattr( self.account, 'name', language=self.budget.partner.language) # self.description = unicode(self.account) # if self.periods is None: # self.periods = self.account.periods super(Entry, self).save(*args, **kw) def on_duplicate(self, ar, master): """This is called when an entry has been duplicated. It is needed when we are doing a "related" duplication (initiated by the duplication of a Budget). In that case, `master` is not None but the new Budget that has been created. We now need to adapt the `actor` of this Entry by making it an actor of the new Budget. TODO: this method relies on the fact that related Actors get duplicated *before* related Entries. 
The order of `fklist` in `_lino_ddh` """ if master is not None and self.actor is not None and self.actor.budget != master: self.actor = master.actor_set.get(seqno=self.actor.seqno) super(Entry, self).on_duplicate(ar, master) dd.inject_field( 'clients.ClientContactType', 'is_bailiff', models.BooleanField( _("Debt collection agency"), default=False)) # dd.inject_field( # 'system.SiteConfig', # 'debts_bailiff_type', # dd.ForeignKey("clients.ClientContactType", # blank=True, null=True, # verbose_name=_("Bailiff"), # related_name='bailiff_type_sites', # help_text=_("Client contact type for Bailiff."))) dd.inject_field( 'system.SiteConfig', 'master_budget', dd.ForeignKey( "debts.Budget", blank=True, null=True, verbose_name=_("Master budget"), related_name='master_budget_sites', help_text=_("The budget whose content is to be \ copied into new budgets."))) def site_setup(site): for T in (site.modules.contacts.Partners, site.modules.contacts.Persons, site.modules.pcsw.Clients, site.modules.households.Households): # T.add_detail_tab('debts.BudgetsByPartner') T.add_detail_tab('debts', """ debts.BudgetsByPartner debts.ActorsByPartner """, dd.plugins.debts.verbose_name) # There are no `message_extractors` for `.odt` files. One workaround # is to manually repeat them here so that :command:`fab mm` finds # them. _("Financial situation") # Finanzielle Situation _("General information") # Allgemeine Auskünfte _("Name of debts mediator") # Name des Schuldnerberaters # _("Entered data") # Erfasste Daten # _("Summary") # Zusammenfassung # _("Conclusion") # Schlussfolgerung from .ui import *
agpl-3.0
-3,078,532,575,331,789,000
33.927476
100
0.593419
false
senior-zero/metanet
metanet/networks/nodes/node.py
1
4115
# @file node.py # Node implementation ## @package networks # @author Evtushenko Georgy # @date 05/03/2015 17:19:00 # @version 1.1 ## @mainpage Metanet documentation # @section intro_sec Introduction # Short script to demonstrate the use of doxygen. # # @section license_sec License #\verbatim This file is part of MetaNet. # # MetaNet is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # MetaNet is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with MetaNet. If not, see <http://www.gnu.org/licenses/>. # # (Этот файл — часть MetaNet. # # MetaNet - свободная программа: вы можете перераспространять ее и/или # изменять ее на условиях Стандартной общественной лицензии GNU в том виде, # в каком она была опубликована Фондом свободного программного обеспечения; # либо версии 3 лицензии, либо (по вашему выбору) любой более поздней # версии. # # MetaNet распространяется в надежде, что она будет полезной, # но БЕЗО ВСЯКИХ ГАРАНТИЙ; даже без неявной гарантии ТОВАРНОГО ВИДА # или ПРИГОДНОСТИ ДЛЯ ОПРЕДЕЛЕННЫХ ЦЕЛЕЙ. Подробнее см. в Стандартной # общественной лицензии GNU. # # Вы должны были получить копию Стандартной общественной лицензии GNU # вместе с этой программой. Если это не так, см. # <http://www.gnu.org/licenses/>.) #\endverbatim __author__ = 'Evtushenko Georgy' from abc import ABCMeta, abstractmethod ## Node class. #<p>Класс определяющий интерфейсы всех узлов - нейронов, # сетей, метасетей. Для хранения в networkx необходима реализация # ряда методов. Отвечает за присвоение уникального идентификатора # элементам библиотеки. Реализует механизм выплнения подписываемых # функций при срабатывании предиката. В нейронах проверка предикатов # происходит при применении функции активации, в сетях - при # изменении выходного сигнала, в метасетях - при расчёте сетей.</p> class Node(object): """Abstract class of network node (interface) """ __metaclass__ = ABCMeta counter=0 def __init__(self): Node.counter += 1 self.id = Node.counter self.name = '' self.do_when_activate = [] def add_action_on_active(self, function, unary_predicate): self.do_when_activate.append([function, unary_predicate]) def check_predicates(self): for func, pred in self.do_when_activate: if pred(self): func(self) def get_name(self): if not self.name: return self.id else: return self.name @abstractmethod def get_out_state(self): pass def __eq__(self, other): if isinstance(other, Node): return other.id == self.id else: return False def __ne__(self, other): return not self.__eq__(other) def __hash__(self): return hash(self.id)
gpl-3.0
-4,998,955,900,014,196,000
31.43
76
0.694016
false
google/timecast
tests/utils/test_ar.py
1
8728
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """timecast/tests/utils/test_ar.py""" import jax import jax.numpy as jnp import numpy as np import pytest from timecast.utils import random from timecast.utils.ar import _compute_xtx_inverse from timecast.utils.ar import _fit_constrained from timecast.utils.ar import _fit_unconstrained from timecast.utils.ar import _form_constraints from timecast.utils.ar import compute_gram from timecast.utils.ar import fit_gram from timecast.utils.ar import historify from timecast.utils.gram import OnlineGram def _compute_kernel_bias(X: np.ndarray, Y: np.ndarray, fit_intercept=True, alpha: float = 0.0): """Compute linear regression parameters""" num_samples, num_features = X.shape if fit_intercept: if num_features >= num_samples: X -= X.mean(axis=0) X = jnp.hstack((jnp.ones((X.shape[0], 1)), X)) reg = alpha * jnp.eye(X.shape[0 if num_features >= num_samples else 1]) if fit_intercept: reg = jax.ops.index_update(reg, [0, 0], 0) if num_features >= num_samples: beta = X.T @ jnp.linalg.inv(X @ X.T + reg) @ Y else: beta = jnp.linalg.inv(X.T @ X + reg) @ X.T @ Y if fit_intercept: return beta[1:], beta[0] else: return beta, [0] @pytest.mark.parametrize("m", [1, 10]) @pytest.mark.parametrize("n", [1, 10]) @pytest.mark.parametrize("history_len", [-1, 0, 1, 10]) def test_historify(m, n, history_len): """Test history-making""" X = jax.random.uniform(random.generate_key(), shape=(m, n)) if history_len < 1 or X.shape[0] - history_len < 0: with pytest.raises(ValueError): historify(X, history_len) else: batched = historify(X, history_len) batched = batched.reshape(batched.shape[0], -1) for i, batch in enumerate(batched): np.testing.assert_array_almost_equal(X[i : i + history_len].ravel().squeeze(), batch) @pytest.mark.parametrize("n", [100]) @pytest.mark.parametrize("input_dim", [1, 5]) @pytest.mark.parametrize("output_dim", [1, 4]) @pytest.mark.parametrize("history_len", [1, 3]) def test_compute_gram(n, input_dim, output_dim, history_len): """Test compouting gram matrices""" X = jax.random.uniform(random.generate_key(), shape=(n, input_dim)) Y = jax.random.uniform(random.generate_key(), shape=(n, output_dim)) XTX, XTY = compute_gram([(X, Y, None)], input_dim, output_dim, history_len) history = historify(X, history_len) history = history.reshape(history.shape[0], -1) np.testing.assert_array_almost_equal(history.T @ history, XTX.matrix(), decimal=4) np.testing.assert_array_almost_equal(history.T @ Y[history_len - 1 :], XTY.matrix(), decimal=4) def test_compute_gram_no_data(): """Test no data""" with pytest.raises(ValueError): compute_gram([(jnp.zeros((0, 1)), jnp.zeros((0, 1)), None)], 1, 1, 1) with pytest.raises(IndexError): compute_gram([], 1, 1, 1) def test_compute_gram_underdetermined(): """Test underdetermined""" data = jnp.ones((13, 10)) with pytest.raises(ValueError): compute_gram([(data, data, None)], 10, 10, 10) def test_fit_gram_underdetermined(): """Test underdetermined""" XTX = OnlineGram(1) XTY = XTX with pytest.raises(ValueError): fit_gram(XTX, 
XTY) @pytest.mark.parametrize( "history_len,input_dim,output_dim,fit_intercept,expected_R,expected_r", [ ( 3, 2, 1, False, np.array( [ [1.0, -1.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, -1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 1.0, -1.0], ] ), np.zeros(3), ), ( 3, 2, 1, True, np.array( [ [1.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, -1.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 1.0, -1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, -1.0], ] ), np.zeros(4), ), (4, 1, 1, True, np.zeros((0, 5)), np.zeros((0))), ( 3, 2, 2, True, np.array( [ [1.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, -1.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 1.0, -1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, -1.0], ] ), np.zeros((4, 2)), ), ], ) def test_form_constraints( history_len, input_dim, output_dim, fit_intercept, expected_R, expected_r ): """Test forming constraints""" R, r = _form_constraints(input_dim, output_dim, history_len, fit_intercept) r = r.squeeze() assert np.array_equal(expected_R, R) assert np.array_equal(expected_r, r) @pytest.mark.parametrize("n", [40, 1000]) @pytest.mark.parametrize("input_dim", [1, 5]) @pytest.mark.parametrize("output_dim", [1, 4]) @pytest.mark.parametrize("history_len", [1, 3]) def test_fit_unconstrained(n, input_dim, output_dim, history_len): """Fit unconstrained regression""" # NOTE: we use random data because we want to test dimensions and # correctness vs a second implementation X = jax.random.uniform(random.generate_key(), shape=(n, input_dim)) Y = jax.random.uniform(random.generate_key(), shape=(n, output_dim)) XTX, XTY = compute_gram([(X, Y, None)], input_dim, output_dim, history_len) kernel, bias = fit_gram(XTX, XTY) n - history_len + 1 history = historify(X, history_len) history = history.reshape(history.shape[0], -1) expected_kernel, expected_bias = _compute_kernel_bias(history, Y[history_len - 1 :], alpha=1.0) expected_kernel = expected_kernel.reshape(1, history_len * input_dim, -1) np.testing.assert_array_almost_equal(expected_kernel, kernel, decimal=3) np.testing.assert_array_almost_equal(expected_bias, bias, decimal=3) @pytest.mark.parametrize("n", [1000]) @pytest.mark.parametrize("input_dim", [10, 12]) @pytest.mark.parametrize("output_dim", [1, 10]) @pytest.mark.parametrize("history_len", [2]) def test_fit_constrained(n, input_dim, output_dim, history_len): """Fit constrained regression""" # NOTE: we use random data because we want to test dimensions and # correctness vs a second implementation X = jax.random.uniform(random.generate_key(), shape=(n, input_dim)) Y = jax.random.uniform(random.generate_key(), shape=(n, output_dim)) XTX, XTY = compute_gram([(X, Y, None)], input_dim, output_dim, history_len) result = fit_gram(XTX, XTY, input_dim=input_dim) # Next, check that each chunk of input_dim features have the same coefficient # result = fit_gram(XTX, XTY, input_dim=input_dim) R, r = _form_constraints( input_dim=input_dim, output_dim=output_dim, history_len=history_len, fit_intercept=True, ) XTX = XTX.matrix(fit_intercept=True, input_dim=input_dim) XTY = XTY.matrix(fit_intercept=True, input_dim=input_dim) inv = _compute_xtx_inverse(XTX, alpha=1.0) beta = _fit_unconstrained(inv, XTY) beta = _fit_constrained(beta, inv, R, r) beta = beta.reshape(history_len + 1, input_dim, -1) assert np.sum([np.abs(x - x[0]) for x in beta]) < 1e-4 # Finally, check that resulting vector is of the correct length and the # values are self-consistent assert len(beta) == history_len + 1 beta = beta[:, 0] beta = beta[1:], beta[0] # Check final results 
np.testing.assert_array_almost_equal(beta[0], result[0]) np.testing.assert_array_almost_equal(beta[1], result[1]) def test_fit_constrained_bad_input_dim(): """Bad input for constrained""" XTX = OnlineGram(10) XTY = OnlineGram(5) XTX.update(jax.random.uniform(random.generate_key(), shape=(100, 10))) XTY.update(jax.random.uniform(random.generate_key(), shape=(100, 5))) with pytest.raises(ValueError): fit_gram(XTX, XTY, input_dim=7)
apache-2.0
2,209,846,768,034,088,700
33.634921
99
0.600596
false
bfalacerda/strands_executive
task_executor/scripts/fifo_task_executor.py
2
1910
#!/usr/bin/env python import rospy from Queue import Queue, Empty from strands_executive_msgs.msg import Task from task_executor.base_executor import BaseTaskExecutor from task_executor.sm_base_executor import AbstractTaskExecutor class FIFOTaskExecutor(AbstractTaskExecutor): def __init__(self): # init node first, must be done before call to super init for service advertising to work # rospy.init_node("task_executor", log_level=rospy.DEBUG) rospy.init_node("task_executor" ) # init superclasses super( FIFOTaskExecutor, self ).__init__() self.tasks = Queue() self.advertise_services() def add_tasks(self, tasks): """ Called with a new task for the executor """ rospy.loginfo('Called with %s tasks' % len(tasks)) for task in tasks: self.tasks.put(task) rospy.loginfo('Queued %s tasks' % len(tasks)) def task_demanded(self, previously_active_task): """ Called when a task is demanded. self.active_task is the demanded task (and is being executed) and previously_active_task was the task that was being executed (which could be None) """ if previously_active_task: self.add_tasks([previously_active_task]) def run_executor(self): r = rospy.Rate(1) # 1hz while not rospy.is_shutdown(): if self.executing: if not self.active_task: print "need a task" try: task = self.tasks.get(False) self.execute_task(task) except Empty, e: pass else: print "executing task %s" % self.active_task r.sleep() if __name__ == '__main__': executor = FIFOTaskExecutor() executor.run_executor()
mit
-8,832,723,550,932,087,000
33.107143
195
0.580628
false
softinus/IMDB_DataMiner
kobis.or.kr/AllMovies_kobis.py
1
5363
# written by python 2.* #-*- coding: utf-8 -*- import urllib2 import string import os from shutil import move import re import sys import time import glob from bs4 import BeautifulSoup from selenium import webdriver from selenium.webdriver.common.keys import Keys from selenium.webdriver.chrome.options import Options from selenium.common.exceptions import NoSuchElementException from selenium.common.exceptions import NoAlertPresentException from selenium.webdriver.common.by import By from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions as EC import ssl ssl._create_default_https_context = ssl._create_unverified_context reload(sys) sys.setdefaultencoding('utf-8') # currError = Parsing_err(page_num=0, entity_num=1, err_url="test", status="pending") # currError.save() # print "saved" USER_NAME= os.getenv('username') def move_to_download_folder(downloadPath, newFileName, fileExtension): got_file = False ## Grab current file name. while got_file == False: try: currentFile = glob.glob(downloadPath + "*.xls") if len(currentFile) != 0: got_file = True except: print "File has not finished downloading" time.sleep(1) # Validate windows path by regex cleaned_up_filename = re.sub(r'[/\\:*?"<>|]', '', newFileName) ## Create new file name #fileDestination = "C:\\Users\\" + USER_NAME +"\\Source\\Repos\\Movie_DataMiner\\KOBIS_download\\" + cleaned_up_filename +"." + fileExtension fileDestination = ".\\KOBIS_download2\\" + cleaned_up_filename +"." + fileExtension print "fileDestination : " +fileDestination #os.rename(currentFile[0], fileDestination) move(currentFile[0], fileDestination) return def FindAndAcceptAlert(browser): try: alert = None while(True): # check till alert is popped up alert = isAlertPresent(browser) if alert is not None: alert.accept() return except: return def isAlertPresent(browser): try: alert= browser.driver.switch_to_alert() return alert except NoAlertPresentException: return None def extractMovieNum(txt): movieNum= re.sub(r'\D', "", txt) # sub non-digits by regex if len(movieNum) == 8: return movieNum else: del1= txt.find(",") del2= txt.find(")") movieNum= txt[del1+1:del2].replace("'","") if len(movieNum) == 8: return movieNum else: return None def parseThisPage(browser, page_num): browser.execute_script("goPage('" + str(page_num) + "')" ) time.sleep(1) soup = BeautifulSoup(browser.page_source, "lxml") table= soup.find("table", {"class":"boardList03"}) arrMovies= table.tbody.find_all("tr") for idx,movie in enumerate(arrMovies): click_content= movie.td.a['onclick'] movieName= movie.td.a.text movieNum= extractMovieNum(click_content) if movieNum: print movieNum +","+ movieName # dtlExcelDn('movie','box','20080828'); browser.execute_script("dtlExcelDn('movie','box','" + movieNum + "');" ) #print result #wait = WebDriverWait(browser, 10) #wait.until(EC.alert_is_present) time.sleep(1) alert= browser.switch_to_alert() alert.accept() move_to_download_folder("C:\\Users\\"+USER_NAME+"\\Downloads\\", movieName + "_"+ str(movieNum) + "_" + str(page_num) +"_"+str(idx), "xls") #FindAndAcceptAlert(browser) else: print "couldn't find movie corresponding with : "+ movieName url = ("http://kobis.or.kr/kobis/business/mast/mvie/searchMovieList.do") print url MOVIES_PER_PAGE= 10 #Chrome driver setting #options = webdriver.ChromeOptions() #options.add_argument("download.default_directory=./KOBIS_download") #browser = webdriver.Chrome(chrome_options=options) browser = webdriver.Chrome() # Firefox : To prevent download dialog # profile = 
webdriver.FirefoxProfile() # profile.set_preference('browser.download.folderList', 2) # custom location # profile.set_preference('browser.download.manager.showWhenStarting', False) # profile.set_preference('browser.download.dir', '/KOBIS_download') # profile.set_preference('browser.helperApps.neverAsk.saveToDisk', 'application/vnd.ms-excel') #browser = webdriver.Firefox() browser.implicitly_wait(3) # seconds browser.get(url) try: elem = browser.find_element_by_name('sOpenYearS') elem.send_keys('2004-01-01') elem = browser.find_element_by_name('sOpenYearE') elem.send_keys('2016-12-31') browser.execute_script("fn_searchList();") time.sleep(0.5) soup = BeautifulSoup(browser.page_source, "lxml") countMovies= soup.find("div", { "class":"board_btm" }) countMovies_filtered= re.sub(r'\D', "", countMovies.em.text) print "retrieved movies : "+countMovies_filtered TOTAL_PAGES = (int(countMovies_filtered) / MOVIES_PER_PAGE)+2 print "total pages : "+ str(TOTAL_PAGES) STARTING_PAGE= 958 for x in range(STARTING_PAGE, TOTAL_PAGES): print "current page : " + str(x) parseThisPage(browser, x) except Exception, e: print str(e) finally: browser.quit()
apache-2.0
2,587,069,389,925,057,000
31.509091
151
0.655044
false
XereoNet/SpaceGDN
loader/resources/mojang_loader.py
1
2189
import requests import json import datetime import re import math from ..resource_bases import Downloader class Mojang(Downloader): url = 'http://s3.amazonaws.com/Minecraft.Download/versions/versions.json' download_url_base = 'https://s3.amazonaws.com/Minecraft.Download/versions/{0}/minecraft_server.{0}.jar' def __init__(self): pass def get_description(self, t): if t in self.descriptions: return self.descriptions[t] return '' def parse_version(self, version): if version['type'].startswith('old_'): return url = self.download_url_base.format(version['id']) released = datetime.datetime.strptime(re.sub(r'\+[0-9]{2}:[0-9]{2}$', '', version['releaseTime']), '%Y-%m-%dT%H:%M:%S') build = math.floor(released.timestamp() / 100) return { '$parents': [ { '$id': 'minecraft', 'resource': 'game', 'name': 'Minecraft' }, { '$id': 'vanilla', 'resource': 'type', 'name': 'Vanilla Minecraft', 'author': 'Mojang', 'description': '' }, { '$id': version['type'], 'resource': 'channel', 'name': ' '.join([n.capitalize() for n in version['type'].split('_')]) }, { '$id': version['id'], 'resource': 'version', 'version': version['id'], 'last_build': build, } ], '$id': version['id'], '$load': lambda path: self.download(url, path), '$patched': False, 'resource': 'build', 'created': released, 'build': build, 'url': url, } def get_versions(self): r = requests.get(self.url) data = json.loads(r.text) return data['versions'] def items(self): return map(self.parse_version, self.get_versions())
agpl-3.0
8,281,218,096,187,848,000
29.402778
107
0.454545
false
opennode/waldur-mastermind
src/waldur_core/structure/tests/test_customer_permissions.py
1
24049
import collections import datetime from unittest import mock from django.contrib.auth import get_user_model from django.utils import timezone from rest_framework import status, test from rest_framework.reverse import reverse from waldur_core.core.tests.helpers import override_waldur_core_settings from waldur_core.structure import tasks from waldur_core.structure.models import CustomerPermission, CustomerRole, ProjectRole from waldur_core.structure.tests import factories User = get_user_model() TestRole = collections.namedtuple('TestRole', ['user', 'customer', 'role']) TestRole.__test__ = False class CustomerPermissionBaseTest(test.APITransactionTestCase): all_roles = ( # user customer role TestRole('first', 'first', 'owner'), TestRole('second', 'second', 'owner'), ) role_map = { 'owner': CustomerRole.OWNER, } def setUp(self): self.users = { 'staff': factories.UserFactory(is_staff=True), 'first': factories.UserFactory(), 'first_manager': factories.UserFactory(), 'first_admin': factories.UserFactory(), 'second': factories.UserFactory(), 'no_role': factories.UserFactory(), } self.customers = { 'first': factories.CustomerFactory(), 'second': factories.CustomerFactory(), } customer = self.customers['first'] project = factories.ProjectFactory(customer=customer) for user, customer, role in self.all_roles: self.customers[customer].add_user(self.users[user], self.role_map[role]) project.add_user(self.users['first_admin'], ProjectRole.ADMINISTRATOR) # Helper methods def _get_permission_url(self, user, customer, role): permission = CustomerPermission.objects.get( user=self.users[user], role=self.role_map[role], customer=self.customers[customer], ) return 'http://testserver' + reverse( 'customer_permission-detail', kwargs={'pk': permission.pk} ) class CustomerPermissionListTest(CustomerPermissionBaseTest): def test_anonymous_user_cannot_list_customer_permissions(self): response = self.client.get(reverse('customer_permission-list')) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) def test_user_cannot_list_roles_of_customer_he_is_not_affiliated(self): self.assert_user_access_to_permission_list( user='no_role', customer='first', should_see=False ) self.assert_user_access_to_permission_list( user='no_role', customer='second', should_see=False ) def test_customer_owner_can_list_roles_of_his_customer(self): self.assert_user_access_to_permission_list( user='first', customer='first', should_see=True ) def test_project_admin_can_list_roles_of_his_customer(self): self.assert_user_access_to_permission_list( user='first_admin', customer='first', should_see=True ) def test_staff_can_list_roles_of_any_customer(self): self.assert_user_access_to_permission_list( user='staff', customer='first', should_see=True ) self.assert_user_access_to_permission_list( user='staff', customer='second', should_see=True ) def test_customer_owner_cannot_list_roles_of_another_customer(self): self.assert_user_access_to_permission_list( user='first', customer='second', should_see=False ) def test_project_admin_cannot_list_roles_of_another_customer(self): self.assert_user_access_to_permission_list( user='first_admin', customer='second', should_see=False ) def assert_user_access_to_permission_list(self, user, customer, should_see): self.client.force_authenticate(user=self.users[user]) response = self.client.get(reverse('customer_permission-list')) self.assertEqual(response.status_code, status.HTTP_200_OK) expected_urls = { r: self._get_permission_url(*r) for r in self.all_roles if r.customer == customer } actual_urls = 
set([role['url'] for role in response.data]) for role, role_url in expected_urls.items(): if should_see: self.assertIn( role_url, actual_urls, '{0} user does not see privilege ' 'he is supposed to see: {1}'.format(user, role), ) else: self.assertNotIn( role_url, actual_urls, '{0} user sees privilege ' 'he is not supposed to see: {1}'.format(user, role), ) class CustomerPermissionGrantTest(CustomerPermissionBaseTest): @override_waldur_core_settings(OWNERS_CAN_MANAGE_OWNERS=True) def test_customer_owner_can_grant_new_role_within_his_customer(self): self.assert_user_access_to_permission_granting( login_user='first', affected_user='no_role', affected_customer='first', expected_status=status.HTTP_201_CREATED, ) def test_customer_owner_cannot_grant_existing_role_within_his_customer(self): self.assert_user_access_to_permission_granting( login_user='staff', affected_user='first', affected_customer='first', expected_status=status.HTTP_400_BAD_REQUEST, expected_payload={ 'non_field_errors': [ 'The fields customer and user must make a unique set.' ], }, ) def test_customer_owner_cannot_grant_role_within_another_customer(self): self.assert_user_access_to_permission_granting( login_user='first', affected_user='no_role', affected_customer='second', expected_status=status.HTTP_400_BAD_REQUEST, expected_payload={ 'customer': ['Invalid hyperlink - Object does not exist.'], }, ) @override_waldur_core_settings(OWNERS_CAN_MANAGE_OWNERS=False) def test_customer_owner_can_not_grant_new_role_within_his_customer_if_settings_are_tweaked( self, ): self.assert_user_access_to_permission_granting( login_user='first', affected_user='no_role', affected_customer='first', expected_status=status.HTTP_403_FORBIDDEN, ) @override_waldur_core_settings(OWNERS_CAN_MANAGE_OWNERS=True) def test_project_admin_cannot_grant_role_within_his_customer(self): self.assert_user_access_to_permission_granting( login_user='first_admin', affected_user='no_role', affected_customer='first', expected_status=status.HTTP_403_FORBIDDEN, expected_payload={ 'detail': 'You do not have permission to perform this action.', }, ) def test_staff_can_grant_new_role_within_any_customer(self): self.assert_user_access_to_permission_granting( login_user='staff', affected_user='no_role', affected_customer='first', expected_status=status.HTTP_201_CREATED, ) self.assert_user_access_to_permission_granting( login_user='staff', affected_user='no_role', affected_customer='second', expected_status=status.HTTP_201_CREATED, ) def test_staff_cannot_grant_existing_role_within_any_customer(self): self.assert_user_access_to_permission_granting( login_user='staff', affected_user='first', affected_customer='first', expected_status=status.HTTP_400_BAD_REQUEST, expected_payload={ 'non_field_errors': [ 'The fields customer and user must make a unique set.' ], }, ) self.assert_user_access_to_permission_granting( login_user='staff', affected_user='second', affected_customer='second', expected_status=status.HTTP_400_BAD_REQUEST, expected_payload={ 'non_field_errors': [ 'The fields customer and user must make a unique set.' 
], }, ) def assert_user_access_to_permission_granting( self, login_user, affected_user, affected_customer, expected_status, expected_payload=None, ): self.client.force_authenticate(user=self.users[login_user]) data = { 'customer': factories.CustomerFactory.get_url( self.customers[affected_customer] ), 'user': factories.UserFactory.get_url(self.users[affected_user]), 'role': 'owner', } response = self.client.post(reverse('customer_permission-list'), data) self.assertEqual(response.status_code, expected_status) if expected_payload is not None: self.assertDictContainsSubset(expected_payload, response.data) class CustomerPermissionRevokeTest(CustomerPermissionBaseTest): @override_waldur_core_settings(OWNERS_CAN_MANAGE_OWNERS=True) def test_customer_owner_can_revoke_role_within_his_customer(self): self.assert_user_access_to_permission_revocation( login_user='first', affected_user='first', affected_customer='first', expected_status=status.HTTP_204_NO_CONTENT, ) def test_customer_owner_cannot_revoke_role_within_another_customer(self): self.assert_user_access_to_permission_revocation( login_user='first', affected_user='second', affected_customer='second', expected_status=status.HTTP_404_NOT_FOUND, ) @override_waldur_core_settings(OWNERS_CAN_MANAGE_OWNERS=False) def test_customer_owner_can_not_revoke_role_within_his_customer_if_settings_are_tweaked( self, ): self.assert_user_access_to_permission_revocation( login_user='first', affected_user='first', affected_customer='first', expected_status=status.HTTP_403_FORBIDDEN, ) @override_waldur_core_settings(OWNERS_CAN_MANAGE_OWNERS=True) def test_project_admin_cannot_revoke_role_within_his_customer(self): self.assert_user_access_to_permission_revocation( login_user='first_admin', affected_user='first', affected_customer='first', expected_status=status.HTTP_403_FORBIDDEN, expected_payload={ 'detail': 'You do not have permission to perform this action.', }, ) def test_staff_can_revoke_role_within_any_customer(self): self.assert_user_access_to_permission_revocation( login_user='staff', affected_user='first', affected_customer='first', expected_status=status.HTTP_204_NO_CONTENT, ) self.assert_user_access_to_permission_revocation( login_user='staff', affected_user='second', affected_customer='second', expected_status=status.HTTP_204_NO_CONTENT, ) def assert_user_access_to_permission_revocation( self, login_user, affected_user, affected_customer, expected_status, expected_payload=None, ): self.client.force_authenticate(user=self.users[login_user]) url = self._get_permission_url(affected_user, affected_customer, 'owner') response = self.client.delete(url) self.assertEqual(response.status_code, expected_status) if expected_payload is not None: self.assertDictContainsSubset(expected_payload, response.data) class CustomerPermissionFilterTest(test.APITransactionTestCase): def setUp(self): staff_user = factories.UserFactory(is_staff=True) self.client.force_authenticate(user=staff_user) self.users = { 'first': factories.UserFactory(), 'second': factories.UserFactory(), } self.customers = { 'first': factories.CustomerFactory(), 'second': factories.CustomerFactory(), } for customer in self.customers: self.customers[customer].add_user(self.users['first'], CustomerRole.OWNER) self.customers[customer].add_user(self.users['second'], CustomerRole.OWNER) def test_staff_user_can_filter_roles_within_customer_by_customer_uuid(self): response = self.client.get(reverse('customer_permission-list')) self.assertEqual(response.status_code, status.HTTP_200_OK) for customer in 
self.customers: response = self.client.get( reverse('customer_permission-list'), data={'customer': self.customers[customer].uuid.hex}, ) self.assertEqual(response.status_code, status.HTTP_200_OK) customer_url = self._get_customer_url(self.customers[customer]) for permission in response.data: self.assertEqual(customer_url, permission['customer']) def test_staff_user_can_filter_roles_within_customer_by_username(self): response = self.client.get(reverse('customer_permission-list')) self.assertEqual(response.status_code, status.HTTP_200_OK) for user in self.users: self._ensure_matching_entries_in('username', self.users[user].username) self._ensure_non_matching_entries_not_in( 'username', self.users[user].username ) def test_staff_user_can_filter_roles_within_customer_by_native_name(self): response = self.client.get(reverse('customer_permission-list')) self.assertEqual(response.status_code, status.HTTP_200_OK) for user in self.users: self._ensure_matching_entries_in( 'native_name', self.users[user].native_name ) self._ensure_non_matching_entries_not_in( 'native_name', self.users[user].native_name ) def test_staff_user_can_filter_roles_within_customer_by_full_name(self): response = self.client.get(reverse('customer_permission-list')) self.assertEqual(response.status_code, status.HTTP_200_OK) for user in self.users: self._ensure_matching_entries_in('full_name', self.users[user].full_name) self._ensure_non_matching_entries_not_in( 'full_name', self.users[user].full_name ) def test_staff_user_can_filter_roles_within_customer_by_role_name(self): response = self.client.get(reverse('customer_permission-list')) self.assertEqual(response.status_code, status.HTTP_200_OK) response = self.client.get( reverse('customer_permission-list'), data={'role': 'owner'} ) self.assertEqual(response.status_code, status.HTTP_200_OK) for permission in response.data: self.assertEqual('owner', permission['role']) def test_staff_user_can_see_required_fields_in_filtration_response(self): response = self.client.get(reverse('customer_permission-list')) self.assertEqual(response.status_code, status.HTTP_200_OK) for customer in self.customers: response = self.client.get( reverse('customer_permission-list'), data={'customer': self.customers[customer].uuid.hex}, ) self.assertEqual(response.status_code, status.HTTP_200_OK) required_fields = ( 'url', 'user_native_name', 'user_full_name', 'user_username', ) for permission in response.data: for field in required_fields: self.assertIn(field, permission) # Helper methods def _ensure_matching_entries_in(self, field, value): response = self.client.get( reverse('customer_permission-list'), data={field: value} ) self.assertEqual(response.status_code, status.HTTP_200_OK) for permission in response.data: self.assertEqual(value, permission['user_' + field]) def _ensure_non_matching_entries_not_in(self, field, value): user = factories.UserFactory() customer = factories.CustomerFactory() customer.add_user(user, CustomerRole.OWNER) response = self.client.get( reverse('customer_permission-list'), data={field: getattr(user, field)} ) self.assertEqual(response.status_code, status.HTTP_200_OK) for permission in response.data: self.assertNotEqual(value, permission['user_' + field]) def _get_customer_url(self, customer): return 'http://testserver' + reverse( 'customer-detail', kwargs={'uuid': customer.uuid.hex} ) class CustomerPermissionExpirationTest(test.APITransactionTestCase): def setUp(self): permission = factories.CustomerPermissionFactory() self.user = permission.user self.customer = 
permission.customer self.url = factories.CustomerPermissionFactory.get_url(permission) def test_user_can_not_update_permission_expiration_time_for_himself(self): self.client.force_authenticate(user=self.user) expiration_time = timezone.now() + datetime.timedelta(days=100) response = self.client.put(self.url, {'expiration_time': expiration_time}) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) def test_staff_can_update_permission_expiration_time_for_any_user(self): staff_user = factories.UserFactory(is_staff=True) self.client.force_authenticate(user=staff_user) expiration_time = timezone.now() + datetime.timedelta(days=100) response = self.client.put(self.url, {'expiration_time': expiration_time}) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual( response.data['expiration_time'], expiration_time, response.data ) @override_waldur_core_settings(OWNERS_CAN_MANAGE_OWNERS=True) def test_owner_can_update_permission_expiration_time_for_other_owner_in_same_customer( self, ): owner = factories.UserFactory() self.customer.add_user(owner, CustomerRole.OWNER) self.client.force_authenticate(user=owner) expiration_time = timezone.now() + datetime.timedelta(days=100) response = self.client.put(self.url, {'expiration_time': expiration_time}) self.assertEqual(response.status_code, status.HTTP_200_OK, response.data) self.assertEqual( response.data['expiration_time'], expiration_time, response.data ) @override_waldur_core_settings(OWNERS_CAN_MANAGE_OWNERS=False) def test_owner_can_not_update_permission_expiration_time_for_other_owner_if_settings_are_tweaked( self, ): owner = factories.UserFactory() self.customer.add_user(owner, CustomerRole.OWNER) self.client.force_authenticate(user=owner) expiration_time = timezone.now() + datetime.timedelta(days=100) response = self.client.put(self.url, {'expiration_time': expiration_time}) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) def test_user_can_not_set_permission_expiration_time_lower_than_current(self): staff_user = factories.UserFactory(is_staff=True) self.client.force_authenticate(user=staff_user) expiration_time = timezone.now() - datetime.timedelta(days=100) response = self.client.put(self.url, {'expiration_time': expiration_time}) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def test_user_cannot_grant_permissions_with_greater_expiration_time(self): expiration_time = timezone.now() + datetime.timedelta(days=100) permission = factories.CustomerPermissionFactory( expiration_time=expiration_time ) self.client.force_authenticate(user=permission.user) response = self.client.post( factories.CustomerPermissionFactory.get_list_url(), { 'customer': factories.CustomerFactory.get_url( customer=permission.customer ), 'user': factories.UserFactory.get_url(), 'role': CustomerRole.OWNER, 'expiration_time': expiration_time + datetime.timedelta(days=1), }, ) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) def test_user_can_set_expiration_time_when_role_is_created(self): staff_user = factories.UserFactory(is_staff=True) self.client.force_authenticate(user=staff_user) expiration_time = timezone.now() + datetime.timedelta(days=100) response = self.client.post( factories.CustomerPermissionFactory.get_list_url(), { 'customer': factories.CustomerFactory.get_url(), 'user': factories.UserFactory.get_url(), 'role': factories.CustomerPermissionFactory.role, 'expiration_time': expiration_time, }, ) self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual( 
response.data['expiration_time'], expiration_time, response.data ) def test_task_revokes_expired_permissions(self): expired_permission = factories.CustomerPermissionFactory( expiration_time=timezone.now() - datetime.timedelta(days=100) ) not_expired_permission = factories.CustomerPermissionFactory( expiration_time=timezone.now() + datetime.timedelta(days=100) ) tasks.check_expired_permissions() self.assertFalse( expired_permission.customer.has_user( expired_permission.user, expired_permission.role ) ) self.assertTrue( not_expired_permission.customer.has_user( not_expired_permission.user, not_expired_permission.role ) ) def test_when_expiration_time_is_updated_event_is_emitted(self): staff_user = factories.UserFactory(is_staff=True) self.client.force_authenticate(user=staff_user) expiration_time = timezone.now() + datetime.timedelta(days=100) with mock.patch('logging.LoggerAdapter.info') as mocked_info: self.client.put(self.url, {'expiration_time': expiration_time}) (args, kwargs) = mocked_info.call_args_list[-1] event_type = kwargs['extra']['event_type'] event_message = args[0] self.assertEqual(event_type, 'role_updated') self.assertTrue(staff_user.full_name in event_message) class CustomerPermissionCreatedByTest(test.APITransactionTestCase): def test_user_which_granted_permission_is_stored(self): staff_user = factories.UserFactory(is_staff=True) self.client.force_authenticate(user=staff_user) user = factories.UserFactory() customer = factories.CustomerFactory() data = { 'customer': factories.CustomerFactory.get_url(customer), 'user': factories.UserFactory.get_url(user), 'role': CustomerRole.OWNER, } response = self.client.post(reverse('customer_permission-list'), data) self.assertEqual(response.status_code, status.HTTP_201_CREATED) permission = CustomerPermission.objects.get(pk=response.data['pk']) self.assertEqual(permission.created_by, staff_user)
mit
2,955,888,555,649,084,400
38.231648
101
0.624433
false
hmml/ev3
source/conf.py
1
8346
# -*- coding: utf-8 -*- # # Lego Mindstorms EV3 documentation build configuration file, created by # sphinx-quickstart on Tue Jan 7 20:37:24 2014. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import os import sys # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('..')) sys.path.insert(0, os.path.abspath('../ev3')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', #'sphinx.ext.viewcode', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Lego Mindstorms EV3' copyright = u'2014, hmml' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.1' # The full version, including alpha/beta/rc tags. release = 'alpha' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. 
#html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'LegoMindstormsEV3doc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'LegoMindstormsEV3.tex', u'Lego Mindstorms EV3 Documentation', u'hmml', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. 
#latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'legomindstormsev3', u'Lego Mindstorms EV3 Documentation', [u'hmml'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'LegoMindstormsEV3', u'Lego Mindstorms EV3 Documentation', u'hmml', 'LegoMindstormsEV3', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False autodoc_member_order = 'bysource'
apache-2.0
-1,305,270,725,964,522,000
30.617424
79
0.708603
false
rishirajsinghjhelumi/Entity-Mining
assets/omdb.py-master/setup.py
1
1127
""" omdb ---- Python wrapper for OMDbAPI.com. Documentation: https://github.com/dgilland/omdb.py """ from setuptools import setup setup( name='omdb', version='0.1.1', description='Python wrapper for OMDb API: http://www.omdbapi.com/', long_description=__doc__, author='Derrick Gilland', author_email='[email protected]', url='https://github.com/dgilland/omdb.py', packages=['omdb'], install_requires=['requests>=2.0.1', 'six'], test_suite='tests', keywords='omdb imdb movies', license='BSD', classifiers=[ 'Environment :: Web Environment', 'Intended Audience :: Developers', 'Operating System :: OS Independent', 'Programming Language :: Python', 'License :: OSI Approved :: BSD License', 'Programming Language :: Python', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4' ] )
gpl-2.0
4,260,000,430,900,551,000
27.897436
71
0.605146
false
schef/schef.github.io
source/16/mc-16-01-03-emajor-1black.py
1
1566
#!/usr/bin/python3
#import time
import random
import imp
modl = imp.load_source('ppFunctions', '../00/ppFunctions.py')
import os
from ppFunctions import *
from termcolor import colored, cprint

#sleep because of loading midi modules
print("Are you ready?")
time.sleep(1)

print_status = lambda x: cprint(x, 'white', 'on_blue')
print_help = lambda x: cprint(x, 'red')

hit = 0
rounde = 1
done = False
generatedList = []

for i in range(stringToMidiNum("c"), stringToMidiNum("c'")+1):
    if i%12 in blackTonesBase:
        generatedList.append(i)

while True:
    try:
        os.system('clear')
        print_status("Status: round=" + str(rounde) + ", hit=" + str(hit))
        print_help("Help: rEPEAT sKIP")
        playHarmonicNotes(stringToMidiNum("e gis h"))
        randomNote = random.choice(generatedList)
        playNote(randomNote)
        while not done:
            guessedNote = input("Your input:")
            if guessedNote == "r":
                print("Repeating...")
                playHarmonicNotes(stringToMidiNum("e gis h"))
                playNote(randomNote)
            elif guessedNote == "s":
                print("Skipping...")
                done = True
            elif guessedNote not in lilypondTones:
                print("What? Syntax error!")
            else:
                if (lilypondTones[guessedNote] == randomNote%12):
                    print("Yea!")
                    hit += 1
                    rounde += 1
                    done = True
                else:
                    print("Almost!")
                    hit = 0
        # reset the flag so the next round asks for input again
        done = False
    except (KeyboardInterrupt):
        print('...Program Stopped Manually!')
        raise
mit
3,745,416,500,357,742,600
27.472727
70
0.590677
false
misaksen/umediaproxy
mediaproxy/scheduler.py
1
1397
# Copyright (C) 2007-2008 Dan Pascu <[email protected]> # """Schedule calls on the twisted reactor""" __all__ = ['RecurrentCall', 'KeepRunning'] from time import time class KeepRunning: """Return this class from a recurrent function to indicate that it should keep running""" pass class RecurrentCall(object): """Execute a function repeatedly at the given interval, until signaled to stop""" def __init__(self, period, func, *args, **kwargs): from twisted.internet import reactor self.func = func self.args = args self.kwargs = kwargs self.period = period self.now = None self.next = None self.callid = reactor.callLater(period, self) def __call__(self): from twisted.internet import reactor self.callid = None if self.now is None: self.now = time() self.next = self.now + self.period else: self.now, self.next = self.next, self.next + self.period result = self.func(*self.args, **self.kwargs) if result is KeepRunning: delay = max(self.next-time(), 0) self.callid = reactor.callLater(delay, self) def cancel(self): if self.callid is not None: try: self.callid.cancel() except ValueError: pass self.callid = None
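# Illustrative usage sketch (not part of the original module; the callback
# name below is an assumption). It shows how a function keeps itself
# scheduled by returning KeepRunning, and how the recurrence can be stopped.
#
#     from twisted.internet import reactor
#
#     def heartbeat():
#         print('still alive')
#         return KeepRunning              # return anything else to stop recurring
#
#     call = RecurrentCall(5, heartbeat)  # run heartbeat every 5 seconds
#     reactor.callLater(60, call.cancel)  # or cancel it explicitly after a minute
#     reactor.run()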
gpl-2.0
-3,097,894,996,152,195,000
28.104167
93
0.58912
false
zpincus/RisWidget
ris_widget/async_texture.py
1
7054
# This code is licensed under the MIT License (see LICENSE file for details) import contextlib import threading import queue # import weakref import ctypes import numpy from OpenGL import GL from PyQt5 import Qt from . import shared_resources IMAGE_TYPE_TO_GL_FORMATS = { 'G': (GL.GL_R32F, GL.GL_RED), 'Ga': (GL.GL_RG32F, GL.GL_RG), 'rgb': (GL.GL_RGB32F, GL.GL_RGB), 'rgba': (GL.GL_RGBA32F, GL.GL_RGBA) } NUMPY_DTYPE_TO_GL_PIXEL_TYPE = { numpy.bool8 : GL.GL_UNSIGNED_BYTE, numpy.uint8 : GL.GL_UNSIGNED_BYTE, numpy.uint16 : GL.GL_UNSIGNED_SHORT, numpy.float32: GL.GL_FLOAT} USE_BG_UPLOAD_THREAD = True # debug flag for testing with flaky drivers class AsyncTexture: # _LIVE_TEXTURES = None # @classmethod # def _on_about_to_quit(cls): # with shared_resources.offscreen_context(): # for t in cls._LIVE_TEXTURES: # t.destroy() def __init__(self, image): # if self._LIVE_TEXTURES is None: # # if we used 'self' instead of __class__, would just set _LIVE_TEXTURES for this instance # __class__._LIVE_TEXTURES = weakref.WeakSet() # Qt.QApplication.instance().aboutToQuit.connect(self._on_about_to_quit) self.data = image.data self.format, self.source_format = IMAGE_TYPE_TO_GL_FORMATS[image.type] self.source_type = NUMPY_DTYPE_TO_GL_PIXEL_TYPE[self.data.dtype.type] self.ready = threading.Event() self.status = 'waiting' self.texture = None def upload(self, upload_region=None): if self.texture is None and upload_region is not None: raise ValueError('The first time the texture is uploaded, the full region must be used.') if self.ready.is_set(): # if the texture was already uploaded and done is set, make sure to # reset it so that bind waits for this new upload. self.ready.clear() self.status = 'uploading' if USE_BG_UPLOAD_THREAD: OffscreenContextThread.get().enqueue(self._upload, [upload_region]) else: self._upload_fg(upload_region) def bind(self, tex_unit): if not self.status in ('uploading', 'uploaded'): raise RuntimeError('Cannot bind texture that has not been first uploaded') self.ready.wait() if hasattr(self, 'exception'): raise self.exception assert self.texture is not None GL.glActiveTexture(GL.GL_TEXTURE0 + tex_unit) GL.glBindTexture(GL.GL_TEXTURE_2D, self.texture) def destroy(self): if self.texture is not None: # requires a valid context assert Qt.QOpenGLContext.currentContext() is not None GL.glDeleteTextures([self.texture]) self.texture = None self.status = 'waiting' def _upload_fg(self, upload_region): assert Qt.QOpenGLContext.currentContext() is not None orig_unpack_alignment = GL.glGetIntegerv(GL.GL_UNPACK_ALIGNMENT) GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1) try: self._upload(upload_region) finally: # QPainter font rendering for OpenGL surfaces can break if we do not restore GL_UNPACK_ALIGNMENT # and this function was called within QPainter's native painting operations GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, orig_unpack_alignment) def _upload(self, upload_region): try: if self.texture is None: self.texture = GL.glGenTextures(1) alloc_texture = True else: alloc_texture = False GL.glBindTexture(GL.GL_TEXTURE_2D, self.texture) w, h = self.data.shape if alloc_texture: GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAX_LEVEL, 6) GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR_MIPMAP_LINEAR) GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST) GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S, GL.GL_CLAMP_TO_EDGE) GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_T, GL.GL_CLAMP_TO_EDGE) GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, self.format, 
w, h, 0, self.source_format, self.source_type, self.data.ctypes.data_as(ctypes.c_void_p)) else: # texture already exists if upload_region is None: x = y = 0 data = self.data else: x, y, w, h = upload_region data = self.data[x:x+w, y:y+h] try: GL.glPixelStorei(GL.GL_UNPACK_ROW_LENGTH, self.data.shape[0]) GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, x, y, w, h, self.source_format, self.source_type, data.ctypes.data_as(ctypes.c_void_p)) finally: GL.glPixelStorei(GL.GL_UNPACK_ROW_LENGTH, 0) # whether or not allocating texture, need to regenerate mipmaps GL.glGenerateMipmap(GL.GL_TEXTURE_2D) # need glFinish to make sure that the GL calls (which run asynchronously) # have completed before we set self.ready GL.glFinish() self.status = 'uploaded' except Exception as e: self.exception = e finally: self.ready.set() class OffscreenContextThread(Qt.QThread): _ACTIVE_THREAD = None @classmethod def get(cls): if cls._ACTIVE_THREAD is None: cls._ACTIVE_THREAD = cls() # TODO: is below necessary ever? # Qt.QApplication.instance().aboutToQuit.connect(cls._ACTIVE_THREAD.shut_down) return cls._ACTIVE_THREAD def __init__(self): super().__init__() self.queue = queue.Queue() self.running = True self.start() def enqueue(self, func, args): self.queue.put((func, args)) # def shut_down(self): # self.running = False # # now wake up the thread if it's blocked waiting for a texture # self.queue.put(None) # self.wait() def run(self): gl_context = Qt.QOpenGLContext() gl_context.setShareContext(Qt.QOpenGLContext.globalShareContext()) gl_context.setFormat(shared_resources.GL_QSURFACE_FORMAT) if not gl_context.create(): raise RuntimeError('Failed to create OpenGL context for background texture upload thread.') gl_context.makeCurrent(shared_resources.OFFSCREEN_SURFACE) GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1) try: while self.running: func, args = self.queue.get() if not self.running: #self.running may go to false while blocked waiting on the queue break func(*args) finally: gl_context.doneCurrent()
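# Illustrative usage sketch (not part of the original module; `image` is an
# assumed object with `.data` and `.type` attributes as expected above, and a
# valid shared OpenGL context is assumed to be current when bind()/destroy()
# are called):
#
#     tex = AsyncTexture(image)
#     tex.upload()       # queued on the background context thread
#     tex.bind(0)        # blocks until the upload has finished
#     tex.destroy()      # requires a current context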
mit
8,384,180,688,210,497,000
38.858757
108
0.594982
false
gem/oq-engine
openquake/commonlib/source_reader.py
1
14907
# -*- coding: utf-8 -*- # vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright (C) 2019, GEM Foundation # # OpenQuake is free software: you can redistribute it and/or modify it # under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # OpenQuake is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with OpenQuake. If not, see <http://www.gnu.org/licenses/>. import copy import os.path import pickle import operator import logging import zlib import numpy from openquake.baselib import parallel, general from openquake.hazardlib import nrml, sourceconverter, InvalidFile from openquake.hazardlib.lt import apply_uncertainties TWO16 = 2 ** 16 # 65,536 by_id = operator.attrgetter('source_id') CALC_TIME, NUM_SITES, EFF_RUPTURES, TASK_NO = 3, 4, 5, 7 def trt_smrs(src): return tuple(src.trt_smrs) def read_source_model(fname, converter, monitor): """ :param fname: path to a source model XML file :param converter: SourceConverter :param monitor: a Monitor instance :returns: a SourceModel instance """ [sm] = nrml.read_source_models([fname], converter) return {fname: sm} # NB: called after the .checksum has been stored in reduce_sources def _check_dupl_ids(src_groups): sources = general.AccumDict(accum=[]) for sg in src_groups: for src in sg.sources: sources[src.source_id].append(src) first = True for src_id, srcs in sources.items(): if len(srcs) > 1: # duplicate IDs with different checksums, see cases 11, 13, 20 for i, src in enumerate(srcs): src.source_id = '%s;%d' % (src.source_id, i) if first: logging.info('There are multiple different sources with ' 'the same ID %s', srcs) first = False def get_csm(oq, full_lt, h5=None): """ Build source models from the logic tree and to store them inside the `source_full_lt` dataset. 
""" converter = sourceconverter.SourceConverter( oq.investigation_time, oq.rupture_mesh_spacing, oq.complex_fault_mesh_spacing, oq.width_of_mfd_bin, oq.area_source_discretization, oq.minimum_magnitude, oq.source_id, discard_trts=oq.discard_trts) classical = not oq.is_event_based() full_lt.ses_seed = oq.ses_seed if oq.is_ucerf(): [grp] = nrml.to_python(oq.inputs["source_model"], converter) src_groups = [] for grp_id, sm_rlz in enumerate(full_lt.sm_rlzs): sg = copy.copy(grp) src_groups.append(sg) src = sg[0].new(sm_rlz.ordinal, sm_rlz.value) # one source src.checksum = src.grp_id = src.id = src.trt_smr = grp_id src.samples = sm_rlz.samples logging.info('Reading sections and rupture planes for %s', src) planes = src.get_planes() if classical: src.ruptures_per_block = oq.ruptures_per_block sg.sources = list(src) for s in sg: s.planes = planes s.sections = s.get_sections() # add background point sources sg = copy.copy(grp) src_groups.append(sg) sg.sources = src.get_background_sources() else: # event_based, use one source sg.sources = [src] src.planes = planes src.sections = src.get_sections() return CompositeSourceModel(full_lt, src_groups) logging.info('Reading the source model(s) in parallel') # NB: the source models file are often NOT in the shared directory # (for instance in oq-engine/demos) so the processpool must be used dist = ('no' if os.environ.get('OQ_DISTRIBUTE') == 'no' else 'processpool') # NB: h5 is None in logictree_test.py allargs = [] for fname in full_lt.source_model_lt.info.smpaths: allargs.append((fname, converter)) smdict = parallel.Starmap(read_source_model, allargs, distribute=dist, h5=h5 if h5 else None).reduce() if len(smdict) > 1: # really parallel parallel.Starmap.shutdown() # save memory fix_geometry_sections(smdict) groups = _build_groups(full_lt, smdict) # checking the changes changes = sum(sg.changes for sg in groups) if changes: logging.info('Applied %d changes to the composite source model', changes) return _get_csm(full_lt, groups) def fix_geometry_sections(smdict): """ If there are MultiFaultSources, fix the sections according to the GeometryModels (if any). 
""" gmodels = [] smodels = [] for fname, mod in smdict.items(): if isinstance(mod, nrml.GeometryModel): gmodels.append(mod) elif isinstance(mod, nrml.SourceModel): smodels.append(mod) else: raise RuntimeError('Unknown model %s' % mod) # merge the sections sections = [] for gmod in gmodels: sections.extend(gmod.sections) sections.sort(key=operator.attrgetter('sec_id')) nrml.check_unique([sec.sec_id for sec in sections]) # fix the MultiFaultSources for smod in smodels: for sg in smod.src_groups: for src in sg: if hasattr(src, 'create_inverted_index'): if not sections: raise RuntimeError('Missing geometryModel files!') src.create_inverted_index(sections) def _build_groups(full_lt, smdict): # build all the possible source groups from the full logic tree smlt_file = full_lt.source_model_lt.filename smlt_dir = os.path.dirname(smlt_file) def _groups_ids(value): # extract the source groups and ids from a sequence of source files groups = [] for name in value.split(): fname = os.path.abspath(os.path.join(smlt_dir, name)) groups.extend(smdict[fname].src_groups) return groups, set(src.source_id for grp in groups for src in grp) groups = [] for rlz in full_lt.sm_rlzs: src_groups, source_ids = _groups_ids(rlz.value) bset_values = full_lt.source_model_lt.bset_values(rlz) if bset_values and bset_values[0][0].uncertainty_type == 'extendModel': (bset, value), *bset_values = bset_values extra, extra_ids = _groups_ids(value) common = source_ids & extra_ids if common: raise InvalidFile( '%s contains source(s) %s already present in %s' % (value, common, rlz.value)) src_groups.extend(extra) for src_group in src_groups: trt_smr = full_lt.get_trt_smr(src_group.trt, rlz.ordinal) sg = apply_uncertainties(bset_values, src_group) for src in sg: src.trt_smr = trt_smr if rlz.samples > 1: src.samples = rlz.samples groups.append(sg) # check applyToSources sm_branch = rlz.lt_path[0] srcids = full_lt.source_model_lt.info.applytosources[sm_branch] for srcid in srcids: if srcid not in source_ids: raise ValueError( "The source %s is not in the source model," " please fix applyToSources in %s or the " "source model(s) %s" % (srcid, smlt_file, rlz.value.split())) return groups def reduce_sources(sources_with_same_id): """ :param sources_with_same_id: a list of sources with the same source_id :returns: a list of truly unique sources, ordered by trt_smr """ out = [] for src in sources_with_same_id: dic = {k: v for k, v in vars(src).items() if k not in 'source_id trt_smr samples'} src.checksum = zlib.adler32(pickle.dumps(dic, protocol=4)) for srcs in general.groupby( sources_with_same_id, operator.attrgetter('checksum')).values(): # duplicate sources: same id, same checksum src = srcs[0] if len(srcs) > 1: # happens in classical/case_20 src.trt_smr = tuple(s.trt_smr for s in srcs) else: src.trt_smr = src.trt_smr, out.append(src) out.sort(key=operator.attrgetter('trt_smr')) return out def _get_csm(full_lt, groups): # 1. extract a single source from multiple sources with the same ID # 2. regroup the sources in non-atomic groups by TRT # 3. 
reorder the sources by source_id atomic = [] acc = general.AccumDict(accum=[]) for grp in groups: if grp and grp.atomic: atomic.append(grp) elif grp: acc[grp.trt].extend(grp) key = operator.attrgetter('source_id', 'code') src_groups = [] for trt in acc: lst = [] for srcs in general.groupby(acc[trt], key).values(): if len(srcs) > 1: srcs = reduce_sources(srcs) lst.extend(srcs) for sources in general.groupby(lst, trt_smrs).values(): # check if OQ_SAMPLE_SOURCES is set ss = os.environ.get('OQ_SAMPLE_SOURCES') if ss: logging.info('Reducing the number of sources for %s', trt) split = [] for src in sources: for s in src: s.trt_smr = src.trt_smr split.append(s) sources = general.random_filter(split, float(ss)) or split[0] # set ._wkt attribute (for later storage in the source_wkt dataset) for src in sources: src._wkt = src.wkt() src_groups.append(sourceconverter.SourceGroup(trt, sources)) for ag in atomic: for src in ag: src._wkt = src.wkt() src_groups.extend(atomic) _check_dupl_ids(src_groups) for sg in src_groups: sg.sources.sort(key=operator.attrgetter('source_id')) return CompositeSourceModel(full_lt, src_groups) class CompositeSourceModel: """ :param full_lt: a :class:`FullLogicTree` instance :param src_groups: a list of SourceGroups :param event_based: a flag True for event based calculations, flag otherwise """ def __init__(self, full_lt, src_groups): self.gsim_lt = full_lt.gsim_lt self.source_model_lt = full_lt.source_model_lt self.sm_rlzs = full_lt.sm_rlzs self.full_lt = full_lt self.src_groups = src_groups idx = 0 for grp_id, sg in enumerate(src_groups): assert len(sg) # sanity check for src in sg: src.id = idx src.grp_id = grp_id idx += 1 def get_trt_smrs(self): """ :returns: an array of trt_smrs (to be stored as an hdf5.vuint32 array) """ keys = [sg.sources[0].trt_smrs for sg in self.src_groups] assert len(keys) < TWO16, len(keys) return [numpy.array(trt_smrs, numpy.uint32) for trt_smrs in keys] def get_sources(self, atomic=None): """ :returns: list of sources in the composite source model """ srcs = [] for src_group in self.src_groups: if atomic is None: # get all sources srcs.extend(src_group) elif atomic == src_group.atomic: srcs.extend(src_group) return srcs # used only in calc_by_rlz.py def get_groups(self, smr): """ :param smr: effective source model realization ID :returns: SourceGroups associated to the given `smr` """ src_groups = [] for sg in self.src_groups: trt_smr = self.full_lt.get_trt_smr(sg.trt, smr) src_group = copy.copy(sg) src_group.sources = [src for src in sg if trt_smr in src.trt_smrs] if len(src_group): src_groups.append(src_group) return src_groups def get_mags_by_trt(self): """ :returns: a dictionary trt -> magnitudes in the sources as strings """ mags = general.AccumDict(accum=set()) # trt -> mags for sg in self.src_groups: for src in sg: if hasattr(src, 'mags'): # UCERF srcmags = ['%.2f' % mag for mag in numpy.unique( numpy.round(src.mags, 2))] elif hasattr(src, 'data'): # nonparametric srcmags = ['%.2f' % item[0].mag for item in src.data] else: srcmags = ['%.2f' % item[0] for item in src.get_annual_occurrence_rates()] mags[sg.trt].update(srcmags) return {trt: sorted(mags[trt]) for trt in mags} def get_floating_spinning_factors(self): """ :returns: (floating rupture factor, spinning rupture factor) """ data = [] for sg in self.src_groups: for src in sg: if hasattr(src, 'hypocenter_distribution'): data.append( (len(src.hypocenter_distribution.data), len(src.nodal_plane_distribution.data))) if not data: return numpy.array([1, 1]) return 
numpy.array(data).mean(axis=0) def update_source_info(self, calc_times, nsites=False): """ Update (eff_ruptures, num_sites, calc_time) inside the source_info """ for src_id, arr in calc_times.items(): row = self.source_info[src_id] row[CALC_TIME] += arr[2] if len(arr) == 4: # after preclassical row[TASK_NO] = arr[3] if nsites: row[EFF_RUPTURES] += arr[0] row[NUM_SITES] += arr[1] def count_ruptures(self): """ Call src.count_ruptures() on each source. Slow. """ n = 0 for src in self.get_sources(): n += src.count_ruptures() return n def __repr__(self): """ Return a string representation of the composite model """ contents = [] for sg in self.src_groups: arr = numpy.array([src.source_id for src in sg]) line = f'grp_id={sg.sources[0].grp_id} {arr}' contents.append(line) return '<%s\n%s>' % (self.__class__.__name__, '\n'.join(contents))
agpl-3.0
4,829,314,217,559,910,000
36.08209
79
0.572483
false
alexdu/robot-sandbox
sim-files/geom.py
1
2503
# geom.py # # Copyright 2010 Alex Dumitrache <[email protected]> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, # MA 02110-1301, USA. from __future__ import division import math import numpy from numpy import matrix, mat import math from math import pi def rotx(ang): c = math.cos(ang * pi/180) s = math.sin(ang * pi/180) return mat([[1, 0, 0], [0, c, -s], [0, s, c]]) def roty(ang): c = math.cos(ang * pi/180) s = math.sin(ang * pi/180) return mat([[ c, 0, s], [ 0, 1, 0], [-s, 0, c]]) def rotz(ang): c = math.cos(ang * pi/180) s = math.sin(ang * pi/180) return mat([[c, -s, 0], [s, c, 0], [0, 0, 1]]) def omorot(r): m = mat(numpy.zeros((4,4))) m[0:3, 0:3] = r m[3,3] = 1 return m def omotrans(x,y,z): return mat([[1, 0, 0, x], [0, 1, 0, y], [0, 0, 1, z], [0, 0, 0, 1]]) def angdif(a,b=0): d = ((a - b + pi) % (2*pi)) - pi return d def angdifd(a,b=0): d = ((a - b + 180) % 360) - 180 return d def mat2ypr(M): yaw = math.atan2(M[1,2],M[0,2]); pitch = math.atan2(math.sqrt((M[2,0])**2 + (M[2,1])**2), M[2,2]); roll = math.atan2(M[2,1],-M[2,0]); if abs(pitch) < 1E-4: yaw = 0 roll = math.atan2(M[1,0],M[0,0]) if abs(abs(pitch) - pi) < 1E-4: yaw = 0; roll = math.atan2(M[1,0],-M[0,0]); return (yaw*180/pi, pitch*180/pi, roll*180/pi) def decompose(M): [x,y,z] = M[0:3, 3].flatten().tolist()[0] return (x, y, z) + mat2ypr(M)
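# Illustrative sanity check (not part of the original module): rotating the
# x axis by 90 degrees about z should give, up to rounding, the y axis.
#
#     v = mat([[1], [0], [0]])
#     print((rotz(90) * v).round(6).T)    # -> [[0. 1. 0.]] (approximately)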
gpl-3.0
400,059,909,724,239,170
26.123596
76
0.499001
false
Commonists/CommonsDownloader
setup.py
1
1250
#!/usr/bin/python # -*- coding: latin-1 -*- """Setup script.""" try: from setuptools import setup except ImportError: from distutils.core import setup try: import commonsdownloader version = commonsdownloader.__version__ except ImportError: version = 'Undefined' classifiers = [ 'Development Status :: 4 - Beta', 'Environment :: Console', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Topic :: Utilities' ] packages = ['commonsdownloader'] requires = ['argparse', 'mwclient', 'six'] entry_points = { 'console_scripts': [ 'download_from_Wikimedia_Commons = commonsdownloader.commonsdownloader:main', ] } setup( name='CommonsDownloader', version=version, author='Jean-Frédéric', author_email='JeanFred@github', url='http://github.com/Commonists/CommonsDownloader', description='Download thumbnails from Wikimedia Commons', long_description=open('README.md').read(), license='MIT', packages=packages, entry_points=entry_points, install_requires=requires, classifiers=classifiers )
mit
7,684,595,938,258,582,000
25
89
0.650641
false
oisinmulvihill/JSONP_PostPutGetDelete_With_Tornado
restserver/pp/jsonpcrud/service/jquery_unparam.py
1
1716
#!/usr/bin/python # -*- coding: utf-8 -*- """ From: https://bitbucket.org/k_bx/jquery-unparam/src """ import re from urllib import unquote_plus def parse_key_pair(keyval): keyval_splitted = keyval.split('=', 1) if len(keyval_splitted) == 1: key, val = keyval_splitted[0], '' else: key, val = keyval_splitted if key == '': return {} groups = re.findall(r"\[.*?\]", key) groups_joined = ''.join(groups) if key[-len(groups_joined):] == groups_joined: key = key[:-len(groups_joined)] for group in reversed(groups): if group == '[]': val = [val] else: val = {group[1:-1]: val} return {key: val} def merge_two_structs(s1, s2): if isinstance(s1, list) and \ isinstance(s2, list): return s1 + s2 if isinstance(s1, dict) and \ isinstance(s2, dict): retval = s1.copy() for key, val in s2.iteritems(): if retval.get(key) is None: retval[key] = val else: retval[key] = merge_two_structs(retval[key], val) return retval return s2 def merge_structs(structs): if len(structs) == 0: return None if len(structs) == 1: return structs[0] first, rest = structs[0], structs[1:] return merge_two_structs(first, merge_structs(rest)) def jquery_unparam_unquoted(jquery_params): pair_strings = jquery_params.split('&') key_pairs = [parse_key_pair(x) for x in pair_strings] return merge_structs(key_pairs) def jquery_unparam(jquery_params): return jquery_unparam_unquoted(unquote_plus(jquery_params)) if __name__ == '__main__': pass
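# Illustrative example of the expected behaviour (not part of the original
# module; dictionary key order may differ):
#
#     >>> jquery_unparam('a[b]=1&a[c]=2&tags[]=x&tags[]=y')
#     {'a': {'b': '1', 'c': '2'}, 'tags': ['x', 'y']}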
mit
-1,928,941,578,323,037,400
25.4
65
0.565851
false
SEL-Columbia/commcare-hq
corehq/apps/callcenter/signals.py
1
1701
from __future__ import print_function import sys import logging from couchdbkit.exceptions import ResourceNotFound from django.conf import settings from django.db.models import signals from requests.exceptions import RequestException from corehq.apps.callcenter.utils import sync_user_cases, bootstrap_callcenter, get_call_center_domains from corehq.apps.domain.models import Domain from corehq.apps.domain.signals import commcare_domain_post_save from corehq.apps.users.signals import commcare_user_post_save from corehq.elastic import es_query, ESError logger = logging.getLogger(__name__) def sync_user_cases_signal(sender, **kwargs): return sync_user_cases(kwargs["couch_user"]) commcare_user_post_save.connect(sync_user_cases_signal) def bootstrap_callcenter_domain_signal(sender, **kwargs): return bootstrap_callcenter(kwargs['domain']) commcare_domain_post_save.connect(bootstrap_callcenter_domain_signal) _module = __name__.rsplit('.', 1)[0] def catch_signal(app, **kwargs): app_name = app.__name__.rsplit('.', 1)[0] if app_name == _module: def _log(msg): if not settings.DEBUG: logging.exception(msg) else: print(msg, file=sys.stderr) try: domains = get_call_center_domains() except (RequestException, ESError): _log('Unable to query ES for call-center domains during syncdb') domains = [] for name in domains: domain = Domain.get_by_name(name) if domain: print(' callcenter bootstap `{0}`'.format(domain.name)) bootstrap_callcenter(domain) signals.post_syncdb.connect(catch_signal)
bsd-3-clause
7,144,069,088,647,544,000
32.352941
103
0.685479
false
Diwahars/pycon
pycon/tests/factories.py
1
2068
import random import factory import factory.django import factory.fuzzy from django.contrib.auth import models as auth from pycon.models import PyConProposalCategory, PyConProposal, \ PyConTalkProposal, PyConTutorialProposal, ThunderdomeGroup from symposion.proposals.tests.factories import ProposalKindFactory, \ ProposalBaseFactory from symposion.reviews.models import ProposalResult class UserFactory(factory.django.DjangoModelFactory): class Meta: model = auth.User username = factory.fuzzy.FuzzyText() first_name = factory.fuzzy.FuzzyText() last_name = factory.fuzzy.FuzzyText() email = factory.Sequence(lambda n: 'user{}@example.com'.format(n)) class ProposalResultFactory(factory.django.DjangoModelFactory): class Meta: model = ProposalResult class PyConProposalCategoryFactory(factory.django.DjangoModelFactory): class Meta: model = PyConProposalCategory class PyConProposalFactory(ProposalBaseFactory): class Meta: model = PyConProposal abstract = True category = factory.SubFactory(PyConProposalCategoryFactory) audience_level = factory.LazyAttribute(lambda a: random.choice([1, 2, 3])) class PyConTalkProposalFactory(PyConProposalFactory): class Meta: model = PyConTalkProposal duration = 0 kind = factory.SubFactory(ProposalKindFactory, name="talk", slug="talk") outline = "outline" audience = "audience" perceived_value = "perceived_value" class PyConTutorialProposalFactory(PyConProposalFactory): class Meta: model = PyConTutorialProposal kind = factory.SubFactory(ProposalKindFactory, name="tutorial", slug="tutorial") domain_level = 1 outline = "outline" more_info = "more info" audience = "audience" perceived_value = "perceived_value" class ThunderdomeGroupFactory(factory.django.DjangoModelFactory): class Meta: model = ThunderdomeGroup
bsd-3-clause
-1,816,251,246,539,717,600
25.857143
78
0.699226
false
astr93/c_sghmc
c_sghmc/projectcode.py
1
21329
import numpy as np import numpy.random import sympy as sp import seaborn as sns import matplotlib.pyplot as plt def hmc(U, gradU, M, epsilon, m, theta, mhtest=1): """Hamiltonian Monte-Carlo algorithm with an optional Metropolis-Hastings test U is potential energy as a callable function gradU is its gradient as a callable function M is a mass matrix for kinetic energy epsilon is the step size dt m is the number of iterations theta is the parameter of interest mhters=1 is to include MH test by default - yes """ #draw momentum r=numpy.random.normal(size=(np.size(theta),1))*np.sqrt(M) theta0=theta E0=r.T * M * r/2+U(theta) #do leapfrog for i in range(1,m+1): r=r-gradU(theta)*epsilon/2 theta=theta+epsilon*r/M r=r-gradU(theta)*epsilon/2 r=-r #carry out MH test if mhtest != 0: Enew=r.T * M * r/2+U(theta) if np.exp(E0-Enew)<numpy.random.uniform(0,1,(1,1)): theta=theta0 newtheta=theta return newtheta #Parameters for analysis (to replicate the paper) nsample=80000 #number of iterations for the sample xstep=0.01 #step size for true distribution M=1 #mass C=3 #constant for sghmc epsilon=0.1 #dt stepsize term m=50 #number of steps for Monte-Carlo V=4 #estimate of Fisher Info for Bhat approximation in sghmc numpy.random.seed(2017) x=sp.symbols('x') U = sp.symbols('U', cls=sp.Function) U=sp.Matrix([-2* x**2 + x**4]) #define your potential energy here x = sp.Matrix([x]) gradientU = sp.simplify(U.jacobian(x)) #cover sympy function object into a callable function U=sp.lambdify(x,U) gradU=sp.lambdify(x,gradientU) #True distribution plt.figure(1) plt.subplot(211) gridx=np.linspace(-3,3,6/xstep) y=np.exp(-U(gridx)) plt.plot(gridx, np.reshape(y/np.sum(y)/xstep, (int(6/xstep), 1)) , 'bo') pass #hmc sampling alhorithm sampleshmc=np.zeros(shape=(nsample,1)) theta=0 for i in range(1,nsample+1): theta=hmc(U,gradU,M,epsilon,m,theta) sampleshmc[i-1]=theta #function to access the precision of approximation def comparison(y,samples): """Returns a euclidean distance as precision proxy y is the true ditribution samples are drawn using an MCMC algorithm """ y=np.reshape(y/np.sum(y)/xstep, (int(6/xstep), 1)) yh, xh= numpy.histogram(samples, bins=gridx) #compute a histogram for samples yh=yh/np.sum(yh)/xstep return np.sqrt(np.sum((yh[:,None]-y[1:])**2)) #euc distance between the two #hmc precision comparison(y,sampleshmc) #normalized histogram of hmc drawn samples sns.distplot(sampleshmc) pass def sghmc(U,gradU,M,epsilon,m,theta,C,V): """Stochastic Gradient Hamiltonian Monte-Carlo algorithm U is potential energy as a callable function gradU is its gradient as a callable function (noisy) M is a mass matrix for kinetic energy epsilon is the step size dt m is the number of iterations theta is the parameter of interest C is a user defined constant V is a Fisher info approximation """ #draw a momentum and compute Bhat r=numpy.random.standard_normal(size=(np.size(theta),1))*np.sqrt(M) Bhat=0.5*V*epsilon Ax=np.sqrt(2*(C-Bhat)*epsilon) #do leapfrog for i in range(1,m+1): r=r-gradU(theta)*epsilon-r*C*epsilon+numpy.random.standard_normal(size=(1,1))*Ax theta=theta+(r/M)*epsilon newtheta=theta return newtheta #sghmc sampling alhorithm (Pure python) samplessghmc=np.zeros(shape=(nsample,1)) theta=0 for i in range(1,nsample+1): theta=sghmc(U,gradU,M,epsilon,m,theta,C,V) samplessghmc[i-1]=theta #pure sghmc precision comparison(y,samplessghmc) #import a wrapped in pybind11 c++ implementation of sghmc algorithm import cppimport sghwrap=cppimport.imp("sghmcwrap") #sghmc sampling alhorithm (compilation in C++) 
samplessghmc_c=np.zeros(shape=(nsample,1)) theta=0 for i in range(1,nsample+1): theta=sghwrap.sghmc(U,gradU,M,epsilon,m,theta,C,V) samplessghmc_c[i-1]=theta #c++ sghmc precision comparison(y,samplessghmc_c) import numba from numba import jit from numba import float64 #prepare a just-in-time compiled function calling C++ sghmc algorithm @jit(float64[:](float64, float64, float64, float64, float64, float64)) def sampling(nsample,M,epsilon,m,C,V): theta=0 for i in range(1,nsample+1): theta=sghwrap.sghmc(U,gradU,M,epsilon,m,theta,C,V) samplessghmc_numba[i-1]=theta return samplessghmc_numba #sghmc sampling alhorithm (compilation in C++ of a jitted function) samplessghmc_numba=np.zeros(shape=(nsample,1)) samplessghmc_numba=sampling(nsample,M,epsilon,m,C,V) #jitted c++ sghmc precision comparison(y,samplessghmc_numba) #normalized histogram of sghmc drawn samples import seaborn as sns sns.distplot(samplessghmc_numba) pass %load_ext Cython import scipy.io import scipy import scipy.linalg as la import scipy.sparse import urllib.request #call "Australian credit" dataset for a Bayesian Linear Regression analysis #Bache, K. and Lichman, M. UCI machine learning repository,2013. URL http://archive.ics.uci. edu/ml. filename = 'australian' url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/statlog/australian/australian.dat' urllib.request.urlretrieve(url, filename) data = np.loadtxt(filename) #Parameters for BLR alpha=0.01 #sigma of prior normal nstepsunscaled=1000 #unscaled number of steps for Monte-Carlo scaleHCM=2 #ratio of size of steps for integration to their number niters=6000 #number of iterations scale_StepSize=0.5 #default 0.5 for sigma=0.01 m = np.round(nstepsunscaled/scaleHCM) #scaled number of steps for Monte-Carlo BurnIn = 1000 #number of iteration to use for burn in StepSize = 0.1 #unscaled dt/epsilon step size for dynamics StepSize = scale_StepSize*StepSize*scaleHCM; #scaled dt/epsilon step size for dynamics Poly_Order = 1 #order of polynomial to fit numpy.random.seed(2017) Xraw=data Y=Xraw[:,-1] #to test on Xraw = np.delete(Xraw, -1, 1) #leave only the data for training # Normalize Data N,D=Xraw.shape Xraw=(Xraw-np.mean(Xraw,0))/np.std(Xraw,0) # Create Polynomial Basis X = np.ones(shape=(N,1)) for i in range(Poly_Order): X = np.concatenate((X,Xraw**(i+1)),1) N,D = X.shape Mass = np.eye(D) InvMass = scipy.sparse.csr_matrix(la.inv(Mass)) #find inverse of Mass # Set initial values of w w = np.zeros(shape=(D,1)) ws = np.zeros(shape=(niters-BurnIn,D)) def LogNormPDF(xs,mu,sigma): """LogPrior calculcation as a LogNormal distribution xs are the values (Dx1) mu are the means (Dx1) sigma is the cov matrix (Dx1 as diag) """ if xs.shape[1] > 1: xs = xs.T if mu.shape[1] > 1: mu = mu.T D = max(xs.shape) return sum( -np.ones(shape=(D,1))*(0.5*np.log(2*np.pi*sigma)) - ((xs-mu)**2)/(2*(np.ones(shape=(D,1))*sigma)) ) #Compute energy and joint loglikelihood for current w LogPrior = LogNormPDF(np.zeros(shape=(1,D)),w,alpha) f = X@w LogLikelihood = f.T@Y - np.sum(np.log(1+np.exp(f))) CurrentLJL = LogLikelihood + LogPrior Proposed = 0 Accepted = 0 #Pure Python version of HMC BLR for iteration in range(niters): #draw momentum and stepsize r = (numpy.random.standard_normal(size=(1,D))@Mass).T r0 = r wnew = w Proposed = Proposed + 1 RandomStep = np.round(np.random.rand(1)*(m-1))+1 #do leapfrog mark = 0 f = X@wnew r = r + (StepSize/2)*( X.T@( Y[:,None] - (np.exp(f)/(1+np.exp(f))) ) - np.eye(D)*(1/alpha)@wnew) for step in range(int(RandomStep)-1): #make sure everything is well-behaved if 
(np.isnan(np.sum(r)) or np.isnan(np.sum(wnew)) or np.isinf(np.sum(r)) or np.isinf(np.sum(wnew))): mark = 1 break wnew = wnew + StepSize*(r) f = X@wnew r = r + StepSize*( X.T@( Y[:,None] - (1./(1+np.exp(-f))) ) - np.eye(D)*(1/alpha)@wnew ) r = np.real(r) f = np.real(f) if (mark == 0): wnew = wnew + StepSize*(r) f = X@wnew r = r + (StepSize/2)*( X.T@( Y[:,None] - (np.exp(f)/(1+np.exp(f))) ) - np.eye(D)*(1/alpha)@wnew ) else: r = r - (StepSize/2)*( X.T@( Y[:,None] - (np.exp(f)/(1+np.exp(f))) ) - np.eye(D)*(1/alpha)@wnew ) #find proposed energy H and train likelihood LogPrior = LogNormPDF(np.zeros(shape=(1,D)),wnew,alpha) f = X@wnew LogLikelihood = f.T@Y - np.sum(np.log(1+np.exp(f))) ProposedLJL = LogLikelihood + LogPrior ProposedH = -ProposedLJL + (r.T@InvMass@r)/2 #compute current H value CurrentH = -CurrentLJL + (r0.T@InvMass@r0)/2 #Accept according to Metropolis-Hastings ratio MH = -ProposedH + CurrentH if (MH > 0) or (MH > np.log(numpy.random.rand(1))): CurrentLJL = ProposedLJL w = wnew Accepted = Accepted + 1 #Now save samples after burn in if iteration > BurnIn: ws[[iteration-BurnIn-1],:] = w.T elif np.mod(iteration,50) == 0: Accepted = 0 Proposed = 0 #Fit the model and find R squared bhat=np.mean(ws,0) Yhat=X@bhat[:,None] SSR=np.sqrt(np.sum((Y[:,None]-Yhat)**2)) TSS=np.sum((Y-np.mean(Y,0))**2) Rsq=1-SSR/TSS Rsq Proposed=0 Accepted=0 %%cython -a import cython import numpy as np cimport numpy as np import numpy.random cdef inline int int_max(int a, int b): return a if a >= b else b #a quicker version of max @cython.boundscheck(False) @cython.wraparound(False) @cython.initializedcheck(False) @cython.cdivision(True) cdef LogNormPDF_cython(np.ndarray[np.float64_t, ndim=2] O, np.ndarray[np.float64_t, ndim=2] xs, np.ndarray[np.float64_t, ndim=2] mu, double sigma): """LogPrior calculcation as a LogNormal distribution xs are the values (Dx1) mu are the means (Dx1) sigma is the cov matrix (Dx1 as diag) """ if xs.shape[1] > 1: xs = xs.T if mu.shape[1] > 1: mu = mu.T cdef int D = int_max(xs.shape[0],xs.shape[1]) return sum( -O*(0.5*np.log(2*np.pi*sigma)) - ((xs-mu)**2)/(2*(O)*sigma)) @cython.boundscheck(False) @cython.wraparound(False) @cython.initializedcheck(False) @cython.cdivision(True) cdef momentupdate(np.ndarray[np.float64_t, ndim=2] E, np.ndarray[np.float64_t, ndim=2] X, np.ndarray[np.float64_t, ndim=1] Y, np.ndarray[np.float64_t, ndim=2] f, int D, double alpha, np.ndarray[np.float64_t, ndim=2] wnew): """Update momentum given current data """ cdef np.ndarray[np.float64_t, ndim=2] g=np.exp(f) return ( np.dot(X.T,( Y[:,None] - (g/(1+g)) )) - E*(1/alpha)@wnew) @cython.boundscheck(False) @cython.wraparound(False) @cython.initializedcheck(False) @cython.cdivision(True) cdef lfrogupdate(np.ndarray[np.float64_t, ndim=2] E, np.ndarray[np.float64_t, ndim=2] X, np.ndarray[np.float64_t, ndim=1] Y, np.ndarray[np.float64_t, ndim=2] f, int D, double alpha, np.ndarray[np.float64_t, ndim=2] wnew): """Update momentum given current data in leapfrog iterations """ return ( np.dot(X.T,( Y[:,None] - (1./(1+np.exp(-f))) )) - E*(1/alpha)@wnew) @cython.boundscheck(False) @cython.wraparound(False) @cython.initializedcheck(False) def BLR_hmc_cython(int D, np.ndarray[np.float64_t, ndim=2] Mass, np.ndarray[np.float64_t, ndim=2] w, double m, np.ndarray[np.float64_t, ndim=2] X, np.ndarray[np.float64_t, ndim=1] Y, np.ndarray[np.float64_t, ndim=2] f, double alpha, double StepSize, int BurnIn, int niters, double CurrentLJL): """Bayesian Linear Regression using HMC algorithm implemented using Cython D is shape of data Mass 
is the mass matrix of kinetic energy w is a vector of coefficients to estimate m is number of iterations for Monte-Carlo X is the explanatory data matrix Y is the explained vector f fit given initial coefficients (0s) alpha is variance of prior StepSize dt for dynamics BurnIn number of iteration to use for burn in niters number of iteration for Monte-Carlo CurrentLJL initial state of energy """ cdef int Proposed=0 cdef int Accepted=0 cdef int iteration, mark, step cdef np.ndarray[np.float64_t, ndim=2] ws = np.zeros(shape=(niters-BurnIn,D)) #coefficients to save cdef np.ndarray[np.float64_t, ndim=2] wnew cdef np.ndarray[np.float64_t, ndim=2] r, r0 cdef np.ndarray[np.float64_t, ndim=1] LogPrior, LogLikelihood, ProposedLJL, RandomStep cdef np.ndarray[np.float64_t, ndim=2] MH, ProposedH, CurrentH cdef np.ndarray[np.float64_t, ndim=2] Z=np.zeros(shape=(1,D)) cdef np.ndarray[np.float64_t, ndim=2] O=np.ones(shape=(D,1)) cdef np.ndarray[np.float64_t, ndim=2] E=np.eye(D) for iteration in range(niters): #draw momentum r = (np.dot(numpy.random.standard_normal(size=(1,D)),Mass)).T r0 = r wnew = w Proposed = Proposed + 1 RandomStep = np.round(np.random.rand(1)*(m-1))+1 #do leapfrog mark = 0 f = np.dot(X,wnew) r = r + (StepSize/2)*momentupdate(E,X,Y,f,D,alpha,wnew) for step in range(np.int(RandomStep)-1): #make sure everything is well-behaved if (np.isnan(np.sum(r)) or np.isnan(np.sum(wnew)) or np.isinf(np.sum(r)) or np.isinf(np.sum(wnew))): mark = 1 break wnew = wnew + StepSize*(r) f = np.dot(X,wnew) r = r + StepSize*lfrogupdate(E,X,Y,f,D,alpha,wnew) r = np.real(r) f = np.real(f) if (mark == 0): wnew = wnew + StepSize*(r) f = np.dot(X,wnew) r = r + (StepSize/2)*momentupdate(E,X,Y,f,D,alpha,wnew) else: r = r - (StepSize/2)*momentupdate(E,X,Y,f,D,alpha,wnew) #find proposed energy H and train likelihood LogPrior = LogNormPDF_cython(O,Z,wnew,alpha) f = np.dot(X,wnew) LogLikelihood = np.dot(f.T,Y) - np.sum(np.log(1+np.exp(f))) ProposedLJL = LogLikelihood + LogPrior ProposedH = -ProposedLJL + (np.dot(np.dot(r.T,Mass),r))/2 #compute current H value CurrentH = -CurrentLJL + (np.dot(np.dot(r0.T,Mass),r0))/2 #Accept according to Metropolis-Hastings ratio MH = -ProposedH + CurrentH if (MH > 0) or (MH > np.log(numpy.random.rand(1))): CurrentLJL = ProposedLJL w = wnew Accepted = Accepted + 1 #Now save samples after burn in if iteration > BurnIn: ws[iteration-BurnIn-1,:] = np.ravel(w) elif np.mod(iteration,50) == 0: Accepted = 0 Proposed = 0 return ws BRLHMCcoeffs=BLR_hmc_cython(D, Mass, w, m, X, Y, f, alpha, StepSize, BurnIn, niters, CurrentLJL) #Fit the model and find R squared bhat=np.mean(BRLHMCcoeffs,0) Yhat=X@bhat[:,None] SSR=np.sqrt(np.sum((Y[:,None]-Yhat)**2)) TSS=np.sum((Y-np.mean(Y,0))**2) Rsq=1-SSR/TSS Rsq #Pure Python version of SGHMC BLR C=3 #user-chosen const s.t. 
C>=B Bhat=0 #for simplicity, but ideally Bhat=0.5*Vhat*dt with Vhat estimated via empirical Fisher Info for iteration in range(niters): #draw momentum r = (numpy.random.standard_normal(size=(1,D))@Mass).T r0 = r wnew = w RandomStep = np.round(np.random.rand(1)*(m-1))+1 #do leapfrog mark = 0 f = X@wnew J = np.sqrt( 2 * (C-Bhat) * StepSize) for step in range(int(RandomStep)-1): #make sure everything is well-behaved if (np.isnan(np.sum(r)) or np.isnan(np.sum(wnew)) or np.isinf(np.sum(r)) or np.isinf(np.sum(wnew))): mark = 1 break wnew = wnew + StepSize*(r) f = X@wnew r = (r + StepSize*( X.T@( Y[:,None] - (1./(1+np.exp(-f))) ) - np.eye(D)*(1/alpha)@wnew )-StepSize*C*(r)+numpy.random.standard_normal(size=(D,1))*J) r = np.real(r) f = np.real(f) if (mark == 0): wnew = wnew + StepSize*(r) f = X@wnew #find proposed total energy H and train likelihood LogPrior = LogNormPDF(np.zeros(shape=(1,D)),wnew,alpha) f = X@wnew LogLikelihood = f.T@Y - np.sum(np.log(1+np.exp(f))) #training likelihood ProposedLJL = LogLikelihood + LogPrior w=wnew #Now save samples after burn in if iteration > BurnIn: ws[iteration-BurnIn-1,:] = w.ravel() bhat=np.mean(ws,0) Yhat=X@bhat[:,None] SSR=np.sqrt(np.sum((Y[:,None]-Yhat)**2)) TSS=np.sum((Y-np.mean(Y,0))**2) Rsq=1-SSR/TSS Rsq C=3 #user-chosen const s.t. C>=B Bhat=0 #for simplicity, but ideally Bhat=0.5*Vhat*dt with Vhat estimated via empirical Fisher Info %%cython -a import cython import numpy as np cimport numpy as np import numpy.random cdef inline int int_max(int a, int b): return a if a >= b else b @cython.boundscheck(False) @cython.wraparound(False) @cython.initializedcheck(False) @cython.cdivision(True) cdef LogNormPDF_cython(np.ndarray[np.float64_t, ndim=2] O, np.ndarray[np.float64_t, ndim=2] xs, np.ndarray[np.float64_t, ndim=2] mu, double sigma): """LogPrior calculcation as a LogNormal distribution xs are the values (Dx1) mu are the means (Dx1) sigma is the cov matrix (Dx1 as diag) """ if xs.shape[1] > 1: xs = xs.T if mu.shape[1] > 1: mu = mu.T cdef int D = int_max(xs.shape[0],xs.shape[1]) return sum( -O*(0.5*np.log(2*np.pi*sigma)) - ((xs-mu)**2)/(2*(O)*sigma)) @cython.boundscheck(False) @cython.wraparound(False) @cython.initializedcheck(False) @cython.cdivision(True) cdef lfrogupdate(np.ndarray[np.float64_t, ndim=2] E, np.ndarray[np.float64_t, ndim=2] X, np.ndarray[np.float64_t, ndim=1] Y, np.ndarray[np.float64_t, ndim=2] f, int D, double alpha, np.ndarray[np.float64_t, ndim=2] wnew): """Update momentum given current data in leapfrog iterations """ return ( np.dot(X.T,( Y[:,None] - (1./(1+np.exp(-f))) )) - E*(1/alpha)@wnew) cython.boundscheck(False) @cython.wraparound(False) @cython.initializedcheck(False) def BLR_sghmc_cython(int C, int Bhat, int D, np.ndarray[np.float64_t, ndim=2] Mass, np.ndarray[np.float64_t, ndim=2] w, double m, np.ndarray[np.float64_t, ndim=2] X, np.ndarray[np.float64_t, ndim=1] Y, np.ndarray[np.float64_t, ndim=2] f, double alpha, double StepSize, int BurnIn, int niters, double CurrentLJL): """Bayesian Linear Regression using HMC algorithm implemented using Cython C is a user specified constant Bhat is an approximate set to 0 here (it should converge to 0) D is shape of data Mass is the mass matrix of kinetic energy w is a vector of coefficients to estimate m is number of iterations for Monte-Carlo X is the explanatory data matrix Y is the explained vector f fit given initial coefficients (0s) alpha is variance of prior StepSize dt for dynamics BurnIn number of iteration to use for burn in niters number of iteration for Monte-Carlo CurrentLJL 
initial state of energy
    """

    cdef int iteration, mark, step
    cdef np.ndarray[np.float64_t, ndim=2] ws = np.zeros(shape=(niters-BurnIn,D)) #coefficients to save
    cdef np.ndarray[np.float64_t, ndim=2] wnew
    cdef np.ndarray[np.float64_t, ndim=2] r, r0
    cdef np.ndarray[np.float64_t, ndim=1] LogPrior, LogLikelihood, ProposedLJL, RandomStep
    cdef np.ndarray[np.float64_t, ndim=2] Z=np.zeros(shape=(1,D))
    cdef np.ndarray[np.float64_t, ndim=2] O=np.ones(shape=(D,1))
    cdef np.ndarray[np.float64_t, ndim=2] E=np.eye(D)
    cdef double J = np.sqrt( 2 * (C-Bhat) * StepSize) #sd for friction

    for iteration in range(niters):

        #draw momentum
        r = (np.dot(numpy.random.standard_normal(size=(1,D)),Mass)).T
        r0 = r
        wnew = w

        RandomStep = np.round(np.random.rand(1)*(m-1))+1

        #do leapfrog
        mark = 0
        f = np.dot(X,wnew)

        for step in range(int(RandomStep)-1):
            #make sure everything is well-behaved
            if (np.isnan(np.sum(r)) or np.isnan(np.sum(wnew)) or np.isinf(np.sum(r)) or np.isinf(np.sum(wnew))):
                mark = 1
                break
            wnew = wnew + StepSize*(r)
            f = np.dot(X,wnew)
            r = (r + StepSize*lfrogupdate(E,X,Y,f,D,alpha,wnew)-StepSize*C*(r)+numpy.random.standard_normal(size=(D,1))*J)

        r = np.real(r)
        f = np.real(f)

        if (mark == 0):
            wnew = wnew + StepSize*(r)
            f = np.dot(X,wnew)

        #find proposed total energy H and train likelihood
        LogPrior = LogNormPDF_cython(O,Z,wnew,alpha)
        f = np.dot(X,wnew)
        LogLikelihood = np.dot(f.T,Y) - np.sum(np.log(1+np.exp(f))) #training likelihood
        ProposedLJL = LogLikelihood + LogPrior

        w=wnew

        #Now save samples after burn in
        if iteration > BurnIn:
            ws[iteration-BurnIn-1,:] = w.ravel()

    return ws

BRLSGHMCcoeffs=BLR_sghmc_cython(C, Bhat, D, Mass, w, m, X, Y, f, alpha, StepSize, BurnIn, niters, CurrentLJL)

bhat=np.mean(BRLSGHMCcoeffs,0)
Yhat=X@bhat[:,None]
SSR=np.sqrt(np.sum((Y[:,None]-Yhat)**2))
TSS=np.sum((Y-np.mean(Y,0))**2)
Rsq=1-SSR/TSS
Rsq
mit
-5,523,963,265,161,026,000
30.977511
312
0.638755
false
tomashaber/raiden
raiden/tasks.py
1
3782
# -*- coding: utf-8 -*-
import time

from ethereum import slogging
import gevent
from gevent.event import AsyncResult
from gevent.queue import (
    Queue,
)

REMOVE_CALLBACK = object()
log = slogging.get_logger(__name__)  # pylint: disable=invalid-name


class Task(gevent.Greenlet):
    """ Base class used to create tasks.

    Note:
        Always call super().__init__().
    """

    def __init__(self):
        super(Task, self).__init__()
        self.response_queue = Queue()


class AlarmTask(Task):
    """ Task to notify when a block is mined. """

    def __init__(self, chain):
        super(AlarmTask, self).__init__()

        self.callbacks = list()
        self.stop_event = AsyncResult()
        self.chain = chain
        self.last_block_number = None

        # TODO: Start with a larger wait_time and decrease it as the
        # probability of a new block increases.
        self.wait_time = 0.5
        self.last_loop = time.time()

    def register_callback(self, callback):
        """ Register a new callback.

        Note:
            The callback will be executed in the AlarmTask context and for
            this reason it should not block, otherwise we can miss block
            changes.
        """
        if not callable(callback):
            raise ValueError('callback is not a callable')

        self.callbacks.append(callback)

    def remove_callback(self, callback):
        """Remove callback from the list of callbacks if it exists"""
        if callback in self.callbacks:
            self.callbacks.remove(callback)

    def _run(self):  # pylint: disable=method-hidden
        log.debug('starting block number', block_number=self.last_block_number)

        sleep_time = 0
        while self.stop_event.wait(sleep_time) is not True:
            # record when the work started so the time spent polling can be
            # subtracted from the next sleep interval
            work_start = time.time()
            self.poll_for_new_block()

            # we want this task to iterate in the tick of `wait_time`, so take
            # into account how long we spent executing one tick.
            self.last_loop = time.time()
            work_time = self.last_loop - work_start
            if work_time > self.wait_time:
                log.warning(
                    'alarm loop is taking longer than the wait time',
                    work_time=work_time,
                    wait_time=self.wait_time,
                )
                sleep_time = 0.001
            else:
                sleep_time = self.wait_time - work_time

        # stopping
        self.callbacks = list()

    def poll_for_new_block(self):
        current_block = self.chain.block_number()

        if current_block > self.last_block_number + 1:
            difference = current_block - self.last_block_number - 1
            log.error(
                'alarm missed %s blocks',
                difference,
            )

        if current_block != self.last_block_number:
            log.debug(
                'new block',
                number=current_block,
                timestamp=self.last_loop,
            )

            self.last_block_number = current_block
            remove = list()
            for callback in self.callbacks:
                try:
                    result = callback(current_block)
                except:  # pylint: disable=bare-except
                    log.exception('unexpected exception on alarm')
                else:
                    if result is REMOVE_CALLBACK:
                        remove.append(callback)

            for callback in remove:
                self.callbacks.remove(callback)

    def start(self):
        self.last_block_number = self.chain.block_number()
        super(AlarmTask, self).start()

    def stop_and_wait(self):
        self.stop_event.set(True)
        gevent.wait(self)

    def stop_async(self):
        self.stop_event.set(True)
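# Illustrative usage sketch (not part of the original module; `chain` is an
# assumed object exposing block_number(), as used above):
#
#     alarm = AlarmTask(chain)
#
#     def on_new_block(block_number):
#         print('new block mined:', block_number)
#         # return REMOVE_CALLBACK here to unregister this callback
#
#     alarm.register_callback(on_new_block)
#     alarm.start()
#     ...
#     alarm.stop_and_wait()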
mit
-6,795,903,697,622,932,000
29.015873
79
0.557377
false
mganeva/mantid
Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/FlatPlatePaalmanPingsCorrection.py
1
33315
# Mantid Repository : https://github.com/mantidproject/mantid # # Copyright &copy; 2018 ISIS Rutherford Appleton Laboratory UKRI, # NScD Oak Ridge National Laboratory, European Spallation Source # & Institut Laue - Langevin # SPDX - License - Identifier: GPL - 3.0 + # pylint: disable=no-init,invalid-name,too-many-instance-attributes from __future__ import (absolute_import, division, print_function) import math from six import iteritems from six import integer_types import numpy as np from mantid.simpleapi import * from mantid.api import (PythonAlgorithm, AlgorithmFactory, PropertyMode, MatrixWorkspaceProperty, WorkspaceGroupProperty, InstrumentValidator, Progress) from mantid.kernel import (StringListValidator, StringMandatoryValidator, IntBoundedValidator, FloatBoundedValidator, Direction, logger) class FlatPlatePaalmanPingsCorrection(PythonAlgorithm): # Useful constants PICONV = math.pi / 180.0 TABULATED_WAVELENGTH = 1.798 TABULATED_ENERGY = 25.305 # Sample variables _sample_ws_name = None _sample_chemical_formula = None _sample_density_type = None _sample_density = None _sample_thickness = None _sample_angle = 0.0 # Container Variables _use_can = False _can_ws_name = None _can_chemical_formula = None _can_density_type = None _can_density = None _can_front_thickness = None _can_back_thickness = None _has_sample_in = False _has_can_front_in = False _has_can_back_in = False _number_wavelengths = 10 _emode = None _efixed = 0.0 _output_ws_name = None _angles = list() _wavelengths = list() _interpolate = None # ------------------------------------------------------------------------------ def category(self): return "Workflow\\MIDAS;CorrectionFunctions\\AbsorptionCorrections" def summary(self): return "Calculates absorption corrections for a flat plate sample using Paalman & Pings format." 
# ------------------------------------------------------------------------------ def PyInit(self): ws_validator = InstrumentValidator() self.declareProperty(MatrixWorkspaceProperty('SampleWorkspace', '', direction=Direction.Input, validator=ws_validator), doc='Name for the input sample workspace') self.declareProperty(name='SampleChemicalFormula', defaultValue='', validator=StringMandatoryValidator(), doc='Sample chemical formula') self.declareProperty(name='SampleDensityType', defaultValue='Mass Density', validator=StringListValidator(['Mass Density', 'Number Density']), doc='Use of Mass density or Number density') self.declareProperty(name='SampleDensity', defaultValue=0.1, doc='Mass density (g/cm^3) or Number density (atoms/Angstrom^3)') self.declareProperty(name='SampleThickness', defaultValue=0.0, validator=FloatBoundedValidator(0.0), doc='Sample thickness in cm') self.declareProperty(name='SampleAngle', defaultValue=0.0, doc='Angle between incident beam and normal to flat plate surface') self.declareProperty(MatrixWorkspaceProperty('CanWorkspace', '', direction=Direction.Input, optional=PropertyMode.Optional, validator=ws_validator), doc="Name for the input container workspace") self.declareProperty(name='CanChemicalFormula', defaultValue='', doc='Container chemical formula') self.declareProperty(name='CanDensityType', defaultValue='Mass Density', validator=StringListValidator(['Mass Density', 'Number Density']), doc='Use of Mass density or Number density') self.declareProperty(name='CanDensity', defaultValue=0.1, doc='Mass density (g/cm^3) or Number density (atoms/Angstrom^3)') self.declareProperty(name='CanFrontThickness', defaultValue=0.0, validator=FloatBoundedValidator(0.0), doc='Container front thickness in cm') self.declareProperty(name='CanBackThickness', defaultValue=0.0, validator=FloatBoundedValidator(0.0), doc='Container back thickness in cm') self.declareProperty(name='NumberWavelengths', defaultValue=10, validator=IntBoundedValidator(1), doc='Number of wavelengths for calculation') self.declareProperty(name='Interpolate', defaultValue=True, doc='Interpolate the correction workspaces to match the sample workspace') self.declareProperty(name='Emode', defaultValue='Elastic', validator=StringListValidator(['Elastic', 'Indirect', 'Direct', 'Efixed']), doc='Energy transfer mode.') self.declareProperty(name='Efixed', defaultValue=0., doc='Analyser energy (mev). By default will be read from the instrument parameters. ' 'Specify manually to override. 
This is used only in Efixed energy transfer mode.') self.declareProperty(WorkspaceGroupProperty('OutputWorkspace', '', direction=Direction.Output), doc='The output corrections workspace group') # ------------------------------------------------------------------------------ def validateInputs(self): issues = dict() sample_ws_name = self.getPropertyValue('SampleWorkspace') can_ws_name = self.getPropertyValue('CanWorkspace') use_can = can_ws_name != '' # Ensure that a can chemical formula is given when using a can workspace if use_can: can_chemical_formula = self.getPropertyValue('CanChemicalFormula') if can_chemical_formula == '': issues['CanChemicalFormula'] = 'Must provide a chemical formula when providing a can workspace' self._emode = self.getPropertyValue('Emode') self._efixed = self.getProperty('Efixed').value if self._emode != 'Efixed': # require both sample and can ws have wavelenght as x-axis if mtd[sample_ws_name].getAxis(0).getUnit().unitID() != 'Wavelength': issues['SampleWorkspace'] = 'Workspace must have units of wavelength.' if use_can and mtd[can_ws_name].getAxis(0).getUnit().unitID() != 'Wavelength': issues['CanWorkspace'] = 'Workspace must have units of wavelength.' return issues # ------------------------------------------------------------------------------ def PyExec(self): self._setup() self._wave_range() setup_prog = Progress(self, start=0.0, end=0.2, nreports=2) # Set sample material form chemical formula setup_prog.report('Set sample material') self._sample_density = self._set_material(self._sample_ws_name, self._sample_chemical_formula, self._sample_density_type, self._sample_density) # If using a can, set sample material using chemical formula if self._use_can: setup_prog.report('Set container sample material') self._can_density = self._set_material(self._can_ws_name, self._can_chemical_formula, self._can_density_type, self._can_density) # Holders for the corrected data data_ass = [] data_assc = [] data_acsc = [] data_acc = [] self._get_angles() num_angles = len(self._angles) workflow_prog = Progress(self, start=0.2, end=0.8, nreports=num_angles * 2) # Check sample input sam_material = mtd[self._sample_ws_name].sample().getMaterial() self._has_sample_in = \ bool(self._sample_density and self._sample_thickness and (sam_material.totalScatterXSection() + sam_material.absorbXSection())) if not self._has_sample_in: logger.warning("The sample has not been given, or the information is incomplete. Continuing but no absorption for sample will " "be computed.") # Check can input if self._use_can: can_material = mtd[self._can_ws_name].sample().getMaterial() if self._can_density and (can_material.totalScatterXSection() + can_material.absorbXSection()): self._has_can_front_in = bool(self._can_front_thickness) self._has_can_back_in = bool(self._can_back_thickness) else: logger.warning( "A can workspace was given but the can information is incomplete. Continuing but no absorption for the can will " "be computed.") if not self._has_can_front_in: logger.warning( "A can workspace was given but the can front thickness was not given. Continuing but no absorption for can front" " will be computed.") if not self._has_can_back_in: logger.warning( "A can workspace was given but the can back thickness was not given. 
Continuing but no absorption for can back" " will be computed.") for angle_idx in range(num_angles): workflow_prog.report('Running flat correction for angle %s' % angle_idx) angle = self._angles[angle_idx] (ass, assc, acsc, acc) = self._flat_abs(angle) logger.information('Angle %d: %f successful' % (angle_idx + 1, self._angles[angle_idx])) workflow_prog.report('Appending data for angle %s' % angle_idx) data_ass = np.append(data_ass, ass) data_assc = np.append(data_assc, assc) data_acsc = np.append(data_acsc, acsc) data_acc = np.append(data_acc, acc) log_prog = Progress(self, start=0.8, end=1.0, nreports=8) sample_logs = {'sample_shape': 'flatplate', 'sample_filename': self._sample_ws_name, 'sample_thickness': self._sample_thickness, 'sample_angle': self._sample_angle, 'emode': self._emode, 'efixed': self._efixed} dataX = self._wavelengths * num_angles # Create the output workspaces ass_ws = self._output_ws_name + '_ass' log_prog.report('Creating ass output Workspace') CreateWorkspace(OutputWorkspace=ass_ws, DataX=dataX, DataY=data_ass, NSpec=num_angles, UnitX='Wavelength', VerticalAxisUnit='SpectraNumber', ParentWorkspace=self._sample_ws_name, EnableLogging=False) log_prog.report('Adding sample logs') self._add_sample_logs(ass_ws, sample_logs) workspaces = [ass_ws] if self._use_can: log_prog.report('Adding can sample logs') AddSampleLog(Workspace=ass_ws, LogName='can_filename', LogType='String', LogText=str(self._can_ws_name), EnableLogging=False) assc_ws = self._output_ws_name + '_assc' workspaces.append(assc_ws) log_prog.report('Creating assc output workspace') CreateWorkspace(OutputWorkspace=assc_ws, DataX=dataX, DataY=data_assc, NSpec=num_angles, UnitX='Wavelength', VerticalAxisUnit='SpectraNumber', ParentWorkspace=self._sample_ws_name, EnableLogging=False) log_prog.report('Adding assc sample logs') self._add_sample_logs(assc_ws, sample_logs) AddSampleLog(Workspace=assc_ws, LogName='can_filename', LogType='String', LogText=str(self._can_ws_name), EnableLogging=False) acsc_ws = self._output_ws_name + '_acsc' workspaces.append(acsc_ws) log_prog.report('Creating acsc outputworkspace') CreateWorkspace(OutputWorkspace=acsc_ws, DataX=dataX, DataY=data_acsc, NSpec=num_angles, UnitX='Wavelength', VerticalAxisUnit='SpectraNumber', ParentWorkspace=self._sample_ws_name, EnableLogging=False) log_prog.report('Adding acsc sample logs') self._add_sample_logs(acsc_ws, sample_logs) AddSampleLog(Workspace=acsc_ws, LogName='can_filename', LogType='String', LogText=str(self._can_ws_name), EnableLogging=False) acc_ws = self._output_ws_name + '_acc' workspaces.append(acc_ws) log_prog.report('Creating acc workspace') CreateWorkspace(OutputWorkspace=acc_ws, DataX=dataX, DataY=data_acc, NSpec=num_angles, UnitX='Wavelength', VerticalAxisUnit='SpectraNumber', ParentWorkspace=self._sample_ws_name, EnableLogging=False) log_prog.report('Adding acc sample logs') self._add_sample_logs(acc_ws, sample_logs) AddSampleLog(Workspace=acc_ws, LogName='can_filename', LogType='String', LogText=str(self._can_ws_name), EnableLogging=False) if self._interpolate: self._interpolate_corrections(workspaces) log_prog.report('Grouping Output Workspaces') GroupWorkspaces(InputWorkspaces=','.join(workspaces), OutputWorkspace=self._output_ws_name, EnableLogging=False) self.setPropertyValue('OutputWorkspace', self._output_ws_name) # ------------------------------------------------------------------------------ def _setup(self): self._sample_ws_name = self.getPropertyValue('SampleWorkspace') self._sample_chemical_formula = 
self.getPropertyValue('SampleChemicalFormula') self._sample_density_type = self.getPropertyValue('SampleDensityType') self._sample_density = self.getProperty('SampleDensity').value self._sample_thickness = self.getProperty('SampleThickness').value self._sample_angle = self.getProperty('SampleAngle').value self._can_ws_name = self.getPropertyValue('CanWorkspace') self._use_can = self._can_ws_name != '' self._can_chemical_formula = self.getPropertyValue('CanChemicalFormula') self._can_density_type = self.getPropertyValue('CanDensityType') self._can_density = self.getProperty('CanDensity').value self._can_front_thickness = self.getProperty('CanFrontThickness').value self._can_back_thickness = self.getProperty('CanBackThickness').value self._number_wavelengths = self.getProperty('NumberWavelengths').value self._interpolate = self.getProperty('Interpolate').value self._emode = self.getPropertyValue('Emode') self._efixed = self.getProperty('Efixed').value if (self._emode == 'Efixed' or self._emode == 'Direct' or self._emode == 'Indirect') and self._efixed == 0.: # Efixed mode requested with default efixed, try to read from Instrument Parameters try: self._efixed = self._getEfixed() logger.information('Found Efixed = {0}'.format(self._efixed)) except ValueError: raise RuntimeError('Efixed, Direct or Indirect mode requested with the default value,' 'but could not find the Efixed parameter in the instrument.') if self._emode == 'Efixed': logger.information('No interpolation is possible in Efixed mode.') self._interpolate = False self._output_ws_name = self.getPropertyValue('OutputWorkspace') # purge the lists self._angles = list() self._wavelengths = list() # ------------------------------------------------------------------------------ def _set_material(self, ws_name, chemical_formula, density_type, density): """ Sets the sample material for a given workspace @param ws_name :: name of the workspace to set sample material for @param chemical_formula :: Chemical formula of sample @param density_type :: 'Mass Density' or 'Number Density' @param density :: Density of sample @return pointer to the workspace with sample material set AND number density of the sample material """ set_material_alg = self.createChildAlgorithm('SetSampleMaterial') if density_type == 'Mass Density': set_material_alg.setProperty('SampleMassDensity', density) else: set_material_alg.setProperty('SampleNumberDensity', density) set_material_alg.setProperty('InputWorkspace', ws_name) set_material_alg.setProperty('ChemicalFormula', chemical_formula) set_material_alg.execute() ws = set_material_alg.getProperty('InputWorkspace').value return ws.sample().getMaterial().numberDensity # ------------------------------------------------------------------------------ def _get_angles(self): num_hist = mtd[self._sample_ws_name].getNumberHistograms() source_pos = mtd[self._sample_ws_name].getInstrument().getSource().getPos() sample_pos = mtd[self._sample_ws_name].getInstrument().getSample().getPos() beam_pos = sample_pos - source_pos self._angles = list() for index in range(0, num_hist): detector = mtd[self._sample_ws_name].getDetector(index) two_theta = detector.getTwoTheta(sample_pos, beam_pos) / self.PICONV # calc angle self._angles.append(two_theta) # ------------------------------------------------------------------------------ def _wave_range(self): if self._emode == 'Efixed': lambda_fixed = math.sqrt(81.787 / self._efixed) self._wavelengths.append(lambda_fixed) logger.information('Efixed mode, setting lambda_fixed to 
{0}'.format(lambda_fixed)) else: wave_range = '__WaveRange' ExtractSingleSpectrum(InputWorkspace=self._sample_ws_name, OutputWorkspace=wave_range, WorkspaceIndex=0) Xin = mtd[wave_range].readX(0) wave_min = mtd[wave_range].readX(0)[0] wave_max = mtd[wave_range].readX(0)[len(Xin) - 1] number_waves = self._number_wavelengths wave_bin = (wave_max - wave_min) / (number_waves - 1) self._wavelengths = list() for idx in range(0, number_waves): self._wavelengths.append(wave_min + idx * wave_bin) DeleteWorkspace(wave_range, EnableLogging=False) # ------------------------------------------------------------------------------ def _getEfixed(self): return_eFixed = 0. inst = mtd[self._sample_ws_name].getInstrument() if inst.hasParameter('Efixed'): return_eFixed = inst.getNumberParameter('EFixed')[0] elif inst.hasParameter('analyser'): analyser_name = inst.getStringParameter('analyser')[0] analyser_comp = inst.getComponentByName(analyser_name) if analyser_comp is not None and analyser_comp.hasParameter('Efixed'): return_eFixed = analyser_comp.getNumberParameter('EFixed')[0] if return_eFixed > 0: return return_eFixed else: raise ValueError('No non-zero Efixed parameter found') # ------------------------------------------------------------------------------ def _interpolate_corrections(self, workspaces): """ Performs interpolation on the correction workspaces such that the number of bins matches that of the input sample workspace. @param workspaces List of correction workspaces to interpolate """ for ws in workspaces: SplineInterpolation(WorkspaceToMatch=self._sample_ws_name, WorkspaceToInterpolate=ws, OutputWorkspace=ws, OutputWorkspaceDeriv='') # ------------------------------------------------------------------------------ def _add_sample_logs(self, ws, sample_logs): """ Add a dictionary of logs to a workspace. The type of the log is inferred by the type of the value passed to the log. @param ws Workspace to add logs too. @param sample_logs Dictionary of logs to append to the workspace. """ for key, value in iteritems(sample_logs): if isinstance(value, bool): log_type = 'String' elif isinstance(value, (integer_types, float)): log_type = 'Number' else: log_type = 'String' AddSampleLog(Workspace=ws, LogName=key, LogType=log_type, LogText=str(value), EnableLogging=False) # ------------------------------------------------------------------------------ def _flat_abs(self, angle): """ FlatAbs - calculate flat plate absorption factors For more information See: - MODES User Guide: http://www.isis.stfc.ac.uk/instruments/iris/data-analysis/modes-v3-user-guide-6962.pdf - C J Carlile, Rutherford Laboratory report, RL-74-103 (1974) The current implementation is based on: - J. Wuttke: 'Absorption-Correction Factors for Scattering from Flat or Tubular Samples: Open-Source Implementation libabsco, and Why it Should be Used with Caution', http://apps.jcns.fz-juelich.de/doku/sc/_media/abs00.pdf @return: A tuple containing the attenuations; 1) scattering and absorption in sample, 2) scattering in sample and absorption in sample and container 3) scattering in container and absorption in sample and container, 4) scattering and absorption in container. """ # self._sample_angle is the normal to the sample surface, i.e. 
# self._sample_angle = 0 means that the sample is perpendicular # to the incident beam alpha = (90.0 + self._sample_angle) * self.PICONV theta = angle * self.PICONV salpha = np.sin(alpha) if theta > (alpha + np.pi): stha = np.sin(abs(theta-alpha-np.pi)) else: stha = np.sin(abs(theta-alpha)) nlam = len(self._wavelengths) ass = np.ones(nlam) assc = np.ones(nlam) acsc = np.ones(nlam) acc = np.ones(nlam) # Scattering in direction of slab --> calculation is not reliable # Default to 1 for everything # Tolerance is 0.001 rad ~ 0.06 deg if abs(theta-alpha) < 0.001: return ass, assc, acsc, acc sample = mtd[self._sample_ws_name].sample() sam_material = sample.getMaterial() # List of wavelengths waveslengths = np.array(self._wavelengths) sst = np.vectorize(self._self_shielding_transmission) ssr = np.vectorize(self._self_shielding_reflection) ki_s, kf_s = 0, 0 if self._has_sample_in: ki_s, kf_s, ass = self._sample_cross_section_calc(sam_material, waveslengths, theta, alpha, stha, salpha, sst, ssr) # Container --> Acc, Assc, Acsc if self._use_can: ass, assc, acsc, acc = self._can_cross_section_calc(waveslengths, theta, alpha, stha, salpha, ki_s, kf_s, ass, acc, sst, ssr) return ass, assc, acsc, acc # ------------------------------------------------------------------------------ def _sample_cross_section_calc(self, sam_material, waves, theta, alpha, stha, salpha, sst, ssr): # Sample cross section (value for each of the wavelengths and for E = Efixed) sample_x_section = (sam_material.totalScatterXSection() + sam_material.absorbXSection() * waves / self.TABULATED_WAVELENGTH) * self._sample_density if self._efixed > 0: sample_x_section_efixed = (sam_material.totalScatterXSection() + sam_material.absorbXSection() * np.sqrt(self.TABULATED_ENERGY / self._efixed)) * self._sample_density elif self._emode == 'Elastic': sample_x_section_efixed = 0 # Sample --> Ass if self._emode == 'Efixed': ki_s = sample_x_section_efixed * self._sample_thickness / salpha kf_s = sample_x_section_efixed * self._sample_thickness / stha else: ki_s, kf_s = self._calc_ki_kf(waves, self._sample_thickness, salpha, stha, sample_x_section, sample_x_section_efixed) if theta < alpha or theta > (alpha + np.pi): # transmission case ass = sst(ki_s, kf_s) else: # reflection case ass = ssr(ki_s, kf_s) return ki_s, kf_s, ass # ------------------------------------------------------------------------------ def _can_cross_section_calc(self, wavelengths, theta, alpha, stha, salpha, ki_s, kf_s, ass, acc, sst, ssr): can_sample = mtd[self._can_ws_name].sample() can_material = can_sample.getMaterial() if self._has_can_front_in or self._has_can_back_in: # Calculate can cross section (value for each of the wavelengths and for E = Efixed) can_x_section = (can_material.totalScatterXSection() + can_material.absorbXSection() * wavelengths / self.TABULATED_WAVELENGTH) * self._can_density if self._efixed > 0: can_x_section_efixed = (can_material.totalScatterXSection() + can_material.absorbXSection() * np.sqrt(self.TABULATED_ENERGY / self._efixed)) * self._can_density elif self._emode == 'Elastic': can_x_section_efixed = 0 ki_c1, kf_c1, ki_c2, kf_c2 = 0, 0, 0, 0 acc1, acc2 = np.ones(len(self._wavelengths)), np.ones(len(self._wavelengths)) if self._has_can_front_in: # Front container --> Acc1 ki_c1, kf_c1, acc1 = self._can_thickness_calc(can_x_section, can_x_section_efixed, self._can_front_thickness, wavelengths, theta, alpha, stha, salpha, ssr, sst) if self._has_can_back_in: # Back container --> Acc2 ki_c2, kf_c2, acc2 = self._can_thickness_calc(can_x_section, 
can_x_section_efixed, self._can_back_thickness, wavelengths, theta, alpha, stha, salpha, ssr, sst) # Attenuation due to passage by other layers (sample or container) if theta < alpha or theta > (alpha + np.pi): # transmission case assc, acsc, acc = self._container_transmission_calc(acc, acc1, acc2, ki_s, kf_s, ki_c1, kf_c2, ass) else: # reflection case assc, acsc, acc = self._container_reflection_calc(acc, acc1, acc2, ki_s, kf_s, ki_c1, kf_c1, ass) return ass, assc, acsc, acc # ------------------------------------------------------------------------------ def _can_thickness_calc(self, can_x_section, can_x_section_efixed, can_thickness, wavelengths, theta, alpha, stha, salpha, ssr, sst): if self._emode == 'Efixed': ki = can_x_section_efixed * can_thickness / salpha kf = can_x_section_efixed * can_thickness / stha else: ki, kf = self._calc_ki_kf(wavelengths, can_thickness, salpha, stha, can_x_section, can_x_section_efixed) if theta < alpha or theta > (alpha + np.pi): # transmission case acc = sst(ki, kf) else: # reflection case acc = ssr(ki, kf) return ki, kf, acc # ------------------------------------------------------------------------------ def _container_transmission_calc(self, acc, acc1, acc2, ki_s, kf_s, ki_c1, kf_c2, ass): if self._has_can_front_in and self._has_can_back_in: acc = (self._can_front_thickness * acc1 * np.exp(-kf_c2) + self._can_back_thickness * acc2 * np.exp(-ki_c1)) \ / (self._can_front_thickness + self._can_back_thickness) if self._has_sample_in: acsc = (self._can_front_thickness * acc1 * np.exp(-kf_s - kf_c2) + self._can_back_thickness * acc2 * np.exp(-ki_c1 - ki_s)) / \ (self._can_front_thickness + self._can_back_thickness) else: acsc = acc assc = ass * np.exp(-ki_c1 - kf_c2) elif self._has_can_front_in: acc = acc1 if self._has_sample_in: acsc = acc1 * np.exp(-kf_s) else: acsc = acc assc = ass * np.exp(-ki_c1) elif self._has_can_back_in: acc = acc2 if self._has_sample_in: acsc = acc2 * np.exp(-ki_s) else: acsc = acc assc = ass * np.exp(-kf_c2) else: if self._has_sample_in: acsc = 0.5 * np.exp(-kf_s) + 0.5 * np.exp(-ki_s) else: acsc = acc assc = ass return assc, acsc, acc # ------------------------------------------------------------------------------ def _container_reflection_calc(self, acc, acc1, acc2, ki_s, kf_s, ki_c1, kf_c1, ass): if self._has_can_front_in and self._has_can_back_in: acc = (self._can_front_thickness * acc1 + self._can_back_thickness * acc2 * np.exp(-ki_c1 - kf_c1)) \ / (self._can_front_thickness + self._can_back_thickness) if self._has_sample_in: acsc = (self._can_front_thickness * acc1 + self._can_back_thickness * acc2 * np.exp(-ki_c1 - ki_s - kf_s - kf_c1)) \ / (self._can_front_thickness + self._can_back_thickness) else: acsc = acc assc = ass * np.exp(-ki_c1 - kf_c1) elif self._has_can_front_in: acc = acc1 if self._has_sample_in: acsc = acc1 else: acsc = acc assc = ass * np.exp(-ki_c1 - kf_c1) elif self._has_can_back_in: acc = acc2 if self._has_sample_in: acsc = acc2 * np.exp(-ki_s - kf_s) else: acsc = acc assc = ass * np.exp(-ki_c1 - kf_c1) else: if self._has_sample_in: acsc = 0.5 + 0.5 * np.exp(-ki_s - kf_s) else: acsc = acc assc = ass return assc, acsc, acc # ------------------------------------------------------------------------------ def _self_shielding_transmission(self, ki, kf): if abs(ki-kf) < 1.0e-3: return np.exp(-ki) * ( 1.0 - 0.5*(kf-ki) + (kf-ki)**2/12.0 ) else: return (np.exp(-kf)-np.exp(-ki)) / (ki-kf) # ------------------------------------------------------------------------------ def _self_shielding_reflection(self, ki, kf): 
return (1.0 - np.exp(-ki-kf)) / (ki+kf) # ------------------------------------------------------------------------------ def _calc_ki_kf(self, waves, thickness, sinangle1, sinangle2, x_section, x_section_efixed = 0): ki = np.ones(waves.size) kf = np.ones(waves.size) if self._emode == 'Elastic': ki = np.copy(x_section) kf = np.copy(x_section) elif self._emode == 'Direct': ki *= x_section_efixed kf = np.copy(x_section) elif self._emode == 'Indirect': ki = np.copy(x_section) kf *= x_section_efixed ki *= (thickness / sinangle1) kf *= (thickness / sinangle2) return ki, kf # ------------------------------------------------------------------------------ # Register algorithm with Mantid AlgorithmFactory.subscribe(FlatPlatePaalmanPingsCorrection)
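# ------------------------------------------------------------------------------
# Editor's note (illustrative sketch, not part of the Mantid source above): the
# attenuation factors used by _flat_abs() come down to two closed forms,
# A_T = (exp(-kf) - exp(-ki)) / (ki - kf) for transmission geometry and
# A_R = (1 - exp(-ki - kf)) / (ki + kf) for reflection geometry, where ki and kf
# are the attenuation exponents along the incident and scattered beam paths.
# The standalone NumPy sketch below mirrors _self_shielding_transmission and
# _self_shielding_reflection so the numbers can be checked outside Mantid.
# The function names and the sample values (attenuation, thickness, angles)
# are made up for illustration only.
import numpy as np

def self_shielding_transmission(ki, kf):
    # Series expansion keeps the expression stable when ki is close to kf,
    # matching the 1e-3 tolerance used in the algorithm above.
    ki, kf = np.asarray(ki, dtype=float), np.asarray(kf, dtype=float)
    close = np.abs(ki - kf) < 1.0e-3
    series = np.exp(-ki) * (1.0 - 0.5 * (kf - ki) + (kf - ki) ** 2 / 12.0)
    general = (np.exp(-kf) - np.exp(-ki)) / np.where(close, 1.0, ki - kf)
    return np.where(close, series, general)

def self_shielding_reflection(ki, kf):
    ki, kf = np.asarray(ki, dtype=float), np.asarray(kf, dtype=float)
    return (1.0 - np.exp(-ki - kf)) / (ki + kf)

if __name__ == '__main__':
    # Hypothetical slab: 0.1 cm thick, linear attenuation 1.5 per cm,
    # SampleAngle = 45 deg and a detector at a scattering angle of 90 deg.
    mu, thickness = 1.5, 0.1
    sample_angle, two_theta = 45.0, 90.0
    alpha = np.radians(90.0 + sample_angle)
    theta = np.radians(two_theta)
    ki = mu * thickness / np.sin(alpha)
    kf = mu * thickness / np.sin(abs(theta - alpha))
    print(self_shielding_transmission(ki, kf), self_shielding_reflection(ki, kf))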
gpl-3.0
-8,269,240,477,197,982,000
44.636986
140
0.540807
false
pienkowb/omelette
omelette/fromage/qfromage.py
1
4327
#!/usr/bin/env python

import sys, os

script_path = os.path.dirname(os.path.realpath(__file__))
modules_path = os.path.normcase("../../")
modules_directory = os.path.join(script_path, modules_path)
sys.path.append(modules_directory)

from PyQt4 import QtGui, QtCore

from omelette.fromage.ui import Ui_MainWindow
#from omelette.fromage.fromage_editor import FromageEditor
from omelette.fromage.qscintilla import QSci
from omelette.fromage.actions import Actions
from omelette.fromage.scalable_view import ScalableView
from omelette.fromage.layouter import *


class QFromage(QtGui.QMainWindow, Ui_MainWindow):
    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self, parent)

        frect = self.frameGeometry()
        frect.moveCenter(QtGui.QDesktopWidget().availableGeometry().center())
        self.move(frect.topLeft())

        self.setupUi(self)

        self.hlayout = QtGui.QHBoxLayout(self.centralwidget)
        self.vlayout = QtGui.QVBoxLayout(self.dockContents)

        self.splitter = QtGui.QSplitter(self.centralwidget)
        self.splitter.setOrientation(QtCore.Qt.Horizontal)

        self.qsci = QSci(self.splitter)
        #self.qsci = FromageEditor(self.splitter)

        self.scene = QtGui.QGraphicsScene(self.splitter)

        self.scalable_view = ScalableView(self.splitter)
        self.scalable_view.setScene(self.scene)
        self.scene.setSceneRect(QtCore.QRectF(0, 0, 500, 500))

        self.splitter.setSizes([1,1])

        self.msg_view = QtGui.QTableWidget()
        self.msg_view.setColumnCount(3)
        self.msg_view.setHorizontalHeaderLabels(["Marker", "Line number", "Message"])
        self.msg_view.horizontalHeader().setStretchLastSection(True)
        self.msg_view.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
        self.msg_view.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)

        for i in range(3):
            self.msg_view.horizontalHeaderItem(i).setTextAlignment(0x0001)

        self.vlayout.addWidget(self.msg_view)
        self.hlayout.addWidget(self.splitter)

        layouts_names = LayoutFactory.layouts()

        for name in layouts_names:
            self.layout = QtGui.QAction(self)
            self.layout.setObjectName(QtCore.QString.fromUtf8(name))
            self.layout.setCheckable(True)
            self.layout.setText(name)
            self.menuLayout.addAction(self.layout)

        self.actions = Actions(self)

        QtCore.QObject.connect(self.actionGenerate, QtCore.SIGNAL("triggered()"), self.actions.generate)
        QtCore.QObject.connect(self.actionNew, QtCore.SIGNAL("triggered()"), self.actions.new_file)
        QtCore.QObject.connect(self.actionOpen, QtCore.SIGNAL("triggered()"), self.actions.open_file)
        QtCore.QObject.connect(self.actionSave, QtCore.SIGNAL("triggered()"), self.actions.save_file)
        QtCore.QObject.connect(self.actionSaveAs, QtCore.SIGNAL("triggered()"), self.actions.save_file_as)
        QtCore.QObject.connect(self.actionCut, QtCore.SIGNAL("triggered()"), self.actions.cut)
        QtCore.QObject.connect(self.actionCopy, QtCore.SIGNAL("triggered()"), self.actions.copy)
        QtCore.QObject.connect(self.actionPaste, QtCore.SIGNAL("triggered()"), self.actions.paste)
        QtCore.QObject.connect(self.actionUndo, QtCore.SIGNAL("triggered()"), self.actions.undo)
        QtCore.QObject.connect(self.actionRedo, QtCore.SIGNAL("triggered()"), self.actions.redo)
        QtCore.QObject.connect(self.qsci, QtCore.SIGNAL("textChanged()"), self.actions.enable_save)
        QtCore.QObject.connect(self.actionExport, QtCore.SIGNAL("triggered()"), self.actions.export)
        QtCore.QObject.connect(self.menuLayout, QtCore.SIGNAL("triggered(QAction*)"), self.actions.get_layout_name)
        QtCore.QObject.connect(self.menuLayout, QtCore.SIGNAL("triggered(QAction*)"), self.actions.check_layout_item)
        QtCore.QObject.connect(self.msg_view, QtCore.SIGNAL("cellDoubleClicked(int,int)"), self.actions.jump_to_line)


if __name__ == "__main__":
    app = QtGui.QApplication(sys.argv)
    form = QFromage()

    if len(sys.argv) == 2:
        with open(sys.argv[1], 'r') as f:
            for line in f:
                form.qsci.append(line)
        form.actions.generate()
        form.show()
    else:
        form.show()

    sys.exit(app.exec_())
gpl-3.0
-519,461,222,744,091,100
44.547368
117
0.692628
false
dhague/vpower
SpeedCadenceSensorRx.py
1
5424
from ant.core import event, message, node
from ant.core.constants import *

from constants import *
from config import NETKEY, VPOWER_DEBUG


# Receiver for Speed and/or Cadence ANT+ sensor
class SpeedCadenceSensorRx(event.EventCallback):
    def __init__(self, antnode, sensor_type, sensor_id):
        self.sensor_type = sensor_type
        self.sensor_id = sensor_id
        self.currentData = None
        self.previousData = None
        self.revsPerSec = 0.0
        self.observer = None

        # Get the channel
        self.channel = antnode.getFreeChannel()
        self.channel.name = 'C:SPEED'
        network = node.Network(NETKEY, 'N:ANT+')
        self.channel.assign(network, CHANNEL_TYPE_TWOWAY_RECEIVE)
        self.channel.setID(sensor_type, sensor_id, 0)
        self.channel.searchTimeout = TIMEOUT_NEVER
        if sensor_type == SPEED_DEVICE_TYPE:
            period = 8118
        elif sensor_type == CADENCE_DEVICE_TYPE:
            period = 8102
        elif sensor_type == SPEED_CADENCE_DEVICE_TYPE:
            period = 8086
        self.channel.period = period
        self.channel.frequency = 57

    def set_revs_per_sec(self, rps):
        self.revsPerSec = rps
        if self.observer:
            self.observer.update(self.revsPerSec)

    def notify_change(self, observer):
        self.observer = observer

    def open(self):
        self.channel.open()
        self.channel.registerCallback(self)  # -> will callback process(msg) method below

    def close(self):
        self.channel.close()

    def unassign(self):
        self.channel.unassign()

    def stopped(self):
        # Question: how to detect if we are stopped?
        # Answer: heuristic - record timestamps of messages. If > 1 second between messages with
        #         no change in speed data then we are stopped.
        # TODO
        return False

    def process(self, msg, channel):
        if isinstance(msg, message.ChannelBroadcastDataMessage):
            dp = None
            # Get the datapage according to the configured device type
            if self.sensor_type == SPEED_DEVICE_TYPE:
                dp = SpeedDataPage()
            elif self.sensor_type == CADENCE_DEVICE_TYPE:
                dp = CadenceDataPage()
            elif self.sensor_type == SPEED_CADENCE_DEVICE_TYPE:
                dp = SpeedCadenceDataPage()
            if dp is None:
                return

            # Parse the incoming message into a SpeedCadenceData object
            message_data = SpeedCadenceData()
            dp.parse(msg.data, message_data)
            if VPOWER_DEBUG:
                message_data.print_speed()

            if self.currentData is None:
                self.previousData = self.currentData
                self.currentData = message_data
                return

            if not self.stopped() and message_data.speedEventTime != self.currentData.speedEventTime:
                # Calculate speed from previously-held data, if there is a change
                self.previousData = self.currentData
                self.currentData = message_data
                if self.previousData is not None:
                    current_event_time = self.currentData.speedEventTime
                    if current_event_time < self.previousData.speedEventTime:
                        current_event_time += 65536 / 1024.0
                    time_diff = current_event_time - self.previousData.speedEventTime
                    current_rev_count = self.currentData.speedRevCount
                    if current_rev_count < self.previousData.speedRevCount:
                        current_rev_count += 65536
                    revs_diff = current_rev_count - self.previousData.speedRevCount
                    self.set_revs_per_sec(revs_diff / time_diff)
        elif isinstance(msg, message.ChannelStatusMessage):
            if msg.status == EVENT_CHANNEL_CLOSED:
                # Channel closed, re-open
                open()


class SpeedCadenceData:
    def __init__(self):
        self.speedRevCount = None
        self.speedEventTime = None
        self.cadenceRevCount = None
        self.cadenceEventTime = None

    def print_speed(self):
        print('speedRevCount: ', self.speedRevCount)
        print('speedEventTime: ', self.speedEventTime)

    def print_cadence(self):
        print('cadenceRevCount: ', self.cadenceRevCount)
        print('cadenceEventTime: ', self.cadenceEventTime)


class DataPage(object):
    @staticmethod
    def parse_event_time(payload, offset):
        return (payload[offset] | (payload[offset + 1] << 8)) / 1024.0

    @staticmethod
    def parse_rev_count(payload, offset):
        return payload[offset] | (payload[offset + 1] << 8)


class SpeedDataPage(DataPage):
    def parse(self, payload, data):
        data.speedEventTime = self.parse_event_time(payload, 4)
        data.speedRevCount = self.parse_rev_count(payload, 6)


class CadenceDataPage(DataPage):
    def parse(self, payload, data):
        data.cadenceEventTime = self.parse_event_time(payload, 4)
        data.cadenceRevCount = self.parse_rev_count(payload, 6)


class SpeedCadenceDataPage(DataPage):
    def parse(self, payload, data):
        data.cadenceEventTime = self.parse_event_time(payload, 0)
        data.cadenceRevCount = self.parse_rev_count(payload, 2)
        data.speedEventTime = self.parse_event_time(payload, 4)
        data.speedRevCount = self.parse_rev_count(payload, 6)
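# ------------------------------------------------------------------------------
# Editor's note (illustrative sketch, not part of the original file): process()
# derives revolutions per second from two successive data pages, where the event
# time is a 16-bit counter in 1/1024 s units and the cumulative revolution count
# is also 16-bit, so both wrap at 65536. The helper below mirrors that rollover
# arithmetic in isolation. The function name, the sample page values and the
# wheel circumference used to turn revs/sec into a road speed are assumed example
# values, not something the class above defines.

def revs_per_sec(prev_time, prev_revs, cur_time, cur_revs):
    # Times are in seconds (already divided by 1024); counts are raw 16-bit values.
    if cur_time < prev_time:
        cur_time += 65536 / 1024.0   # event-time counter wrapped
    if cur_revs < prev_revs:
        cur_revs += 65536            # revolution counter wrapped
    return (cur_revs - prev_revs) / (cur_time - prev_time)

if __name__ == '__main__':
    # Two hypothetical speed data pages, one either side of a counter wrap.
    rps = revs_per_sec(prev_time=63.9, prev_revs=65535, cur_time=0.4, cur_revs=1)
    wheel_circumference_m = 2.096    # assumed 700x23c wheel, illustration only
    print('%.2f revs/s -> %.1f km/h' % (rps, rps * wheel_circumference_m * 3.6))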
mit
-1,974,359,766,318,043,600
35.897959
101
0.620391
false
ajdawson/biggus
biggus/__init__.py
1
104004
# (C) British Crown Copyright 2012 - 2015, Met Office # # This file is part of Biggus. # # Biggus is free software: you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by the # Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Biggus is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Biggus. If not, see <http://www.gnu.org/licenses/>. """ Virtual arrays of arbitrary size, with arithmetic and statistical operations, and conversion to NumPy ndarrays. Virtual arrays can be stacked to increase their dimensionality, or tiled to increase their extent. Includes support for easily wrapping data sources which produce NumPy ndarray objects via slicing. For example: netcdf4python Variable instances, and NumPy ndarray instances. All operations are performed in a lazy fashion to avoid overloading system resources. Conversion to a concrete NumPy ndarray requires an explicit method call. For example:: # Wrap two large data sources (e.g. 52000 x 800 x 600). measured = OrthoArrayAdapter(netcdf_var_a) predicted = OrthoArrayAdapter(netcdf_var_b) # No actual calculations are performed here. error = predicted - measured # *Appear* to calculate the mean over the first dimension, and # return a new biggus Array with the correct shape, etc. # NB. No data are read and no calculations are performed. mean_error = biggus.mean(error, axis=0) # *Actually* calculate the mean, and return a NumPy ndarray. # This is when the data are read, subtracted, and the mean derived, # but all in a chunk-by-chunk fashion which avoids using much # memory. mean_error = mean_error.ndarray() """ from __future__ import division from abc import ABCMeta, abstractproperty, abstractmethod import __builtin__ import collections import itertools import threading import Queue import numpy as np import numpy.ma as ma __version__ = '0.10.0' _SCALAR_KEY_TYPES = (int, np.integer) _KEY_TYPES = _SCALAR_KEY_TYPES + (slice, tuple, np.ndarray) def _is_scalar(key): return isinstance(key, (int, np.integer)) class AxisSupportError(StandardError): """Raised when the operation is not supported over a given axis/axes.""" class Engine(object): """ Represents a way to evaluate lazy expressions. """ __metaclass__ = ABCMeta @abstractmethod def masked_arrays(self, *arrays): """ Return a list of MaskedArray objects corresponding to the given biggus Array objects. This can be more efficient (and hence faster) than converting the individual arrays one by one. """ pass @abstractmethod def ndarrays(self, *arrays): """ Return a list of NumPy ndarray objects corresponding to the given biggus Array objects. This can be more efficient (and hence faster) than converting the individual arrays one by one. """ pass Chunk = collections.namedtuple('Chunk', 'keys data') QUEUE_FINISHED = None QUEUE_ABORT = Exception class Node(object): """A node of an expression evaluation graph.""" __metaclass__ = ABCMeta def __init__(self): self.output_queues = [] def abort(self): """Send the abort signal to all registered output queues.""" for queue in self.output_queues: queue.put(QUEUE_ABORT) def add_output_queue(self, output_queue): """ Register a queue so it will receive the output Chunks from this Node. 
""" self.output_queues.append(output_queue) def output(self, chunk): """ Dispatch the given Chunk onto all the registered output queues. If the chunk is None, it is silently ignored. """ if chunk is not None: for queue in self.output_queues: queue.put(chunk) @abstractmethod def run(self): pass def thread(self): """Start a new daemon thread which executes the `run` method.""" thread = threading.Thread(target=self.run, name=str(self)) thread.daemon = True thread.start() return thread class ProducerNode(Node): """ A data-source node in an expression evaluation graph. A ProducerNode corresponds to an Array which simply contains its source data. The relevant Array classes are: `NumpyArrayAdapter`, `OrthoArrayAdapater`, `ArrayStack`, and `LinearMosaic`. """ def __init__(self, array, iteration_order, masked): assert array.ndim == len(iteration_order) self.array = array self.iteration_order = iteration_order self.masked = masked super(ProducerNode, self).__init__() def run(self): """ Emit the Chunk instances which cover the underlying Array. The Array is divided into chunks with a size limit of MAX_CHUNK_SIZE which are emitted into all registered output queues. """ try: # We always slice up the Array into the same chunks, but # the order that we traverse those chunks depends on # `self.iteration_order`. # We use `numpy.ndindex` to iterate through all the chunks, # but since it always iterates over the last dimension first # we have to transpose `all_cuts` and `cut_shape` ourselves. # Then we have to invert the transposition once we have # indentified the relevant slices. all_cuts = _all_slices_inner(self.array.dtype.itemsize, self.array.shape, always_slices=True) all_cuts = [all_cuts[i] for i in self.iteration_order] cut_shape = tuple(len(cuts) for cuts in all_cuts) inverse_order = [self.iteration_order.index(i) for i in range(len(self.iteration_order))] for cut_indices in np.ndindex(*cut_shape): key = tuple(cuts[i] for cuts, i in zip(all_cuts, cut_indices)) key = tuple(key[i] for i in inverse_order) # Now we have the slices that describe the next chunk. # For example, key might be equivalent to # `[11:12, 0:3, :, :]`. # Simply "realise" the data for that region and emit it # as a Chunk to all registered output queues. if self.masked: data = self.array[key].masked_array() else: data = self.array[key].ndarray() output_chunk = Chunk(key, data) self.output(output_chunk) except: self.abort() raise else: for queue in self.output_queues: queue.put(QUEUE_FINISHED) class ConsumerNode(Node): """ A computation/result-accumulation node in an expression evaluation graph. A ConsumerNode corresponds to either: an Array which is computed from one or more other Arrays; or a container for the result of an expressions, such as an in-memory array or file. """ def __init__(self): self.input_queues = [] super(ConsumerNode, self).__init__() def add_input_nodes(self, input_nodes): """ Set the given nodes as inputs for this node. Creates a limited-size Queue.Queue for each input node and registers each queue as an output of its corresponding node. """ self.input_queues = [Queue.Queue(maxsize=3) for _ in input_nodes] for input_node, input_queue in zip(input_nodes, self.input_queues): input_node.add_output_queue(input_queue) @abstractmethod def finalise(self): """ Return any remaining partial results. Called once all the input chunks have been processed. 
Returns ------- Chunk or None """ pass @abstractmethod def process_chunks(self, chunks): """Process one chunk from each input node.""" pass def run(self): """ Process the input queues in lock-step, and push any results to the registered output queues. """ try: while True: input_chunks = [input.get() for input in self.input_queues] for input in self.input_queues: input.task_done() if any(chunk is QUEUE_ABORT for chunk in input_chunks): self.abort() return if any(chunk is QUEUE_FINISHED for chunk in input_chunks): break self.output(self.process_chunks(input_chunks)) # Finalise the final chunk (process_chunks does this for all # but the last chunk). self.output(self.finalise()) except: self.abort() raise else: for queue in self.output_queues: queue.put(QUEUE_FINISHED) class StreamsHandlerNode(ConsumerNode): """ A node in an expression graph corresponding to an Array with a `streams_handler` method. """ def __init__(self, array, streams_handler): self.array = array self.streams_handler = streams_handler super(StreamsHandlerNode, self).__init__() def finalise(self): return self.streams_handler.finalise() def input_iteration_order(self, iteration_order): return self.streams_handler.input_iteration_order(iteration_order) def process_chunks(self, chunks): return self.streams_handler.process_chunks(chunks) class NdarrayNode(ConsumerNode): """ An in-memory result node in an expression evaluation graph. An NdarrayNode corresponds to either a numpy ndarray instance or a MaskedArray instance. """ def __init__(self, array, masked): if masked: self.result = np.ma.empty(array.shape, dtype=array.dtype) else: self.result = np.empty(array.shape, dtype=array.dtype) super(NdarrayNode, self).__init__() def abort(self): self.result = None def finalise(self): pass def process_chunks(self, chunks): """ Store the incoming chunk at the corresponding position in the result array. """ chunk, = chunks if chunk.keys: self.result[chunk.keys] = chunk.data else: self.result[...] = chunk.data class AllThreadedEngine(Engine): """ Evaluates lazy expressions by creating a thread for each node in the expression graph. """ class Group(object): """ A collection of Array instances which are to be evaluated in parallel. """ def __init__(self, arrays, indices): """ Creates a collection of Array instances and their corresponding indices into the overall list of results. Parameters ---------- arrays : iterable of biggus.Array instances indices : iterable of int """ self.arrays = arrays self.indices = indices self._node_cache = {} def __repr__(self): return 'Group({}, {})'.format(self.arrays, self.indices) def _make_node(self, array, iteration_order, masked): cache_key = id(array) node = self._node_cache.get(cache_key, None) if node is None: if hasattr(array, 'streams_handler'): node = StreamsHandlerNode(array, array.streams_handler(masked)) iteration_order = node.input_iteration_order( iteration_order) input_nodes = [self._make_node(input_array, iteration_order, masked) for input_array in array.sources] node.add_input_nodes(input_nodes) else: node = ProducerNode(array, iteration_order, masked) self._node_cache[cache_key] = node return node def evaluate(self, masked): """ Convert each of the Array instances in this group into its corresponding ndarray/MaskedArray. Parameters ---------- masked : bool Whether to use ndarray or MaskedArray computations. Returns ------- list of ndarray or MaskedArray instances """ # Construct nodes starting from the producers. 
result_nodes = [] result_threads = [] for array in self.arrays: iteration_order = range(array.ndim) node = self._make_node(array, iteration_order, masked) result_node = NdarrayNode(array, masked) result_node.add_input_nodes([node]) result_threads.append(result_node.thread()) result_nodes.append(result_node) # Start up all the producer/computation threads. for node in self._node_cache.itervalues(): node.thread() # Wait for the result threads to finish. for thread in result_threads: thread.join() results = [node.result for node in result_nodes] if any(result is None for result in results): raise Exception('error during evaluation') return results def _groups(self, arrays): # XXX Placeholder implementation which assumes everything # is compatible and can be evaluated in parallel. return [self.Group(arrays, range(len(arrays)))] def _evaluate(self, arrays, masked): # Figure out which arrays should be evaluated in parallel. groups = self._groups(arrays) # Compile the results. all_results = [None] * len(arrays) for group in groups: ndarrays = group.evaluate(masked) for i, ndarray in zip(group.indices, ndarrays): all_results[i] = ndarray return all_results def masked_arrays(self, *arrays): return self._evaluate(arrays, True) def ndarrays(self, *arrays): return self._evaluate(arrays, False) engine = AllThreadedEngine() """ The current lazy evaluation engine. Defaults to an instance of :class:`AllThreadedEngine`. """ class Array(object): """ A virtual array which can be sliced to create smaller virtual arrays, or converted to a NumPy ndarray. """ __metaclass__ = ABCMeta __hash__ = None #: Indicates to client code that the object supports #: "orthogonal indexing", which means that slices that are 1d arrays #: or lists slice along each dimension independently. This behavior #: is similar to Fortran or Matlab, but different than numpy. __orthogonal_indexing__ = True def __array__(self, dtype=None): result = self.ndarray() return np.asarray(result, dtype=dtype) def __str__(self): fmt = '<Array shape=({}) dtype={!r} size={}>' return fmt.format(', '.join(str(items) for items in self.shape), self.dtype, size(self)) def __repr__(self): fmt = '<{} shape=({}) dtype={!r}>' return fmt.format(type(self).__name__, ', '.join(str(items) for items in self.shape), self.dtype) @property def fill_value(self): """The value used to fill in masked values where necessary.""" return np.ma.empty(0, dtype=self.dtype).fill_value @property def nbytes(self): """The total number of bytes required to store the array data.""" return int(np.product(self.shape) * self.dtype.itemsize) @property def ndim(self): """The number of dimensions in this virtual array.""" return len(self.shape) @abstractproperty def dtype(self): """The datatype of this virtual array.""" def astype(self, dtype): """Copy of the array, cast to a specified type.""" return AsDataTypeArray(self, dtype) @abstractproperty def shape(self): """The shape of the virtual array as a tuple.""" def __getitem__(self, keys): """Returns a new Array by slicing using _getitem_full_keys.""" keys = _full_keys(keys, self.ndim) # To prevent numpy complaining about "None in arr" we don't # simply do "np.newaxis in keys". new_axis_exists = any(key is np.newaxis for key in keys) if new_axis_exists: # Make a NewAxisArray containing this array and 0 new axes. 
array = NewAxesArray(self, [0] * (self.ndim + 1)) indexed_array = array[keys] else: indexed_array = self._getitem_full_keys(keys) return indexed_array def _getitem_full_keys(self, keys): """ Returns a new Array by slicing this virtual array. Parameters ---------- keys - iterable of keys The keys to index the array with. The default ``__getitem__`` removes all ``np.newaxis`` objects, and will be of length array.ndim. Note: This method must be overridden if ``__getitem__`` is defined by :meth:`Array.__getitem__`. """ raise NotImplementedError('_getitem_full_keys should be overridden.') @abstractmethod def ndarray(self): """ Returns the NumPy ndarray instance that corresponds to this virtual array. """ @abstractmethod def masked_array(self): """ Returns the NumPy MaskedArray instance that corresponds to this virtual array. """ def _normalise_keys(self, keys): if not isinstance(keys, tuple): keys = (keys,) # This weird check is safe against keys[-1] being an ndarray. if keys and isinstance(keys[-1], type(Ellipsis)): keys = keys[:-1] if len(keys) > self.ndim: raise IndexError('too many keys') for key in keys: if not(isinstance(key, _KEY_TYPES)): raise TypeError('invalid index: {!r}'.format(key)) return keys def transpose(self, axis=None): """ Permute the dimensions of the array. Parameters ---------- axes - list of ints, optional By default, reverse the dimensions, otherwise permute the axes according to the values given. """ return TransposedArray(self, axis) def __add__(self, other): try: return add(self, other) except TypeError: return NotImplemented def __sub__(self, other): try: return sub(self, other) except TypeError: return NotImplemented def __mul__(self, other): try: return multiply(self, other) except TypeError: return NotImplemented def __floordiv__(self, other): try: return floor_divide(self, other) except TypeError: return NotImplemented # In Python 2 we implement "/" as floor division. When divide is imported # from __future__ it is __truediv__ which is called in both Python 2 & 3. __div__ = __floordiv__ def __truediv__(self, other): try: return true_divide(self, other) except TypeError: return NotImplemented def __pow__(self, other): # n.b. __builtin__.pow() allows a modulus. That interface is not # supported here as it isn't clear what the benefit is at this stage. try: return power(self, other) except TypeError: return NotImplemented class ArrayContainer(Array): "A biggus.Array which passes calls through to the contained array." def __init__(self, contained_array): self.array = contained_array def __repr__(self): return 'ArrayContainer({!r})'.format(self.array) @property def dtype(self): return self.array.dtype @property def shape(self): return self.array.shape def __getitem__(self, keys): # Pass indexing to the contained array. For ArrayContainer types # which implement their own complex __getitem__ behaviour, # overriding this and _getitem_full_keys may be necessary. See also # BroadcastArray and TransposedArray. return self.array.__getitem__(keys) def ndarray(self): try: return self.array.ndarray() except AttributeError: return np.array(self.array) def masked_array(self): try: return self.array.masked_array() except AttributeError: return np.ma.masked_array(self.array) class NewAxesArray(ArrayContainer): def __init__(self, array, new_axes): """ Creates an array which has new axes (i.e. length 1) at the specified locations. 
Parameters ---------- array - array like The array upon which to put new axes new_axes - iterable of length array.ndim + 1 The number of new axes for each axis. e.g. [2, 1, 0] for a 2d array gain two new axes on the left hand side, one in the middle, and 0 on the right hand side. """ super(NewAxesArray, self).__init__(array) if array.ndim + 1 != len(new_axes): raise ValueError('The new_axes must have length {} but was ' 'actually length {}.'.format(array.ndim + 1, len(new_axes))) new_axes = np.array(new_axes) dtype_kind = new_axes.dtype.type if (not issubclass(dtype_kind, np.integer) or np.any(new_axes < 0)): raise ValueError('Only positive integer types may be used for ' 'new_axes.') self._new_axes = new_axes @property def ndim(self): return np.sum(self._new_axes) + self.array.ndim @property def shape(self): shape = list(self.array.shape) # Starting from the higher dimensions, insert 1s at the locations # of new axes. for axes, n_new_axes in reversed(list(enumerate(self._new_axes))): for _ in range(n_new_axes): shape.insert(axes, 1) return tuple(shape) def _newaxis_keys(self): # Compute the keys needed to produce an array of appropriate newaxis. keys = [slice(None)] * self.array.ndim # Starting from the higher dimensions, insert np.newaxis at the # locations of new axes. for axes, n_new_axes in reversed(list(enumerate(self._new_axes))): for _ in range(n_new_axes): keys.insert(axes, np.newaxis) return tuple(keys) def _is_newaxis(self): is_newaxis = [False] * self.array.ndim for axes, n_new_axes in reversed(list(enumerate(self._new_axes))): for _ in range(n_new_axes): is_newaxis.insert(axes, True) return tuple(is_newaxis) def __getitem__(self, keys): # We don't want to implement _getitem_full_keys here, as we # don't want a potentially deep nesting of NewAxesArrays. Instead # we work out where newaxis objects are in the existing array, and # add any new ones that are requested. keys = _full_keys(keys, self.ndim) new_axes = self._new_axes # Strip out an deal with any keys which are for new axes. new_axes = new_axes.copy() axes_to_combine = [] is_newaxis = list(self._is_newaxis()) contained_array_keys = [] existing_array_axis = 0 broadcast_dict = {} for key_index, key in enumerate(keys): if key is np.newaxis: new_axes[existing_array_axis] += 1 continue if is_newaxis.pop(0): # We're indexing a new_axes axes. if _is_scalar(key): if -1 <= key < 1: new_axes[existing_array_axis] -= 1 else: raise IndexError('index {} is out of bounds for axis ' '{} with size 1'.format(key, key_index)) elif isinstance(key, slice): new_size = len(range(*key.indices(1))) if new_size != 1: broadcast_dict[key_index] = new_size elif isinstance(key, tuple): for index in key: if not -1 <= index < 1: raise IndexError('index {} is out of bounds for ' 'axis {} with size 1' ''.format(key, key_index)) broadcast_dict[key_index] = len(key) else: raise NotImplementedError('NewAxesArray indexing not yet ' 'supported for {} keys.' ''.format(type(key).__name__)) else: # We're indexing a dimension of self.array. if _is_scalar(key): # One of the dimensions of the existing data is to be # removed, so we can combine the new_axes to the left # and right of this axes into a single value. 
axes_to_combine.append(existing_array_axis) contained_array_keys.append(key) existing_array_axis += 1 new_axes = list(new_axes) for axis in sorted(axes_to_combine, reverse=True): new_axes[axis] += new_axes.pop(axis + 1) new_array = NewAxesArray(self.array[tuple(contained_array_keys)], new_axes) if broadcast_dict: new_array = BroadcastArray(new_array, broadcast_dict) return new_array def ndarray(self): array = super(NewAxesArray, self).ndarray() return array.__getitem__(self._newaxis_keys()) def masked_array(self): array = super(NewAxesArray, self).masked_array() return array.__getitem__(self._newaxis_keys()) class BroadcastArray(ArrayContainer): def __init__(self, array, broadcast, leading_shape=()): """ Parameters ---------- array : array like The array to broadcast. Only length 1 dimensions, or those already being broadcast, may be (further) broadcast. broadcast : dict A mapping of broadcast axis to broadcast length. leading_shape : iterable A shape to put on the leading dimension of the array. >>> array = BroadcastArray(np.empty([1, 4]), ... broadcast={0: 10}, ... leading_shape=(5,)) >>> array.shape (5, 10, 4) """ # To avoid nesting broadcast arrays within broadcast arrays, we # simply copy the existing broadcast, and apply this broadcast on # top of it (in a new BroadcastArray instance). if isinstance(array, BroadcastArray): new_broadcast_dict = array._broadcast_dict.copy() leading_shape = tuple(leading_shape) + array._leading_shape array = array.array new_broadcast_dict.update(broadcast) broadcast = new_broadcast_dict super(BroadcastArray, self).__init__(array) self._broadcast_dict = broadcast # Compute the broadcast shape. shape = self._shape_from_broadcast_dict(self.array.shape, broadcast) for length in leading_shape: if length < 1: raise ValueError('Leading shape must all be >=1.') self._leading_shape = tuple(leading_shape) self._broadcast_shape = tuple(shape) self._shape = self._leading_shape + self._broadcast_shape @property def shape(self): return self._shape def __getitem__(self, keys): # Inherit the behaviour from Array, **not** from ArrayContainer. return super(ArrayContainer, self).__getitem__(keys) def _getitem_full_keys(self, keys): array_keys = [] new_broadcast_dict = {} axis_offset = 0 # Take off the leading shape, and use the sliced_shape functionality # to compute the new leading shape size. leading_shape = self._leading_shape leading_shape_len = len(leading_shape) leading_shape = _sliced_shape(leading_shape, keys[:leading_shape_len]) axis_offset -= leading_shape_len n_axes_removed = 0 for axis, key in enumerate(keys[leading_shape_len:], start=leading_shape_len): concrete_axis = axis + axis_offset if concrete_axis not in self._broadcast_dict: if _is_scalar(key): n_axes_removed += 1 array_keys.append(key) else: existing_size = self._shape[axis] if isinstance(key, slice): # We just want to preserve the dimension. We will deal # with the broadcasting of the length. array_keys.append(slice(None)) # TODO: Compute this without creating a range object. size = len(range(*key.indices(existing_size))) new_broadcast_dict[concrete_axis - n_axes_removed] = size elif _is_scalar(key): if not -existing_size <= key < existing_size: raise IndexError('index {} is out of bounds for axis ' '{} with size 1'.format(key, axis)) else: # We want to index the broadcast dimension. array_keys.append(key) n_axes_removed += 1 else: raise NotImplementedError('Indexing with type {} not yet ' 'implemented.' ''.format(type(key))) # Try to avoid a copy of self.array if we can. 
if all(key == slice(None) for key in array_keys): sub_array = self.array else: sub_array = self.array[tuple(array_keys)] return type(self)(sub_array, new_broadcast_dict, leading_shape) @classmethod def broadcast_arrays(cls, array1, array2): """ Broadcast two arrays against each other. Returns ------- broadcast_array1 : array1 or a broadcast of array1 broadcast_array2 : array2 or a broadcast of array2 The returned arrays will be broadcast against each other. Any array which is already in full broadcast shape will be returned unchanged. """ shape, bcast_kwargs1, bcast_kwargs2 = ( cls._compute_broadcast_kwargs(array1.shape, array2.shape)) if any(bcast_kwargs1.values()): array1 = cls(array1, **bcast_kwargs1) if any(bcast_kwargs2.values()): array2 = cls(array2, **bcast_kwargs2) return array1, array2 @staticmethod def _compute_broadcast_shape(shape1, shape2): """ Given two shapes, use numpy's broadcasting rules to compute the broadcasted shape. """ # Rule 1: If the two arrays differ in their number of dimensions, the # shape of the array with fewer dimensions is padded with ones on its # leading (left) side. s1, s2 = list(shape1), list(shape2) len_diff = len(s1) - len(s2) if len_diff > 0: s2[0:0] = [1] * len_diff else: s1[0:0] = [1] * -len_diff # Rule 2: If the shape of the two arrays does not match in any # dimension, the array with shape equal to 1 in that dimension is # stretched to match the other shape. shape = [] for size1, size2 in zip(s1, s2): if size1 == size2: shape.append(size1) elif size1 == 1: shape.append(size2) elif size2 == 1: shape.append(size1) else: # Rule 3: If in any dimension the sizes disagree and neither is # equal to 1, an error is raised. raise ValueError('operands could not be broadcast together ' 'with shapes ({}) ({})' ''.format(','.join(map(str, shape1)), ','.join(map(str, shape2)))) return tuple(shape) @classmethod def _compute_broadcast_kwargs(cls, shape1, shape2): """ Given two shapes, compute the broadcast shape, along with the keywords needed to produce BroadcastArrays with arrays of given shape. Parameters ---------- shape1 : iterable shape2 : iterable The two shapes to broadcast against one another. Returns ------- full_shape : iterable The full broadcast shape. broadcast_kwargs1 : dict broadcast_kwargs2 : dict Keywords which are suitably passed through to a BroadcastArray to take the original array, and broadcast to the full broadcast shape. """ full_shape = cls._compute_broadcast_shape(shape1, shape2) bcast_kwargs1 = {'broadcast': {}, 'leading_shape': ()} bcast_kwargs2 = {'broadcast': {}, 'leading_shape': ()} ndim_diff = len(shape1) - len(shape2) s1_offset = s2_offset = 0 if ndim_diff > 0: s2_offset = ndim_diff bcast_kwargs2['leading_shape'] = full_shape[:s2_offset] elif len(shape1) < len(shape2): s1_offset = abs(ndim_diff) bcast_kwargs1['leading_shape'] = full_shape[:s1_offset] for ax, (full, s1) in enumerate(zip(full_shape[s1_offset:], shape1)): if full != s1: bcast_kwargs1['broadcast'][ax] = full for ax, (full, s2) in enumerate(zip(full_shape[s2_offset:], shape2)): if full != s2: bcast_kwargs2['broadcast'][ax] = full return full_shape, bcast_kwargs1, bcast_kwargs2 @classmethod def _shape_from_broadcast_dict(cls, orig_shape, broadcast_dict): """Using a broadcast dictionary, compute the broadcast shape.""" shape = list(orig_shape) for axis, length in broadcast_dict.items(): if not 0 <= axis < len(shape): raise ValueError('Axis {} out of range [0, {})' ''.format(axis, len(shape))) if length < 0: raise ValueError('Axis length must be positive. Got {}.' 
''.format(length)) if shape[axis] != 1: raise ValueError('Attempted to broadcast axis {} which is of ' 'length {}.'.format(axis, shape[axis])) shape[axis] = length return tuple(shape) @classmethod def _broadcast_numpy_array(cls, array, broadcast_dict, leading_shape=()): """Broadcast a numpy array according to the broadcast_dict.""" from numpy.lib.stride_tricks import as_strided shape = cls._shape_from_broadcast_dict(array.shape, broadcast_dict) shape = tuple(leading_shape) + shape strides = [0] * len(leading_shape) + list(array.strides) for broadcast_axis in broadcast_dict: strides[broadcast_axis + len(leading_shape)] = 0 return as_strided(array, shape=tuple(shape), strides=tuple(strides)) def ndarray(self): array = super(BroadcastArray, self).ndarray() return self._broadcast_numpy_array(array, self._broadcast_dict, self._leading_shape) def masked_array(self): ma = super(BroadcastArray, self).masked_array() mask = ma.mask array = self._broadcast_numpy_array(ma.data, self._broadcast_dict, self._leading_shape) if isinstance(mask, np.ndarray): mask = self._broadcast_numpy_array(mask, self._broadcast_dict, self._leading_shape) return np.ma.masked_array(array, mask=mask) class AsDataTypeArray(ArrayContainer): def __init__(self, array, dtype): """ Cast the given array to the specified dtype. Parameters ---------- array : array like The array to cast to ``dtype``. dtype : valid numpy.dtype argument The dtype to cast the data to. This will be passed through to :func:`numpy.dtype`. """ super(AsDataTypeArray, self).__init__(array) self._dtype = np.dtype(dtype) @property def dtype(self): return self._dtype def astype(self, dtype): return type(self)(self.array, dtype) def __getitem__(self, keys): # Apply the indexing to the contained array, then instantly # re-apply the astype. return type(self)(super(AsDataTypeArray, self).__getitem__(keys), self.dtype) def ndarray(self): return super(AsDataTypeArray, self).ndarray().astype(self.dtype) def masked_array(self): dtype = self._dtype return super(AsDataTypeArray, self).masked_array().astype(self.dtype) class ConstantArray(Array): """ An Array which is completely filled with a single value. Parameters ---------- shape : int or sequence of ints The shape for the new Array. value : obj, optional The value to fill the Array with. Defaults to 0.0. dtype : obj, optional Object to be converted to data type. Default is None which instructs the data type to be determined as the minimum required to hold the given value. Returns ------- Array An Array entirely filled with 'value'. """ def __init__(self, shape, value=0.0, dtype=None): if isinstance(shape, basestring): shape = (shape,) else: try: shape = tuple(shape) except TypeError: shape = (shape,) self._shape = tuple(map(int, shape)) data = np.array([value], dtype=dtype) self.value = data[0] self._dtype = data.dtype @property def dtype(self): return self._dtype @property def shape(self): return self._shape def __getitem__(self, keys): # newaxis is handled within _sliced_shape, so we override __getitem__, # not _getitem_full_keys. shape = _sliced_shape(self.shape, keys) return ConstantArray(shape, self.value, self._dtype) def ndarray(self): result = np.empty(self.shape, self._dtype) result.fill(self.value) return result def masked_array(self): result = np.ma.empty(self.shape, self._dtype) result.fill(self.value) return result def zeros(shape, dtype=float): """ Return an Array which is completely filled with zeros. Parameters ---------- shape : int or sequence of ints The shape for the new Array. 
dtype : obj, optional Object to be converted to data type. Default is `float`. Returns ------- Array An Array entirely filled with zeros. """ return ConstantArray(shape, dtype=dtype) def ones(shape, dtype=float): """ Return an Array which is completely filled with ones. Parameters ---------- shape : int or sequence of ints The shape for the new Array. dtype : obj, optional Object to be converted to data type. Default is `float`. Returns ------- Array An Array entirely filled with ones. """ return ConstantArray(shape, 1, dtype=dtype) class _ArrayAdapter(Array): """ Abstract base class for exposing a "concrete" data source as a :class:`biggus.Array`. """ def __init__(self, concrete, keys=()): # concrete must have: # dtype # shape self.concrete = concrete if not isinstance(keys, tuple): keys = (keys,) assert len(keys) <= len(concrete.shape) result_keys = [] for axis, (key, size) in enumerate(zip(keys, concrete.shape)): result_key = self._cleanup_new_key(key, size, axis) result_keys.append(key) self._keys = tuple(result_keys) @property def dtype(self): return self.concrete.dtype @property def fill_value(self): fill_value = getattr(self.concrete, 'fill_value', None) if fill_value is None: fill_value = Array.fill_value.fget(self) return fill_value @property def shape(self): return _sliced_shape(self.concrete.shape, self._keys) def _cleanup_new_key(self, key, size, axis): """ Return a key of type int, slice, or tuple that is guaranteed to be valid for the given dimension size. Raises IndexError/TypeError for invalid keys. """ if _is_scalar(key): if key >= size or key < -size: msg = 'index {0} is out of bounds for axis {1} with' \ ' size {2}'.format(key, axis, size) raise IndexError(msg) elif isinstance(key, slice): pass elif isinstance(key, np.ndarray) and key.dtype == np.dtype('bool'): if key.size > size: msg = 'too many boolean indices. Boolean index array ' \ 'of size {0} is greater than axis {1} with ' \ 'size {2}'.format(key.size, axis, size) raise IndexError(msg) elif isinstance(key, collections.Iterable) and \ not isinstance(key, basestring): # Make sure we capture the values in case we've # been given a one-shot iterable, like a generator. key = tuple(key) for sub_key in key: if sub_key >= size or sub_key < -size: msg = 'index {0} is out of bounds for axis {1}' \ ' with size {2}'.format(sub_key, axis, size) raise IndexError(msg) else: raise TypeError('invalid key {!r}'.format(key)) return key def _remap_new_key(self, indices, new_key, axis): """ Return a key of type int, slice, or tuple that represents the combination of new_key with the given indices. Raises IndexError/TypeError for invalid keys. """ size = len(indices) if _is_scalar(new_key): if new_key >= size or new_key < -size: msg = 'index {0} is out of bounds for axis {1}' \ ' with size {2}'.format(new_key, axis, size) raise IndexError(msg) result_key = indices[new_key] elif isinstance(new_key, slice): result_key = indices.__getitem__(new_key) elif isinstance(new_key, np.ndarray) and \ new_key.dtype == np.dtype('bool'): # Numpy boolean indexing. if new_key.size > size: msg = 'too many boolean indices. Boolean index array ' \ 'of size {0} is greater than axis {1} with ' \ 'size {2}'.format(new_key.size, axis, size) raise IndexError(msg) result_key = tuple(np.array(indices)[new_key]) elif isinstance(new_key, collections.Iterable) and \ not isinstance(new_key, basestring): # Make sure we capture the values in case we've # been given a one-shot iterable, like a generator. 
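            # For example (illustrative), a key such as (i for i in (0, 2))
            # would otherwise be exhausted by the bounds check below and then
            # yield nothing when the indices are remapped afterwards.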
new_key = tuple(new_key) for sub_key in new_key: if sub_key >= size or sub_key < -size: msg = 'index {0} is out of bounds for axis {1}' \ ' with size {2}'.format(sub_key, axis, size) raise IndexError(msg) result_key = tuple(indices[key] for key in new_key) else: raise TypeError('invalid key {!r}'.format(new_key)) return result_key def _getitem_full_keys(self, keys): result_keys = [] shape = list(self.concrete.shape) src_keys = list(self._keys or []) new_keys = list(keys) # While we still have both existing and incoming keys to # deal with... axis = 0 while src_keys and new_keys: src_size = shape.pop(0) src_key = src_keys.pop(0) if _is_scalar(src_key): # An integer src_key means this dimension has # already been sliced away - it's not visible to # the new keys. result_key = src_key elif isinstance(src_key, slice): # A slice src_key means we have to apply the new key # to the sliced version of the concrete dimension. start, stop, stride = src_key.indices(src_size) indices = tuple(range(start, stop, stride)) new_key = new_keys.pop(0) result_key = self._remap_new_key(indices, new_key, axis) else: new_key = new_keys.pop(0) result_key = self._remap_new_key(src_key, new_key, axis) result_keys.append(result_key) axis += 1 # Now mop up any remaining src or new keys. if src_keys: # Any remaining src keys can just be appended. # (They've already been sanity checked against the # concrete array.) result_keys.extend(src_keys) else: # Any remaining new keys need to be checked against # the remaining dimension sizes of the concrete array. for new_key, size in zip(new_keys, shape): result_key = self._cleanup_new_key(new_key, size, axis) result_keys.append(result_key) axis += 1 return type(self)(self.concrete, tuple(result_keys)) @abstractmethod def _apply_keys(self): pass def ndarray(self): array = self._apply_keys() # We want the shape of the result to match the shape of the # Array, so where we've ended up with an array-scalar, # "inflate" it back to a 0-dimensional array. if array.ndim == 0: array = np.array(array) if ma.isMaskedArray(array): array = array.filled() return array def masked_array(self): array = self._apply_keys() # We want the shape of the result to match the shape of the # Array, so where we've ended up with an array-scalar, # "inflate" it back to a 0-dimensional array. if array.ndim == 0 or not ma.isMaskedArray(array): array = ma.MaskedArray(array, fill_value=self.fill_value) return array class NumpyArrayAdapter(_ArrayAdapter): """ Exposes a "concrete" data source which supports NumPy "fancy indexing" as a :class:`biggus.Array`. A NumPy ndarray instance is an example suitable data source. NB. NumPy "fancy indexing" contrasts with orthogonal indexing which treats multiple iterable index keys as independent. """ def _apply_keys(self): # If we have more than one tuple as a key, then NumPy does # "fancy" indexing, instead of "column-based" indexing, so we # need to use multiple indexing operations to get the right # result. keys = self._keys tuple_keys = [(i, key) for i, key in enumerate(keys) if isinstance(key, tuple)] if len(tuple_keys) > 1: # Since we're potentially dealing with very large datasets # we want to cut down the array as much as possible in the # first iteration. # But we can't reliably mix tuple keys with other tuple # keys or with scalar keys. 
So the possible first cuts are: # - all scalars + all slices (iff there are any scalars) # - [tuple + all slices for tuple in tuples] # Each possible cut will reduce the dataset to different # size, and *ideally* we want to choose the smallest one. # For now though ... # ... use all the non-tuple keys first (if we have any) ... dimensions = np.arange(len(keys)) if len(tuple_keys) != len(keys): cut_keys = list(keys) for i, key in tuple_keys: cut_keys[i] = slice(None) array = self.concrete[tuple(cut_keys)] is_scalar = map(_is_scalar, cut_keys) dimensions -= np.cumsum(is_scalar) else: # Use ellipsis indexing to ensure we have a real ndarray # instance to work with. (Otherwise self.concrete would # need to implement `take` or `__array__`.) array = self.concrete[...] # ... and then do each tuple in turn. for i, key in tuple_keys: array = np.take(array, key, axis=dimensions[i]) else: array = self.concrete.__getitem__(keys) return array class OrthoArrayAdapter(_ArrayAdapter): """ Exposes a "concrete" data source which supports orthogonal indexing as a :class:`biggus.Array`. Orthogonal indexing treats multiple iterable index keys as independent (which is also the behaviour of a :class:`biggus.Array`). For example:: >>> ortho = OrthoArrayAdapter(ConstantArray(shape=[100, 200, 300])) >>> ortho.shape (100, 200, 300) >>> ortho[(0, 3, 4), :, (1, 9)].shape (3, 200, 2) A netCDF4.Variable instance is an example orthogonal concrete array. NB. Orthogonal indexing contrasts with NumPy "fancy indexing" where multiple iterable index keys are zipped together to allow the selection of sparse locations. """ def _apply_keys(self): array = self.concrete.__getitem__(self._keys) return array def _pairwise(iterable): """ itertools recipe "s -> (s0,s1), (s1,s2), (s2, s3), ... """ a, b = itertools.tee(iterable) next(b, None) return itertools.izip(a, b) def _groups_of(length, total_length): """ Return an iterator of tuples for slicing, in 'length' chunks. Parameters ---------- length : int Length of each chunk. total_length : int Length of the object we are slicing Returns ------- iterable of tuples Values defining a slice range resulting in length 'length'. """ indices = tuple(range(0, total_length, length)) + (None, ) return _pairwise(indices) class TransposedArray(ArrayContainer): def __init__(self, array, axes=None): """ Permute the dimensions of an array. Parameters ---------- array - array like The array to transpose axes - list of ints, optional By default, reverse the dimensions, otherwise permute the axes according to the values given. """ super(TransposedArray, self).__init__(array) if axes is None: axes = np.arange(array.ndim)[::-1] elif len(axes) != array.ndim: raise ValueError('Incorrect number of dimensions.') self.axes = axes self._forward_axes_map = {src: dest for dest, src in enumerate(axes)} self._inverse_axes_map = {dest: src for dest, src in enumerate(axes)} def __repr__(self): return 'TransposedArray({!r}, {!r})'.format(self.array, self.axes) def _apply_axes_mapping(self, target, inverse=False): """ Apply the transposition to the target iterable. Parameters ---------- target - iterable The iterable to transpose. This would be suitable for things such as a shape as well as a list of ``__getitem__`` keys. inverse - bool Whether to map old dimension to new dimension (forward), or new dimension to old dimension (inverse). Default is False (forward). Returns ------- A tuple derived from target which has been ordered based on the new axes. 
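
        For example (illustrative), with ``axes=(2, 0, 1)`` the target
        ``('a', 'b', 'c')`` maps to ``('c', 'a', 'b')``, and passing
        ``inverse=True`` applies the reverse mapping.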
""" if len(target) != self.ndim: raise ValueError('The target iterable is of length {}, but ' 'should be of length {}.'.format(len(target), self.ndim)) if inverse: axis_map = self._inverse_axes_map else: axis_map = self._forward_axes_map result = [None] * self.ndim for axis, item in enumerate(target): result[axis_map[axis]] = item return tuple(result) @property def shape(self): return self._apply_axes_mapping(self.array.shape) @property def ndim(self): return self.array.ndim def __getitem__(self, keys): # Inherit the behaviour from Array, **not** from ArrayContainer, # thus meaning we must implement _getitem_full_keys. return super(ArrayContainer, self).__getitem__(keys) def _getitem_full_keys(self, keys): new_transpose_order = list(self.axes) # Map the keys in transposed space back to pre-transposed space. remapped_keys = list(self._apply_axes_mapping(keys, inverse=True)) # Apply the keys to the pre-transposed array. new_arr = self.array[tuple(remapped_keys)] # Compute the new scalar axes in terms of old (pre-transpose) # dimension numbers. new_scalar_axes = [dim for dim, key in enumerate(remapped_keys) if _is_scalar(key)] # Compute the new transpose axes by successively taking the highest # new scalar axes, and removing it from the axes mapping. We must # remember that any axis greater than the removed dimension must # also be reduced by 1. while new_scalar_axes: # Take the highest scalar axis. scalar_axis = new_scalar_axes.pop() new_transpose_order = [axis - 1 if axis >= scalar_axis else axis for axis in new_transpose_order if axis != scalar_axis] return TransposedArray(new_arr, new_transpose_order) def ndarray(self): array = super(TransposedArray, self).ndarray() return array.transpose(self.axes) def masked_array(self): array = super(TransposedArray, self).masked_array() return array.transpose(self.axes) class ArrayStack(Array): """ An Array made from a homogeneous array of other Arrays. Parameters ---------- stack : array-like The array of Arrays to be stacked, where each Array must be of the same shape. """ def __init__(self, stack): stack = np.require(stack, dtype='O') first_array = stack.flat[0] item_shape = first_array.shape dtype = first_array.dtype fill_value = first_array.fill_value if np.issubdtype(dtype, np.floating): def fill_value_ok(array): return array.fill_value == fill_value or ( np.isnan(fill_value) and np.isnan(array.fill_value)) else: def fill_value_ok(array): return array.fill_value == fill_value for array in stack.flat: if not isinstance(array, Array): raise ValueError('sub-array must be subclass of Array') if fill_value is not None and not fill_value_ok(array): fill_value = None ok = array.shape == item_shape and array.dtype == dtype if not ok: raise ValueError('invalid sub-array') self._stack = stack self._item_shape = item_shape self._dtype = dtype if fill_value is None: self._fill_value = np.ma.empty(0, dtype=dtype).fill_value else: self._fill_value = fill_value @property def dtype(self): return self._dtype @property def fill_value(self): return self._fill_value @property def shape(self): return self._stack.shape + self._item_shape def _getitem_full_keys(self, keys): stack_ndim = self._stack.ndim stack_keys = keys[:stack_ndim] item_keys = keys[stack_ndim:] stack_shape = _sliced_shape(self._stack.shape, stack_keys) if stack_shape: stack = self._stack[stack_keys] # If the result was 0D, convert it back to an array. 
stack = np.array(stack) for index in np.ndindex(stack_shape): item = stack[index] stack[index] = item[item_keys] result = ArrayStack(stack) else: result = self._stack[stack_keys][item_keys] return result def __setitem__(self, keys, value): assert len(keys) == self._stack.ndim for key in keys: assert _is_scalar(key) assert isinstance(value, Array), type(value) self._stack[keys] = value def ndarray(self): data = np.empty(self.shape, dtype=self.dtype) for index in np.ndindex(self._stack.shape): data[index] = self._stack[index].ndarray() return data def masked_array(self): data = ma.empty(self.shape, dtype=self.dtype, fill_value=self.fill_value) for index in np.ndindex(self._stack.shape): masked_array = self._stack[index].masked_array() data[index] = masked_array return data @staticmethod def multidim_array_stack(arrays, shape, order='C'): """ Create an N-dimensional ArrayStack from the sequence of Arrays of the same shape. Example usage: stacking 6 Arrays, each of shape (768, 1024) to a specified shape (2, 3) will result in an ArrayStack of shape (2, 3, 768, 1024). Parameters ---------- arrays : sequence of Arrays The sequence of Arrays to be stacked, where each Array must be of the same shape. shape : sequence of ints Shape of the stack, (2, 3) in the above example. order : {'C', 'F'}, optional Use C (C) or FORTRAN (F) index ordering. Returns ------- ArrayStack With shape corresponding to tuple(stack shape) + tuple(Array.shape) where each Array in the stack must be of the same shape. """ arrays = np.require(arrays, dtype='O') order = order.lower() # Ensure a suitable shape has been specified. size_msg = "total size of new array must be unchanged" try: if np.product(shape) != np.product(arrays.shape): raise ValueError(size_msg) if arrays.ndim > 1: raise ValueError("multidimensional stacks not yet supported") except AttributeError: if np.product(shape) != len(arrays): raise ValueError(size_msg) # Hold the subdivided array subdivided_array = arrays # Recursively subdivide to create an ArrayStack with specified shape. for length in shape[::-1]: # Hold the array before this iterations subdivide. array_before_subdivide = subdivided_array subdivided_array = [] if order == 'f': num = len(array_before_subdivide) // length if length == len(array_before_subdivide): slc = [slice(None)] else: slc = [slice(i, None, num) for i in range(num)] for sc in slc: sub = ArrayStack(np.array(array_before_subdivide[sc], dtype=object)) subdivided_array.append(sub) elif order == 'c': for start, stop in _groups_of(length, len(array_before_subdivide)): sub = ArrayStack(np.array( array_before_subdivide[start:stop], dtype=object)) subdivided_array.append(sub) else: raise TypeError('order not understood') else: # Last iteration, length of the array will be equal to 1. 
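            # Worked example (illustrative): stacking arrays [a0, ..., a5] to
            # shape (2, 3) with order='C' first groups them into
            # ArrayStack([a0, a1, a2]) and ArrayStack([a3, a4, a5]); the final
            # pass wraps those two stacks in a single ArrayStack, which is
            # returned here with shape (2, 3) + a0.shape.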
return subdivided_array[0] class LinearMosaic(Array): def __init__(self, tiles, axis): if not isinstance(tiles, collections.Iterable): tiles = [tiles] tiles = np.array(tiles, dtype='O', ndmin=1) if tiles.ndim != 1: raise ValueError('the tiles array must be 1-dimensional') first = tiles[0] if not isinstance(first, Array): raise ValueError('sub-array must be subclass of Array') if not(0 <= axis < first.ndim): msg = 'invalid axis for {0}-dimensional tiles'.format(first.ndim) raise ValueError(msg) # Make sure all the tiles are compatible common_shape = list(first.shape) common_dtype = first.dtype common_fill_value = first.fill_value if np.issubdtype(common_dtype, np.floating): def fill_value_ok(array): return array.fill_value == common_fill_value or ( np.isnan(common_fill_value) and np.isnan(array.fill_value)) else: def fill_value_ok(array): return array.fill_value == common_fill_value del common_shape[axis] for tile in tiles[1:]: if not isinstance(tile, Array): raise ValueError('sub-array must be subclass of Array') shape = list(tile.shape) del shape[axis] if shape != common_shape: raise ValueError('inconsistent tile shapes') if tile.dtype != common_dtype: raise ValueError('inconsistent tile dtypes') if common_fill_value is not None and not fill_value_ok(tile): common_fill_value = None self._tiles = tiles self._axis = axis self._cached_shape = None if common_fill_value is None: self._fill_value = np.ma.empty(0, dtype=common_dtype).fill_value else: self._fill_value = common_fill_value @property def dtype(self): return self._tiles[0].dtype @property def fill_value(self): return self._fill_value @property def shape(self): if self._cached_shape is None: shape = list(self._tiles[0].shape) for tile in self._tiles[1:]: shape[self._axis] += tile.shape[self._axis] self._cached_shape = tuple(shape) return self._cached_shape def _getitem_full_keys(self, full_keys): # Starting backwards, include all keys once a ``key != slice(None)`` # has been found. This will give us only the keys that are really # necessary and thus allow us a shortcut through the indexing. keys = [] non_full_slice_found = False for key in full_keys[::-1]: if isinstance(key, np.ndarray) or key != slice(None): non_full_slice_found = True if non_full_slice_found: keys.append(key) keys = tuple(keys[::-1]) axis = self._axis if len(keys) <= axis: # If there aren't enough keys to affect the tiling axis # then it's safe to just pass the keys to each tile. tile = self._tiles[0] tiles = [tile[keys] for tile in self._tiles] scalar_keys = filter(_is_scalar, keys) result = LinearMosaic(tiles, axis - len(scalar_keys)) else: axis_lengths = [tile.shape[axis] for tile in self._tiles] offsets = np.cumsum([0] + axis_lengths[:-1]) splits = offsets - 1 axis_key = keys[axis] if _is_scalar(axis_key): # Find the single relevant tile tile_index = np.searchsorted(splits, axis_key) - 1 tile = self._tiles[tile_index] tile_indices = list(keys) tile_indices[axis] -= offsets[tile_index] result = tile[tuple(tile_indices)] elif isinstance(axis_key, (slice, collections.Iterable)) and \ not isinstance(axis_key, basestring): # Find the list of relevant tiles. # NB. If the stride is large enough, this might not be a # contiguous subset of self._tiles. 
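                # Worked example (illustrative): for tiles of axis lengths
                # [3, 4] the offsets are [0, 3]; an axis key of slice(1, 6, 2)
                # touches indices (1, 3, 5), so tile 0 is sliced at local
                # index 1 and tile 1 at local indices (0, 2).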
if isinstance(axis_key, slice): size = self.shape[axis] all_axis_indices = range(*axis_key.indices(size)) else: all_axis_indices = tuple(axis_key) tile_indices = np.searchsorted(splits, all_axis_indices) - 1 pairs = itertools.izip(all_axis_indices, tile_indices) i = itertools.groupby(pairs, lambda axis_tile: axis_tile[1]) tiles = [] tile_slice = list(keys) for tile_index, group_of_pairs in i: axis_indices = list(zip(*group_of_pairs))[0] tile = self._tiles[tile_index] axis_indices = np.array(axis_indices) axis_indices -= offsets[tile_index] if len(axis_indices) == 1: # Even if we only need one value from this tile # we must preserve the axis dimension by using # a slice instead of a scalar. start = axis_indices[0] step = 1 stop = start + 1 else: start = axis_indices[0] step = axis_indices[1] - start stop = axis_indices[-1] + step tile_slice[axis] = slice(start, stop, step) tiles.append(tile[tuple(tile_slice)]) if isinstance(axis_key, slice) and \ axis_key.step is not None and axis_key.step < 0: tiles.reverse() # Adjust the axis of the new mosaic to account for any # scalar keys prior to our current mosaic axis. new_axis = axis for key in keys[:axis]: if _is_scalar(key): new_axis -= 1 result = LinearMosaic(tiles, new_axis) else: raise TypeError('invalid key {!r}'.format(axis_key)) return result def ndarray(self): data = np.empty(self.shape, dtype=self.dtype) offset = 0 indices = [slice(None)] * self.ndim axis = self._axis for tile in self._tiles: next_offset = offset + tile.shape[axis] indices[axis] = slice(offset, next_offset) data[indices] = tile.ndarray() offset = next_offset return data def masked_array(self): data = ma.empty(self.shape, dtype=self.dtype, fill_value=self.fill_value) offset = 0 indices = [slice(None)] * self.ndim axis = self._axis for tile in self._tiles: next_offset = offset + tile.shape[axis] indices[axis] = slice(offset, next_offset) data[indices] = tile.masked_array() offset = next_offset return data def ndarrays(arrays): """ Return a list of NumPy ndarray objects corresponding to the given biggus Array objects. This can be more efficient (and hence faster) than converting the individual arrays one by one. """ return engine.ndarrays(*arrays) #: The maximum number of bytes to allow when processing an array in #: "bite-size" chunks. The value has been empirically determined to #: provide vaguely near optimal performance under certain conditions. MAX_CHUNK_SIZE = 8 * 1024 * 1024 def _all_slices(array): return _all_slices_inner(array.dtype.itemsize, array.shape) def _all_slices_inner(item_size, shape, always_slices=False): # Return the slices for each dimension which ensure complete # coverage by chunks no larger than MAX_CHUNK_SIZE. # e.g. For a float32 array of shape (100, 768, 1024) the slices are: # (0, 1, 2, ..., 99), # (slice(0, 256), slice(256, 512), slice(512, 768)), # (slice(None) nbytes = item_size all_slices = [] for i, size in reversed(list(enumerate(shape))): if size * nbytes <= MAX_CHUNK_SIZE: slices = (slice(None),) elif nbytes > MAX_CHUNK_SIZE: if always_slices: slices = [slice(i, i + 1) for i in range(size)] else: slices = range(size) else: step = MAX_CHUNK_SIZE // nbytes slices = [] for start in range(0, size, step): slices.append(slice(start, np.min([start + step, size]))) nbytes *= size all_slices.insert(0, slices) return all_slices def save(sources, targets, masked=False): """ Save the numeric results of each source into its corresponding target. Parameters ---------- sources: list The list of source arrays for saving from; limited to length 1. 
targets: list The list of target arrays for saving to; limited to length 1. masked: boolean Uses a masked array from sources if True. """ # TODO: Remove restriction assert len(sources) == 1 and len(targets) == 1 array = sources[0] target = targets[0] # Request bitesize pieces of the source and assign them to the # target. # NB. This algorithm does not use the minimal number of chunks. # e.g. If the second dimension could be sliced as 0:99, 99:100 # then clearly the first dimension would have to be single # slices for the 0:99 case, but could be bigger slices for the # 99:100 case. # It's not yet clear if this really matters. all_slices = _all_slices(array) for index in np.ndindex(*[len(slices) for slices in all_slices]): keys = tuple(slices[i] for slices, i in zip(all_slices, index)) if masked: target[keys] = array[keys].masked_array() else: target[keys] = array[keys].ndarray() class _StreamsHandler(object): __metaclass__ = ABCMeta @abstractmethod def finalise(self): """ Once a chunk has been processed, this method will be called to complete any remaining computation and return a "result chunk" (which itself could very well go on for further processing). """ pass @abstractmethod def input_iteration_order(self, iteration_order): pass @abstractmethod def process_chunks(self, chunks): pass class _AggregationStreamsHandler(_StreamsHandler): def __init__(self, array, axis): self.array = array self.axis = axis self.current_keys = None @abstractmethod def bootstrap(self, processed_chunk_shape): """ Initialise the processing of the next chunk. Parameters ---------- processed_chunk_shape : list The shape that the current chunk will have once it has been computed. For example, for an aggregation of a chunk of shape ``(x, y, z)``, over axis 1, ``the processed_chunk_shape`` would be ``[x, z]``. """ pass def input_iteration_order(self, iteration_order): order = [i if i < self.axis else i + 1 for i in iteration_order] order.append(self.axis) return order def process_chunks(self, chunks): chunk, = chunks keys = list(chunk.keys) del keys[self.axis] result = None # If this chunk is a new source of data, do appropriate finalisation # of the previous chunk and initialise this one. if keys != self.current_keys: # If this isn't the first time this method has been called, # finalise any data which is waiting to be dealt with. if self.current_keys is not None: result = self.finalise() # Setup the processing of this new chunk. 
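            # (Illustrative: with axis=1, chunks sharing the same keys on the
            # remaining axes are accumulated via process_data(); a change in
            # those keys triggers the finalise() call above.)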
shape = list(chunk.data.shape) del shape[self.axis] self.bootstrap(shape) self.current_keys = keys self.process_data(chunk.data) return result @abstractmethod def process_data(self, data): pass class _CountStreamsHandler(_AggregationStreamsHandler): def bootstrap(self, processed_chunk_shape): self.current_shape = processed_chunk_shape self.running_count = 0 def finalise(self): count = np.ones(self.current_shape, dtype='i') * self.running_count chunk = Chunk(self.current_keys, count) return chunk def process_data(self, data): self.running_count += data.shape[self.axis] class _CountMaskedStreamsHandler(_AggregationStreamsHandler): def bootstrap(self, processed_chunk_shape): self.running_count = np.zeros(processed_chunk_shape, dtype='i') def finalise(self): chunk = Chunk(self.current_keys, self.running_count) return chunk def process_data(self, data): self.running_count += np.ma.count(data, axis=self.axis) class _MinStreamsHandler(_AggregationStreamsHandler): def bootstrap(self, processed_chunk_shape): self.result = np.zeros(processed_chunk_shape, dtype=self.array.dtype) def finalise(self): array = self.result # Promote array-scalar to 0-dimensional array. if array.ndim == 0: array = np.array(array) chunk = Chunk(self.current_keys, array) return chunk def process_data(self, data): self.result = np.min(data, axis=self.axis) class _MinMaskedStreamsHandler(_AggregationStreamsHandler): def bootstrap(self, processed_chunk_shape): self.result = np.zeros(processed_chunk_shape, dtype=self.array.dtype) def finalise(self): array = self.result # Promote array-scalar to 0-dimensional array. if array.ndim == 0: array = np.ma.array(array) chunk = Chunk(self.current_keys, array) return chunk def process_data(self, data): self.result = np.min(data, axis=self.axis) class _MaxStreamsHandler(_AggregationStreamsHandler): def bootstrap(self, processed_chunk_shape): self.result = np.zeros(processed_chunk_shape, dtype=self.array.dtype) def finalise(self): array = self.result # Promote array-scalar to 0-dimensional array. if array.ndim == 0: array = np.array(array) chunk = Chunk(self.current_keys, array) return chunk def process_data(self, data): self.result = np.max(data, axis=self.axis) class _MaxMaskedStreamsHandler(_AggregationStreamsHandler): def bootstrap(self, processed_chunk_shape): self.result = np.zeros(processed_chunk_shape, dtype=self.array.dtype) def finalise(self): array = self.result # Promote array-scalar to 0-dimensional array. if array.ndim == 0: array = np.ma.array(array) chunk = Chunk(self.current_keys, array) return chunk def process_data(self, data): self.result = np.max(data, axis=self.axis) class _SumStreamsHandler(_AggregationStreamsHandler): def bootstrap(self, processed_chunk_shape): self.running_total = np.zeros(processed_chunk_shape, dtype=self.array.dtype) def finalise(self): array = self.running_total # Promote array-scalar to 0-dimensional array. if array.ndim == 0: array = np.array(array) chunk = Chunk(self.current_keys, array) return chunk def process_data(self, data): self.running_total += np.sum(data, axis=self.axis) class _SumMaskedStreamsHandler(_AggregationStreamsHandler): def bootstrap(self, processed_chunk_shape): self.running_total = np.ma.zeros(processed_chunk_shape, dtype=self.array.dtype) def finalise(self): array = self.running_total # Promote array-scalar to 0-dimensional array. 
if array.ndim == 0: array = np.ma.array(array) chunk = Chunk(self.current_keys, array) return chunk def process_data(self, data): self.running_total += np.sum(data, axis=self.axis) class _MeanStreamsHandler(_AggregationStreamsHandler): def __init__(self, array, axis, mdtol): # The mdtol argument is not applicable to non-masked arrays # so it is ignored. super(_MeanStreamsHandler, self).__init__(array, axis) def bootstrap(self, processed_chunk_shape): self.running_total = np.zeros(processed_chunk_shape, dtype=self.array.dtype) def finalise(self): array = self.running_total / self.array.shape[self.axis] # Promote array-scalar to 0-dimensional array. if array.ndim == 0: array = np.array(array) chunk = Chunk(self.current_keys, array) return chunk def process_data(self, data): self.running_total += np.sum(data, axis=self.axis) class _MeanMaskedStreamsHandler(_AggregationStreamsHandler): def __init__(self, array, axis, mdtol): self._mdtol = mdtol super(_MeanMaskedStreamsHandler, self).__init__(array, axis) def bootstrap(self, processed_chunk_shape): shape = processed_chunk_shape self.running_count = np.zeros(shape, dtype=self.array.dtype) self.running_masked_count = np.zeros(shape, dtype=self.array.dtype) self.running_total = np.zeros(shape, dtype=self.array.dtype) def finalise(self): # Avoid any runtime-warning for divide by zero. mask = self.running_count == 0 denominator = np.ma.array(self.running_count, mask=mask, dtype=float) array = np.ma.array(self.running_total, mask=mask) / denominator # Promote array-scalar to 0-dimensional array. if array.ndim == 0: array = np.ma.array(array) # Apply masked/missing data threshold (mdtol). if self._mdtol < 1: mask_update = np.true_divide(self.running_masked_count, self.running_masked_count + self.running_count) > self._mdtol array.mask |= mask_update chunk = Chunk(self.current_keys, array) return chunk def process_data(self, data): self.running_count += np.ma.count(data, axis=self.axis) self.running_masked_count += np.ma.count_masked(data, axis=self.axis) self.running_total += np.sum(data, axis=self.axis) class _StdStreamsHandler(_AggregationStreamsHandler): # The algorithm used here preserves numerical accuracy whilst only # requiring a single pass, and is taken from: # Welford, BP (August 1962). "Note on a Method for Calculating # Corrected Sums of Squares and Products". # Technometrics 4 (3): 419-420. # http://zach.in.tu-clausthal.de/teaching/info_literatur/Welford.pdf def __init__(self, array, axis, ddof): self.ddof = ddof super(_StdStreamsHandler, self).__init__(array, axis) def bootstrap(self, processed_chunk_shape): self.k = 1 dtype = (np.zeros(1, dtype=self.array.dtype) / 1.).dtype self.q = np.zeros(processed_chunk_shape, dtype=dtype) def finalise(self): self.q /= (self.k - self.ddof) array = np.sqrt(self.q) # Promote array-scalar to 0-dimensional array. if array.ndim == 0: array = np.array(array) chunk = Chunk(self.current_keys, array) return chunk def process_data(self, data): data = np.rollaxis(data, self.axis) if self.k == 1: self.a = data[0].copy() data = data[1:] for data_slice in data: self.k += 1 # Compute a(k). temp = data_slice - self.a temp /= self.k self.a += temp # Compute q(k). temp *= temp temp *= self.k * (self.k - 1) self.q += temp class _StdMaskedStreamsHandler(_AggregationStreamsHandler): # The algorithm used here preserves numerical accuracy whilst only # requiring a single pass, and is taken from: # Welford, BP (August 1962). "Note on a Method for Calculating # Corrected Sums of Squares and Products". 
# Technometrics 4 (3): 419-420. # http://zach.in.tu-clausthal.de/teaching/info_literatur/Welford.pdf def __init__(self, array, axis, ddof): self.ddof = ddof super(_StdMaskedStreamsHandler, self).__init__(array, axis) self.target_shape = list(self.array.shape) del self.target_shape[self.axis] def bootstrap(self, processed_chunk_shape): dtype = (np.zeros(1, dtype=self.array.dtype) / 1.).dtype self.a = np.zeros(processed_chunk_shape, dtype=dtype).flatten() self.q = np.zeros(processed_chunk_shape, dtype=dtype).flatten() self.running_count = np.zeros(processed_chunk_shape, dtype=dtype).flatten() self.current_shape = processed_chunk_shape def finalise(self): mask = self.running_count == 0 denominator = ma.array(self.running_count, mask=mask) - self.ddof q = ma.array(self.q, mask=mask) / denominator array = ma.sqrt(q) array.shape = self.current_shape # Promote array-scalar to 0-dimensional array. if array.ndim == 0: array = np.ma.array(array) chunk = Chunk(self.current_keys, array) return chunk def process_data(self, data): data = np.rollaxis(data, self.axis) for chunk_slice in data: chunk_slice = chunk_slice.flatten() bootstrapped = self.running_count != 0 have_data = ~ma.getmaskarray(chunk_slice) chunk_data = ma.array(chunk_slice).filled(0) # Bootstrap a(k) where necessary. self.a[~bootstrapped] = chunk_data[~bootstrapped] self.running_count += have_data # Compute a(k). do_stuff = bootstrapped & have_data temp = ((chunk_data[do_stuff] - self.a[do_stuff]) / self.running_count[do_stuff]) self.a[do_stuff] += temp # Compute q(k). temp *= temp temp *= (self.running_count[do_stuff] * (self.running_count[do_stuff] - 1)) self.q[do_stuff] += temp class _VarStreamsHandler(_StdStreamsHandler): def finalise(self): chunk = super(_VarStreamsHandler, self).finalise() chunk = Chunk(chunk.keys, chunk.data * chunk.data) return chunk class _VarMaskedStreamsHandler(_StdMaskedStreamsHandler): def finalise(self): chunk = super(_VarMaskedStreamsHandler, self).finalise() chunk = Chunk(chunk.keys, chunk.data * chunk.data) return chunk class ComputedArray(Array): @abstractproperty def sources(self): """The tuple of Array instances from which the result is computed.""" @abstractmethod def streams_handler(self, masked): """Return a StreamsHandler which can compute the result.""" class _Aggregation(ComputedArray): def __init__(self, array, axis, streams_handler_class, masked_streams_handler_class, dtype, kwargs): self._array = ensure_array(array) self._axis = axis self._streams_handler_class = streams_handler_class self._masked_streams_handler_class = masked_streams_handler_class self._dtype = dtype self._kwargs = kwargs @property def dtype(self): return self._dtype @property def shape(self): shape = list(self._array.shape) del shape[self._axis] return tuple(shape) @property def sources(self): return (self._array,) def _getitem_full_keys(self, keys): # Insert an ':' into these keys to get keys for self._array. keys = list(keys) keys[self._axis:self._axis] = [slice(None)] keys = tuple(keys) # Reduce the aggregation-axis by the number of prior dimensions that # get removed by the indexing operation. 
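        # For example (illustrative), an aggregation over axis 2 indexed with
        # (0, slice(None)) loses one leading dimension, so the returned
        # _Aggregation reduces over axis 1 of the sliced source array.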
scalar_axes = map(_is_scalar, keys[:self._axis]) result_axis = self._axis - __builtin__.sum(scalar_axes) return _Aggregation(self._array[keys], result_axis, self._streams_handler_class, self._masked_streams_handler_class, self.dtype, self._kwargs) def ndarray(self): result, = engine.ndarrays(self) return result def masked_array(self): result, = engine.masked_arrays(self) return result def streams_handler(self, masked): if masked: handler_class = self._masked_streams_handler_class else: handler_class = self._streams_handler_class source, = self.sources return handler_class(source, self._axis, **self._kwargs) def _normalise_axis(axis, array): # Convert `axis` to None, or a tuple of positive ints, or raise a # TypeError/ValueError. if axis is None: axes = None elif _is_scalar(axis): axes = (axis,) elif (isinstance(axis, collections.Iterable) and not isinstance(axis, (basestring, collections.Mapping)) and all(map(_is_scalar, axis))): axes = tuple(axis) else: raise TypeError('axis must be None, int, or iterable of ints') if axes is not None: axes = tuple(axis if axis >= 0 else array.ndim + axis for axis in axes) if not all(0 <= axis < array.ndim for axis in axes): raise ValueError("'axis' value is out of bounds") return axes def count(a, axis=None): """ Count the non-masked elements of the array along the given axis. .. note:: Currently limited to operating on a single axis. :param axis: Axis or axes along which the operation is performed. The default (axis=None) is to perform the operation over all the dimensions of the input array. The axis may be negative, in which case it counts from the last to the first axis. If axis is a tuple of ints, the operation is performed over multiple axes. :type axis: None, or int, or iterable of ints. :return: The Array representing the requested mean. :rtype: Array """ axes = _normalise_axis(axis, a) if axes is None or len(axes) != 1: msg = "This operation is currently limited to a single axis" raise AxisSupportError(msg) return _Aggregation(a, axes[0], _CountStreamsHandler, _CountMaskedStreamsHandler, np.dtype('i'), {}) def min(a, axis=None): """ Request the minimum of an Array over any number of axes. .. note:: Currently limited to operating on a single axis. Parameters ---------- a : Array object The object whose minimum is to be found. axis : None, or int, or iterable of ints Axis or axes along which the operation is performed. The default (axis=None) is to perform the operation over all the dimensions of the input array. The axis may be negative, in which case it counts from the last to the first axis. If axis is a tuple of ints, the operation is performed over multiple axes. Returns ------- out : Array The Array representing the requested mean. """ axes = _normalise_axis(axis, a) assert axes is not None and len(axes) == 1 return _Aggregation(a, axes[0], _MinStreamsHandler, _MinMaskedStreamsHandler, a.dtype, {}) def max(a, axis=None): """ Request the maximum of an Array over any number of axes. .. note:: Currently limited to operating on a single axis. Parameters ---------- a : Array object The object whose maximum is to be found. axis : None, or int, or iterable of ints Axis or axes along which the operation is performed. The default (axis=None) is to perform the operation over all the dimensions of the input array. The axis may be negative, in which case it counts from the last to the first axis. If axis is a tuple of ints, the operation is performed over multiple axes. Returns ------- out : Array The Array representing the requested max. 
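
    For example (illustrative), ``max(a, axis=0)`` gives back another lazy
    Array; its values are only computed when e.g. ``.ndarray()`` is called.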
""" axes = _normalise_axis(axis, a) assert axes is not None and len(axes) == 1 return _Aggregation(a, axes[0], _MaxStreamsHandler, _MaxMaskedStreamsHandler, a.dtype, {}) def sum(a, axis=None): """ Request the sum of an Array over any number of axes. .. note:: Currently limited to operating on a single axis. Parameters ---------- a : Array object The object whose summation is to be found. axis : None, or int, or iterable of ints Axis or axes along which the operation is performed. The default (axis=None) is to perform the operation over all the dimensions of the input array. The axis may be negative, in which case it counts from the last to the first axis. If axis is a tuple of ints, the operation is performed over multiple axes. Returns ------- out : Array The Array representing the requested sum. """ axes = _normalise_axis(axis, a) assert axes is not None and len(axes) == 1 return _Aggregation(a, axes[0], _SumStreamsHandler, _SumMaskedStreamsHandler, a.dtype, {}) def mean(a, axis=None, mdtol=1): """ Request the mean of an Array over any number of axes. .. note:: Currently limited to operating on a single axis. :param axis: Axis or axes along which the operation is performed. The default (axis=None) is to perform the operation over all the dimensions of the input array. The axis may be negative, in which case it counts from the last to the first axis. If axis is a tuple of ints, the operation is performed over multiple axes. :type axis: None, or int, or iterable of ints. :param float mdtol: Tolerance of missing data. The value in each element of the resulting array will be masked if the fraction of masked data contributing to that element exceeds mdtol. mdtol=0 means no missing data is tolerated while mdtol=1 will mean the resulting element will be masked if and only if all the contributing elements of the source array are masked. Defaults to 1. :return: The Array representing the requested mean. :rtype: Array """ axes = _normalise_axis(axis, a) if axes is None or len(axes) != 1: msg = "This operation is currently limited to a single axis" raise AxisSupportError(msg) dtype = (np.array([0], dtype=a.dtype) / 1.).dtype kwargs = dict(mdtol=mdtol) return _Aggregation(a, axes[0], _MeanStreamsHandler, _MeanMaskedStreamsHandler, dtype, kwargs) def std(a, axis=None, ddof=0): """ Request the standard deviation of an Array over any number of axes. .. note:: Currently limited to operating on a single axis. :param axis: Axis or axes along which the operation is performed. The default (axis=None) is to perform the operation over all the dimensions of the input array. The axis may be negative, in which case it counts from the last to the first axis. If axis is a tuple of ints, the operation is performed over multiple axes. :type axis: None, or int, or iterable of ints. :param int ddof: Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. By default ddof is zero. :return: The Array representing the requested standard deviation. :rtype: Array """ axes = _normalise_axis(axis, a) if axes is None or len(axes) != 1: msg = "This operation is currently limited to a single axis" raise AxisSupportError(msg) dtype = (np.array([0], dtype=a.dtype) / 1.).dtype return _Aggregation(a, axes[0], _StdStreamsHandler, _StdMaskedStreamsHandler, dtype, dict(ddof=ddof)) def var(a, axis=None, ddof=0): """ Request the variance of an Array over any number of axes. .. note:: Currently limited to operating on a single axis. 
:param axis: Axis or axes along which the operation is performed. The default (axis=None) is to perform the operation over all the dimensions of the input array. The axis may be negative, in which case it counts from the last to the first axis. If axis is a tuple of ints, the operation is performed over multiple axes. :type axis: None, or int, or iterable of ints. :param int ddof: Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. By default ddof is zero. :return: The Array representing the requested variance. :rtype: Array """ axes = _normalise_axis(axis, a) if axes is None or len(axes) != 1: msg = "This operation is currently limited to a single axis" raise AxisSupportError(msg) dtype = (np.array([0], dtype=a.dtype) / 1.).dtype return _Aggregation(a, axes[0], _VarStreamsHandler, _VarMaskedStreamsHandler, dtype, dict(ddof=ddof)) class _ElementwiseStreamsHandler(_StreamsHandler): def __init__(self, sources, operator): self.sources = sources self.operator = operator def finalise(self): pass def input_iteration_order(self, iteration_order): return iteration_order def process_chunks(self, chunks): array = self.operator(*[chunk.data for chunk in chunks]) chunk = Chunk(chunks[0].keys, array) return chunk class _Elementwise(ComputedArray): def __init__(self, array1, array2, numpy_op, ma_op): array1 = ensure_array(array1) array2 = ensure_array(array2) # Broadcast both arrays to the full broadcast shape. # TypeError will be raised if not broadcastable. array1, array2 = BroadcastArray.broadcast_arrays(array1, array2) # Type-promotion - The resultant dtype depends on both the array # dtypes and the operation. Avoid using np.find_common_dtype() here, # as integer division yields a float, whereas standard type coercion # rules with find_common_dtype yields an integer. self._dtype = numpy_op(np.ones(1, dtype=array1.dtype), np.ones(1, dtype=array2.dtype)).dtype self._array1 = array1 self._array2 = array2 self._numpy_op = numpy_op self._ma_op = ma_op @property def dtype(self): return self._dtype @property def shape(self): return self._array1.shape @property def sources(self): return (self._array1, self._array2) def _getitem_full_keys(self, keys): return _Elementwise(self._array1[keys], self._array2[keys], self._numpy_op, self._ma_op) def _calc(self, op): operands = (self._array1, self._array2) np_operands = ndarrays(operands) result = op(*np_operands) return result def ndarray(self): result = self._calc(self._numpy_op) return result def masked_array(self): result = self._calc(self._ma_op) return result def streams_handler(self, masked): if masked: operator = self._ma_op else: operator = self._numpy_op return _ElementwiseStreamsHandler(self.sources, operator) def add(a, b): """ Return the elementwise evaluation of `a + b` as another Array. """ return _Elementwise(a, b, np.add, np.ma.add) def sub(a, b): """ Return the elementwise evaluation of `a - b` as another Array. """ return _Elementwise(a, b, np.subtract, np.ma.subtract) def multiply(a, b): """ Return the elementwise evaluation of `a * b` as another Array. """ return _Elementwise(a, b, np.multiply, np.ma.multiply) def floor_divide(a, b): """ Return the elementwise evaluation of `a / b` as another Array. """ return _Elementwise(a, b, np.floor_divide, np.ma.floor_divide) def true_divide(a, b): """ Return the elementwise evaluation of ``np.true_divide`` as another Array. 
""" return _Elementwise(a, b, np.true_divide, np.ma.true_divide) def power(a, b): """ Return the elementwise evaluation of `a ** b` as another Array. """ return _Elementwise(a, b, np.power, np.ma.power) def _sliced_shape(shape, keys): """ Returns the shape that results from slicing an array of the given shape by the given keys. >>> _sliced_shape(shape=(52350, 70, 90, 180), ... keys=(np.newaxis, slice(None, 10), 3, ... slice(None), slice(2, 3))) (1, 10, 90, 1) """ keys = _full_keys(keys, len(shape)) sliced_shape = [] shape_dim = -1 for key in keys: shape_dim += 1 if _is_scalar(key): continue elif isinstance(key, slice): size = len(range(*key.indices(shape[shape_dim]))) sliced_shape.append(size) elif isinstance(key, np.ndarray) and key.dtype == np.dtype('bool'): # Numpy boolean indexing. sliced_shape.append(__builtin__.sum(key)) elif isinstance(key, (tuple, np.ndarray)): sliced_shape.append(len(key)) elif key is np.newaxis: shape_dim -= 1 sliced_shape.append(1) else: raise ValueError('Invalid indexing object "{}"'.format(key)) sliced_shape = tuple(sliced_shape) return sliced_shape def _full_keys(keys, ndim): """ Given keys such as those passed to ``__getitem__`` for an array of ndim, return a fully expanded tuple of keys. In all instances, the result of this operation should follow: array[keys] == array[_full_keys(keys, array.ndim)] """ if not isinstance(keys, tuple): keys = (keys,) # Make keys mutable, and take a copy. keys = list(keys) # Count the number of keys which actually slice a dimension. n_keys_non_newaxis = len([key for key in keys if key is not np.newaxis]) # Numpy allows an extra dimension to be an Ellipsis, we remove it here # if Ellipsis is in keys, if this doesn't trigger we will raise an # IndexError. is_ellipsis = [key is Ellipsis for key in keys] if n_keys_non_newaxis - 1 >= ndim and any(is_ellipsis): # Remove the left-most Ellipsis, as numpy does. keys.pop(is_ellipsis.index(True)) n_keys_non_newaxis -= 1 if n_keys_non_newaxis > ndim: raise IndexError('Dimensions are over specified for indexing.') lh_keys = [] # Keys, with the last key first. rh_keys = [] take_from_left = True while keys: if take_from_left: next_key = keys.pop(0) keys_list = lh_keys else: next_key = keys.pop(-1) keys_list = rh_keys if next_key is Ellipsis: next_key = slice(None) take_from_left = not take_from_left keys_list.append(next_key) middle = [slice(None)] * (ndim - n_keys_non_newaxis) return tuple(lh_keys + middle + rh_keys[::-1]) def ensure_array(array): """ Assert that the given array is an Array subclass (or numpy array). If the given array is a numpy.ndarray an appropriate NumpyArrayAdapter instance is created, otherwise the passed array must be a subclass of :class:`Array` else a TypeError will be raised. """ if not isinstance(array, Array): if isinstance(array, np.ndarray): array = NumpyArrayAdapter(array) elif np.isscalar(array): array = ConstantArray([], array) else: raise TypeError('The given array should be a `biggus.Array` ' 'instance, got {}.'.format(type(array))) return array def size(array): """ Return a human-readable description of the number of bytes required to store the data of the given array. For example:: >>> array.nbytes 14000000 >> biggus.size(array) '13.35 MiB' Parameters ---------- array : array-like object The array object must provide an `nbytes` property. Returns ------- out : str The Array representing the requested mean. 
""" nbytes = array.nbytes if nbytes < (1 << 10): size = '{} B'.format(nbytes) elif nbytes < (1 << 20): size = '{:.02f} KiB'.format(nbytes / (1 << 10)) elif nbytes < (1 << 30): size = '{:.02f} MiB'.format(nbytes / (1 << 20)) elif nbytes < (1 << 40): size = '{:.02f} GiB'.format(nbytes / (1 << 30)) else: size = '{:.02f} TiB'.format(nbytes / (1 << 40)) return size
gpl-3.0
-5,203,147,137,453,390,000
33.830543
79
0.574641
false
MichaelGuarin0/DocumentClassification
src/han.py
1
7468
""" @author: Michael Guarino """ import numpy as np np.set_printoptions(threshold=np.nan) import tensorflow as tf from tensorflow.contrib import rnn import tensorflow.contrib.layers as layers class HAN: def __init__(self, max_seq_len, max_sent_len, num_classes, vocab_size, embedding_size, max_grad_norm, dropout_keep_proba, learning_rate): ## Parameters self.learning_rate = learning_rate self.vocab_size = vocab_size self.num_classes = num_classes self.max_seq_len = max_seq_len self.embedding_size = embedding_size self.word_encoder_num_hidden = max_seq_len self.word_output_size = max_seq_len self.sentence_encoder_num_hidden = max_sent_len self.sentence_output_size = max_sent_len self.max_grad_norm = max_grad_norm self.dropout_keep_proba = dropout_keep_proba # tf graph input self.input_x = tf.placeholder(shape=[None, None, None], dtype=tf.int32, name="input_x") self.input_y = tf.placeholder(shape=[None, self.num_classes], dtype=tf.int32, name="input_y") self.word_lengths = tf.placeholder(shape=[None, None], dtype=tf.int32, name="word_lengths") self.sentence_lengths = tf.placeholder(shape=[None,], dtype=tf.int32, name="sentence_lengths") self.is_training = tf.placeholder(dtype=tf.bool, name="is_training") # input_x dims (self.document_size, self.sentence_size, self.word_size) = tf.unstack(tf.shape(self.input_x)) with tf.device("/gpu:0"), tf.name_scope("embedding_layer"): w = tf.Variable(tf.random_uniform([self.vocab_size, self.embedding_size], -1.0, 1.0), dtype=tf.float32, name="w") # TODO check if this needs to be marked as untrainable self.input_x_embedded = tf.nn.embedding_lookup(w, self.input_x) # reshape input_x after embedding self.input_x_embedded = tf.reshape(self.input_x_embedded, [self.document_size * self.sentence_size, self.word_size, self.embedding_size]) self.input_x_embedded_lengths = tf.reshape(self.word_lengths, [self.document_size * self.sentence_size]) with tf.variable_scope("word_level"): self.word_encoder_outputs = self.bidirectional_RNN(num_hidden=self.word_encoder_num_hidden, inputs=self.input_x_embedded) word_level_output = self.attention(inputs=self.word_encoder_outputs, output_size=self.word_output_size) with tf.variable_scope("dropout"): print('self.is_training: {}'.format(self.is_training)) word_level_output = layers.dropout(word_level_output, keep_prob=self.dropout_keep_proba, is_training=self.is_training) # reshape word_level output self.sentence_encoder_inputs = tf.reshape(word_level_output, [self.document_size, self.sentence_size, self.word_output_size]) with tf.variable_scope("sentence_level"): self.sentence_encoder_outputs = self.bidirectional_RNN(num_hidden=self.sentence_encoder_num_hidden, inputs=self.sentence_encoder_inputs) sentence_level_output = self.attention(inputs=self.sentence_encoder_outputs, output_size=self.sentence_output_size) with tf.variable_scope("dropout"): sentence_level_output = layers.dropout(sentence_level_output, keep_prob=self.dropout_keep_proba, is_training=self.is_training) # Final model prediction with tf.variable_scope("classifier_output"): self.logits = layers.fully_connected(sentence_level_output, self.num_classes, activation_fn=None) #trainable=self.is_training) self.predictions = tf.argmax(self.logits, axis=1, name="predictions") # Calculate mean cross-entropy loss with tf.variable_scope("loss"): losses = tf.nn.softmax_cross_entropy_with_logits(labels=self.input_y, logits=self.logits) self.loss = tf.reduce_mean(losses) tf.summary.scalar("Loss", self.loss) # Accuracy with tf.variable_scope("accuracy"): correct_predictions = 
tf.equal(self.predictions, tf.argmax(self.input_y, axis=1)) self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy") tf.summary.scalar("Accuracy", self.accuracy) def bidirectional_RNN(self, num_hidden, inputs): """ desc: create bidirectional rnn layer args: num_hidden: number of hidden units inputs: input word or sentence returns: concatenated encoder and decoder outputs """ with tf.name_scope("bidirectional_RNN"): encoder_fw_cell = rnn.GRUCell(num_hidden) encoder_bw_cell = rnn.GRUCell(num_hidden) ((encoder_fw_outputs, encoder_bw_outputs), (_, _)) = tf.nn.bidirectional_dynamic_rnn(cell_fw=encoder_fw_cell, cell_bw=encoder_bw_cell, inputs=inputs, dtype=tf.float32, time_major=True) encoder_outputs = tf.concat((encoder_fw_outputs, encoder_bw_outputs), 2) return encoder_outputs # end def attention(self, inputs, output_size): """ desc: create attention mechanism args: inputs: input which is sentence or document level output from bidirectional rnn layer output_size: specify the dimensions of the output returns: output from attention distribution """ with tf.variable_scope("attention"): attention_context_vector_uw = tf.get_variable(name="attention_context_vector", shape=[output_size], #trainable=self.is_training, initializer=layers.xavier_initializer(), dtype=tf.float32) input_projection_u = layers.fully_connected(inputs, output_size, #trainable=self.is_training, activation_fn=tf.tanh) vector_attn = tf.reduce_sum(tf.multiply(input_projection_u, attention_context_vector_uw), axis=2, keep_dims=True) attention_weights = tf.nn.softmax(vector_attn, dim=1) weighted_projection = tf.multiply(input_projection_u, attention_weights) outputs = tf.reduce_sum(weighted_projection, axis=1) return outputs # end # end
mit
-6,694,304,337,271,059,000
47.810458
119
0.547402
false
nirea/collardata
freebielist.py
1
4787
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from model import FreebieItem, Distributor, Contributor

import datetime
import logging

head = '''
<html>
<head>
<title>%s</title>
<script src="/static/sorttable.js"></script>
<style>
body { background-color: #000000; color: #FFFFFF; }
input { background-color: #000000; color: #FF0000; outline-color: #000000; border-color: #FF0000; }
table.sortable thead {
    background-color:#202020;
    color:#FFFFFF;
    font-weight: bold;
    cursor: default;
}
</style>
</head>
<body>
<b><a href="/freebielist/">Freebies</a> | <a href="/freebielist/distributors">Distributors</a> | <a href="/freebielist/contributors">Contributors</a></b><p>
'''

end = '''
</body>
</html>
'''

class Distributors(webapp.RequestHandler):
    def get(self):
        message = '''<h1>List of Distributors</h1>
        <p>This lists all Distributors currently in the distribution system as of %s.</p>
        <table class="sortable" border=\"1\">''' % datetime.datetime.utcnow().isoformat(' ')
        message += '<tr><th>Row</th><th>Distributor</th><th>Key</th></tr><br />\n'
        query = Distributor.gql("")
        dists = []
        for record in query:
            s = '<td>%s</td><td>%s</td>\n' % (record.avname, record.avkey)
            if (s in dists) == False:
                dists += [s]
        for i in range(0,len(dists)):
            message += '<tr><td>%d</td>%s' % (i+1, dists[i])
        message += "</table>"
        self.response.out.write((head % 'Distributor List') + message + end)

class Contributors(webapp.RequestHandler):
    def get(self):
        message = '''<h1>List of Contributors</h1>
        <p>This lists all Contributors currently in the distribution system as of %s.</p>
        <table class="sortable" border=\"1\">''' % datetime.datetime.utcnow().isoformat(' ')
        message += '<tr><th>Row</th><th>Contributor</th><th>Key</th></tr><br />\n'
        query = Contributor.gql("")
        dists = []
        for record in query:
            s = '<td>%s</td><td>%s</td>\n' % (record.avname, record.avkey)
            if (s in dists) == False:
                dists += [s]
        for i in range(0,len(dists)):
            message += '<tr><td>%d</td>%s' % (i+1, dists[i])
        message += "</table>"
        self.response.out.write((head % 'Contributor List') + message + end)

class MainPage(webapp.RequestHandler):
    def get(self):
        message = '''<h1>List of Freebie items</h1>
        <p>This lists all item currently in the distribution system as of %s.</p>
        <table class="sortable" border=\"1\">''' % datetime.datetime.utcnow().isoformat(' ')
        message += '<tr><th>Row</th><th>Owner</th><th>Giver ID</th><th>Name</th><th>Version</th><th>Update Date</th><th>Distributor Location</th><th>Texture Key</th><th>Texture Server</th><th>Texture Updatetime</th></tr><br />\n'
        query = FreebieItem.gql("")
        content =[]
        for record in query:
            owner = record.freebie_owner
            if (owner == None):
                owner = '***Not assigned***'
            if (record.freebie_texture_update == None):
                i = -1
            else:
                i = record.freebie_texture_update
            content += ['<td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%d</td>\n' % (owner, record.freebie_giver, record.freebie_name, record.freebie_version, record.freebie_timedate, record.freebie_location, record.freebie_texture_key, record.freebie_texture_serverkey, i)]
        content = sorted(content)
        for i in range(0,len(content)):
            message += '<tr><td>%d</td>%s' % (i+1, content[i])
        message += "</table>"
        self.response.out.write((head % 'Freebie Items List') + message + end)

application = webapp.WSGIApplication([
    (r'/.*?/distributors',Distributors),
    (r'/.*?/contributors',Contributors),
    ('.*', MainPage)
    ], debug=True)

def real_main():
    run_wsgi_app(application)

def profile_main():
    # This is the main function for profiling
    # We've renamed our original main() above to real_main()
    import cProfile, pstats, StringIO
    prof = cProfile.Profile()
    prof = prof.runctx("real_main()", globals(), locals())
    stream = StringIO.StringIO()
    stats = pstats.Stats(prof, stream=stream)
    stats.sort_stats("time")  # Or cumulative
    stats.print_stats(80)  # 80 = how many to print
    # The rest is optional.
    # stats.print_callees()
    # stats.print_callers()
    logging.info("Profile data:\n%s", stream.getvalue())

if __name__ == "__main__":
    profile_main()
gpl-2.0
-5,281,383,379,673,156,000
32.702899
320
0.577815
false
larioandr/pyons
pyons/sim.py
1
16159
#
# File: sim.py
#
# Provides simulation kernel (queues, simulation objects, etc.). It is not intended to be called directly,
# the API layer is given in a separate module.
#
from enum import Enum
from decimal import Decimal

import pyons.queues as queues


class Singleton(type):
    """
    Metaclass for all singletons in the system (e.g. Kernel).
    The example code is taken from
    http://python-3-patterns-idioms-test.readthedocs.io/en/latest/Metaprogramming.html
    """
    instance = None

    def __call__(cls, *args, **kw):
        if not cls.instance:
            cls.instance = super(Singleton, cls).__call__(*args, **kw)
        return cls.instance


class Kernel(object, metaclass=Singleton):

    class Context(object):
        def __init__(self):
            self.call_stack = []
            self.initializing = False
            self.finalizing = False

        @property
        def current_handler(self):
            return None if len(self.call_stack) == 0 else self.call_stack[-1]

    class Envelope(object):
        def __init__(self):
            self.fire_time = None       # when to launch
            self.source_handler = None  # who created the event (could be None if called from initializer)
            self.target_handler = None  # where to dispatch the event (could be None)
            self.event = None           # user-defined event

    def __init__(self):
        self.__queue = None                 # event queue, see queues.py module
        self.__default_handler = None       # stores handler function
        self.__conditional_handlers = []    # stores pairs (condition, handler-function)
        self.__initializers = {}    # methods to be called when the simulation starts, stored as {stage: [fn1, fn2]...}
        self.__finalizers = {}      # methods to be called when the simulation stops, stored as {stage: [fn1, fn2]...}
        self.__context = Kernel.Context()   # current context
        self.__events_num = 0               # number of events generated
        self.__stop_flag = False            # flag to force simulation stop
        self.__aborted = False      # flag indicating the simulation was aborted, no finalizers should be called
        self.__time = 0.                    # simulation time (seconds)
        self.__stop_conditions = []         # each stop condition is a predicate: (model) -> bool
        self.model = None
        self.__environment = None

    def run(self, mode=None, max_events=None, a_queue=None):
        """
        Run the simulation. This includes:
        1) initialization;
        2) event loop iteration;
        3) finalization.

        At the first step, all the methods registered with @static_initializer() decorator would be called.
        These methods are expected to create initial events and prepare the model.

        The second step (running the event loop) is performed till event queue is not empty.
        User can also stop the loop with stop() or abort() call.
        If ``max_events`` argument was set, it specifies the number of events after which the loop will stop.

        The third step (finalization) is performed if the second step ended with anything except abort() call.
        It calls all the methods registered with @finalizer decorator.

        Args:
            mode: not supported now
            max_events: after handling this number of events simulation will stop
            a_queue: the event queue. By default, ``queues.HeapQueue`` would be used.
        """
        self.__queue = a_queue if a_queue is not None else queues.HeapQueue(
            event_time_getter=lambda envelope: envelope.fire_time)
        self.__events_num = 0       # setting number of events served
        self.__stop_flag = False    # we are not going to stop from the beginning
        self.__aborted = False      # and we were not aborted yet
        self.__time = 0.0           # setting simulation time to zero
        self.context.call_stack = []
        self._initialize()          # calling initializing handlers

        #
        # Main event-loop
        #
        self.context.initializing = False
        while not self.__stop_flag and not self.__queue.empty:
            if max_events is not None and self.__events_num > max_events:
                self.__stop_flag = True
            for stop_condition in self.__stop_conditions:
                if stop_condition(self.model):
                    if self.environment is not None:
                        self.environment.debug("[===] stop condition fired: {}".format(stop_condition.sim_name),
                                               sender="kernel")
                    self.__stop_flag = True
                    break
            if not self.__stop_flag:
                e = self.__queue.pop()
                self.__time = e.fire_time
                self._dispatch(e)
                self.__events_num += 1

        if not self.__aborted:
            self._finalize()        # finalize the simulation

    @property
    def environment(self):
        if self.__environment is None:
            self.environment = Environment()
        return self.__environment

    @environment.setter
    def environment(self, env):
        self.__environment = env
        env.kernel = self

    @property
    def events(self):
        return self.__queue.events

    @property
    def time(self):
        return self.__time

    @property
    def context(self):
        return self.__context

    @property
    def num_events_handled(self):
        return self.__events_num

    def stop(self):
        """
        Stop the simulation. After the control will come back to the event-loop,
        no more events would be taken and finalization will take place.
        """
        self.__stop_flag = True

    def abort(self):
        """
        Stop the simulation and don't perform finalization.
        """
        self.__stop_flag = True
        self.__aborted = True

    @staticmethod
    def add_staged_function(fn, stage, functions_dict):
        if stage not in functions_dict:
            functions_dict[stage] = []
        functions_dict[stage].append(fn)

    @staticmethod
    def staged_functions_iterator(functions_dict):
        stages = sorted(functions_dict)
        for stage in stages:
            for fn in functions_dict[stage]:
                yield stage, fn

    def register_handler(self, handler, *, is_default=False, condition=None):
        if is_default:
            if self.__default_handler is not None:
                raise RuntimeError("single default handler is allowed")
            self.__default_handler = handler
        else:
            if condition is not None:
                self.__conditional_handlers.append((condition, handler))

    def register_initializer(self, fn, stage=0):
        Kernel.add_staged_function(fn, stage, self.__initializers)

    def register_finalizer(self, fn, stage=0):
        Kernel.add_staged_function(fn, stage, self.__finalizers)

    def register_stop_condition(self, predicate):
        self.__stop_conditions.append(predicate)

    def schedule(self, event, fire_time, target_handler=None):
        if self.context.finalizing:
            raise RuntimeError("scheduling disallowed during finalization")
        if not self.context.initializing and self.context.current_handler is None:
            raise RuntimeError("scheduling outside the handler scope prohibited")
        envelope = Kernel.Envelope()
        envelope.fire_time = fire_time
        envelope.event = event
        envelope.source_handler = self.context.current_handler
        envelope.target_handler = target_handler
        return self.__queue.push(envelope)

    def cancel(self, event_index):
        self.__queue.remove(index=event_index)

    def _initialize(self):
        for stage, fn in Kernel.staged_functions_iterator(self.__initializers):
            if self.environment is not None:
                self.environment.debug("[init stage='{stage}'] calling {fn}".format(fn=fn.sim_name, stage=stage),
                                       sender='kernel')
            fn(model=self.model)

    def _finalize(self):
        for stage, fn in Kernel.staged_functions_iterator(self.__finalizers):
            if self.environment is not None:
                self.environment.debug("[fin stage='{stage}'] calling {fn}".format(fn=fn.sim_name, stage=stage),
                                       sender='kernel')
            fn(model=self.model)

    def _dispatch(self, envelope):
        handler = self.__default_handler
        if envelope.target_handler is not None:
            handler = envelope.target_handler
        else:
            for condition, cond_handler in self.__conditional_handlers:
                if condition(envelope.event):
                    handler = cond_handler
                    break
        if handler is None:
            raise RuntimeError("handler not found for the event '{}'".format(envelope.event))
        if self.environment is not None:
            self.environment.debug("[>>>] calling {fn}() for event='{event}'".format(
                fn=handler.sim_name, event=envelope.event), sender='kernel')
        handler(envelope.event, model=self.model)


class LogLevel(Enum):
    ALL = -1
    TRACE = 0
    DEBUG = 1
    INFO = 2
    WARNING = 3
    ERROR = 4
    OFF = 5


class Environment(object):
    def __init__(self):
        self.kernel = None
        self.log_level = LogLevel.INFO
        self.time_precision = 9
        self.sender_field_width = 16

    def log(self, level, message, sender=None):
        assert LogLevel.TRACE.value <= level.value <= LogLevel.ERROR.value
        if level.value >= self.log_level.value:
            s_time = "{time:0{width}.{precision}f} ".format(time=self.kernel.time,
                                                            width=self.time_precision+6,
                                                            precision=self.time_precision)
            s_level = "[{level.name:^7s}] ".format(level=level)
            if sender is not None:
                s_sender = "({sender:^{width}s}) ".format(sender=sender, width=self.sender_field_width)
            else:
                s_sender = ""
            print("{time}{level}{sender}{message}".format(time=s_time, level=s_level,
                                                          sender=s_sender, message=message))

    def trace_enter_function(self, fn, indention_level=0):
        self.log(LogLevel.TRACE, "{indent}----> {fun_name}".format(
            indent=" "*indention_level, fun_name=fn.__name__))

    def trace_exit_function(self, fn, indention_level=0):
        self.log(LogLevel.TRACE, "{indent}<---- {fun_name}".format(
            indent=" "*indention_level, fun_name=fn.__name__))

    def debug(self, message, sender=None):
        self.log(LogLevel.DEBUG, message, sender)

    def info(self, message, sender=None):
        self.log(LogLevel.INFO, message, sender)

    def warning(self, message, sender=None):
        self.log(LogLevel.WARNING, message, sender)

    def error(self, message, sender=None):
        self.log(LogLevel.ERROR, message, sender)
        self.kernel.abort()


def now():
    return Kernel().time


def run(model=None, mode=None, max_events=None, queue=None):
    kernel = Kernel()
    kernel.model = model
    kernel.run(mode=mode, max_events=max_events, a_queue=queue)


def stop():
    Kernel().stop()


def abort():
    Kernel().abort()


def schedule(event, dt, handler=None):
    kernel = Kernel()
    kernel.schedule(event, fire_time=kernel.time + dt, target_handler=handler)


def create_timeout(event, dt):
    kernel = Kernel()
    kernel.schedule(event, fire_time=kernel.time + dt, target_handler=kernel.context.current_handler)


def send_event(event, handler=None):
    kernel = Kernel()
    kernel.schedule(event, fire_time=kernel.time, target_handler=handler)


def cancel_event(event_index):
    Kernel().cancel(event_index=event_index)


def debug(message, sender=None):
    Kernel().environment.debug(message, sender=sender)


def info(message, sender=None):
    Kernel().environment.info(message, sender=sender)


def warning(message, sender=None):
    Kernel().environment.warning(message, sender=sender)


def error(message, sender=None):
    Kernel().environment.error(message, sender=sender)


def get_log_level():
    return Kernel().environment.log_level


def setup_env(log_level=LogLevel.DEBUG, sender_field_width=16, time_precision=9):
    env = Kernel().environment
    env.log_level = log_level
    env.sender_field_width = sender_field_width
    env.time_precision = time_precision


def static_handler(predicate=None, default=False, name=None):
    """
    Produce a decorator for the static event handlers.

    Args:
        predicate: a condition for the event to enter the decorated function
        default: flag indicating the handler is the default one.

    Depending on whether ``predicate`` was provided, two scenarios are possible:

    1) ``predicate`` is not set (``None`` by default): the handler can be called directly only if set
    when scheduling the event with ``schedule/send_event`` methods, or if scheduled with
    ``create_timeout`` method. Example:

        def sender():
            ...
            schedule(dt=1.0, event='Timeout!', target_handler=handle_timeout)
            ...

        @static_handler()
        def handle_timeout(event):
            ...  # handles the 'Timeout!' event after one second

    2) ``predicate`` is set to some logical function: the handler can be specified directly as in the
    previous case, but it also will be called if the event confirms the predicate. Example:

        def sender():
            ...
            schedule(dt=1.0, event='Timeout!')
            ...

        @static_handler(event_predicate=lambda event: event == 'Timeout!')
        def handle_timeout(event):
            ...  # handles the 'Timeout!' event after one second

    If ``default`` flag is set, the handler is considered the default one (that is called one no handler
    is specified in scheduling call and no conditional handlers found for the event).
    Note, that only one default handler is allowed.

    Returns:
        a decorator for the static event handler
    """
    def decorator(fn):
        def wrapper(event, model):
            kern = Kernel()
            kern.context.call_stack.append(wrapper)
            fn(event, model=model)
            kern.context.call_stack.pop()

        kernel = Kernel()
        kernel.register_handler(wrapper, is_default=default, condition=predicate)
        wrapper.sim_name = name if name is not None else fn.__name__
        return wrapper
    return decorator


def static_initializer(stage=0, name=None):
    def decorator(fn):
        kernel = Kernel()  # it's ok to define kernel here since it is a singleton

        def wrapper(model):
            kernel.context.initializing = True
            fn(model=model)
            kernel.context.initializing = False

        kernel.register_initializer(fn=wrapper, stage=stage)
        wrapper.sim_name = name if name is not None else fn.__name__
        return wrapper
    return decorator


def static_finalizer(stage=0, name=None):
    def decorator(fn):
        kernel = Kernel()  # it's ok to define kernel here since it is a singleton

        def wrapper(model):
            kernel.context.finalizing = True
            fn(model=model)
            kernel.context.finalizing = False

        kernel.register_finalizer(fn=wrapper, stage=stage)
        wrapper.sim_name = name if name is not None else fn.__name__
        return wrapper
    return decorator


def static_stop_condition(guard=None, name=None):
    def decorator(fn):
        kernel = Kernel()  # it's ok to define kernel here since it is a singleton

        def wrapper(model):
            if guard is None or guard(kernel):
                return fn(model=model)
            else:
                return False

        kernel.register_stop_condition(wrapper)
        wrapper.sim_name = name if name is not None else fn.__name__
        return wrapper
    return decorator
mit
-8,448,115,932,783,224,000
33.900648
120
0.607463
false
dhrone/pydPiper
displays/luma_i2c.py
1
6449
#!/usr/bin/python
# coding: UTF-8

# Driver for SSD1306 OLED display on the RPi using I2C interface
# Written by: Ron Ritchey
#
# Enabled by Richard Hull's excellent luma.oled project (https://github.com/rm-hull/luma.oled)
#

from __future__ import unicode_literals

import time, math,logging
import lcd_display_driver
import fonts
import graphics as g
from PIL import Image
import logging
from luma.core.interface.serial import i2c
from luma.core.render import canvas
from luma.oled.device import sh1106
from luma.oled.device import ssd1306
from luma.oled.device import ssd1322
from luma.oled.device import ssd1325
from luma.oled.device import ssd1331
from luma.core.error import DeviceNotFoundError

try:
    import RPi.GPIO as GPIO
except:
    logging.debug("RPi.GPIO not installed")


class luma_i2c():

    def __init__(self, rows=64, cols=128, i2c_address=0x3d, i2c_port=1, devicetype=u'ssd1306'):

        self.i2c_address = i2c_address
        self.i2c_port = i2c_port
        self.rows = rows
        self.cols = cols

        self.fb = [[]]

        # Initialize the default font
        font = fonts.bmfont.bmfont('latin1_5x8_fixed.fnt')
        self.fp = font.fontpkg

        serial = i2c(port=i2c_port, address=i2c_address)
        if devicetype.lower() == u'ssd1306':
            self.device = ssd1306(serial)
        elif devicetype.lower() == u'sh1106':
            self.device = sh1106(serial)
        elif devicetype.lower() == u'ssd1322':
            self.device = ssd1322(serial)
        elif devicetype.lower() == u'ssd1325':
            self.device = ssd1325(serial)
        elif devicetype.lower() == u'ssd1331':
            self.device = ssd1331(serial)
        else:
            raise ValueError('{0} not a recognized luma device type'.format(devicetype))

    def clear(self):
        with canvas(self.device) as draw:
            draw.rectangle(self.device.bounding_box, outline="black", fill="black")

    def message(self, text, row=0, col=0, varwidth=True):
        ''' Send string to LCD. Newline wraps to second line'''

        if row >= self.rows or col >= self.cols:
            raise IndexError

        textwidget = display.gwidgetText(text, self.fp, {}, [], varwidth )
        self.update(textwidget.image)

    def update(self, image):
        retry = 5

        # Make image the same size as the display
        img = image.crop( (0,0,self.cols, self.rows))

        while retry:
            # send to display
            try:
                self.device.display(img)
                break
            except IOError:
                retry -= 1

    def msgtest(self, text, wait=1.5):
        self.clear()
        self.message(text)
        time.sleep(wait)


if __name__ == '__main__':
    import getopt,sys,os
    import graphics as g
    import fonts
    import display
    import moment

    def processevent(events, starttime, prepost, db, dbp):
        for evnt in events:
            t,var,val = evnt
            if time.time() - starttime >= t:
                if prepost in ['pre']:
                    db[var] = val
                elif prepost in ['post']:
                    dbp[var] = val

    logging.basicConfig(format=u'%(asctime)s:%(levelname)s:%(message)s', handlers=[logging.StreamHandler()], level=logging.DEBUG)

    try:
        opts, args = getopt.getopt(sys.argv[1:],"hr:c:",["row=","col=","i2c_address=","i2c_port=","devicetype="])
    except getopt.GetoptError:
        print 'luma_i2c.py -r <rows> -c <cols> --devicetype <devicetype> --i2c_address <addr> --i2c_port <port>'
        sys.exit(2)

    # Set defaults
    rows = 64
    cols = 128
    i2c_address = 0x3d
    i2c_port = 1
    devicetype = u'ssd1306'

    for opt, arg in opts:
        if opt == '-h':
            print 'luma_i2c.py -r <rows> -c <cols> --devicetype <devicetype> --i2c_address <addr> --i2c_port <port>\nDevice types can be sh1106, ssd1306, ssd1322, ssd1325, and ssd1331'
            sys.exit()
        elif opt in ("-r", "--rows"):
            rows = int(arg)
        elif opt in ("-c", "--cols"):
            cols = int(arg)
        elif opt in ("--devicetype"):
            devicetype = arg
        elif opt in ("--i2c_address"):
            i2c_address = int(arg)
        elif opt in ("--i2c_port"):
            i2c_port = int(arg)

    db = {
        'actPlayer':'mpd',
        'playlist_position':1,
        'playlist_length':5,
        'title':"Nicotine & Gravy",
        'artist':"Beck",
        'album':'Midnight Vultures',
        'tracktype':'MP3 Stereo 24 bit 44.1 Khz',
        'bitdepth':'16 bits',
        'samplerate':'44.1 kHz',
        'elapsed':0,
        'length':400,
        'volume':50,
        'stream':'Not webradio',
        'utc': moment.utcnow(),
        'outside_temp_formatted':'46\xb0F',
        'outside_temp_max':72,
        'outside_temp_min':48,
        'outside_conditions':'Windy',
        'system_temp_formatted':'98\xb0C',
        'state':'stop',
        'system_tempc':81.0
    }

    dbp = {
        'actPlayer':'mpd',
        'playlist_position':1,
        'playlist_length':5,
        'title':"Nicotine & Gravy",
        'artist':"Beck",
        'album':'Midnight Vultures',
        'tracktype':'MP3 Stereo 24 bit 44.1 Khz',
        'bitdepth':'16 bits',
        'samplerate':'44.1 kHz',
        'elapsed':0,
        'length':400,
        'volume':50,
        'stream':'Not webradio',
        'utc': moment.utcnow(),
        'outside_temp_formatted':'46\xb0F',
        'outside_temp_max':72,
        'outside_temp_min':48,
        'outside_conditions':'Windy',
        'system_temp_formatted':'98\xb0C',
        'state':'stop',
        'system_tempc':81.0
    }

    events = [
        (15, 'state', 'play'),
        (20, 'title', 'Mixed Bizness'),
        (30, 'volume', 80),
        (40, 'title', 'I Never Loved a Man (The Way I Love You)'),
        (40, 'artist', 'Aretha Franklin'),
        (40, 'album', 'The Queen Of Soul'),
        (70, 'state', 'stop'),
        (90, 'state', 'play'),
        (100, 'title', 'Do Right Woman, Do Right Man'),
        (120, 'volume', 100),
        (140, 'state', 'play' )
    ]

    DISPLAY_OK = False
    try:
        print "LUMA OLED Display Test"
        print "ROWS={0}, COLS={1}, DEVICETYPE={4}, I2C_ADDRESS={2}, I2C_PORT={3}".format(rows,cols,i2c_address,i2c_port,devicetype)

        lcd = luma_i2c(rows,cols,i2c_address,i2c_port,devicetype)
        DISPLAY_OK = True

        lcd.clear()
        lcd.message("OLED Display\nStarting",0,0,True)
        time.sleep(2)
        lcd.clear()

        starttime = time.time()
        elapsed = int(time.time()-starttime)
        timepos = time.strftime(u"%-M:%S", time.gmtime(int(elapsed))) + "/" + time.strftime(u"%-M:%S", time.gmtime(int(254)))

        dc = display.display_controller((cols,rows))
        f_path = os.path.join(os.path.dirname(__file__), '../pages_ssd1306.py')
        dc.load(f_path, db,dbp )

        starttime=time.time()
        while True:
            elapsed = int(time.time()-starttime)
            db['elapsed']=elapsed
            db['utc'] = moment.utcnow()
            processevent(events, starttime, 'pre', db, dbp)
            img = dc.next()
            processevent(events, starttime, 'post', db, dbp)
            lcd.update(img)
            time.sleep(.001)

    except KeyboardInterrupt:
        pass

    finally:
        if DISPLAY_OK:
            lcd.clear()
            lcd.message("Goodbye!", 0, 0, True)
            time.sleep(2)
            lcd.clear()
        print "Luma OLED Display Test Complete"
mit
163,339,184,375,251,800
24.59127
175
0.649248
false