Dataset schema (one row per source file); value ranges as reported by the dataset viewer:

repo_name       string, lengths 5 to 92
path            string, lengths 4 to 232
copies          string, 19 classes
size            string, lengths 4 to 7
content         string, lengths 721 to 1.04M
license         string, 15 classes
hash            int64, -9,223,277,421,539,062,000 to 9,223,102,107B
line_mean       float64, 6.51 to 99.9
line_max        int64, 15 to 997
alpha_frac      float64, 0.25 to 0.97
autogenerated   bool, 1 class
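A minimal sketch of how a dump with this schema could be loaded and filtered with pandas. The file name "code_corpus.parquet" and the filter thresholds are assumptions for illustration, not part of the dataset; only the column names and ranges come from the schema above.

import pandas as pd

# Load the dump; the file name is hypothetical.
df = pd.read_parquet("code_corpus.parquet")

# Keep human-written files within a plausible alpha fraction and line width,
# using the columns listed in the schema above.
filtered = df[
    (~df["autogenerated"])
    & (df["alpha_frac"].between(0.25, 0.97))
    & (df["line_max"] <= 200)
]

print(filtered[["repo_name", "path", "license", "size"]].head())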
raiden-network/raiden
raiden/utils/notifying_queue.py
1
1422
from typing import Generic, Iterable, List, TypeVar

from gevent.event import Event
from gevent.queue import Queue

T = TypeVar("T")


class NotifyingQueue(Event, Generic[T]):
    """This is not the same as a JoinableQueue. Here, instead of waiting for
    all the work to be processed, the wait is for work to be available.
    """

    def __init__(self, maxsize: int = None, items: Iterable[T] = ()) -> None:
        super().__init__()
        self.queue = Queue(maxsize, items)

        if items:
            self.set()

    def put(self, item: T) -> None:
        """Add new item to the queue."""
        self.queue.put(item)
        self.set()

    def get(self, block: bool = True, timeout: float = None) -> T:
        """Removes and returns an item from the queue."""
        value = self.queue.get(block, timeout)
        if self.queue.empty():
            self.clear()
        return value

    def peek(self, block: bool = True, timeout: float = None) -> T:
        return self.queue.peek(block, timeout)

    def __len__(self) -> int:
        return len(self.queue)

    def copy(self) -> List[T]:
        """Copies the current queue items."""
        copy = self.queue.copy()

        result = list()
        while not copy.empty():
            result.append(copy.get_nowait())

        return result

    def __repr__(self) -> str:
        return f"NotifyingQueue(id={id(self)}, num_items={len(self.queue)})"
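A minimal usage sketch for the class above, assuming a gevent environment: the consumer greenlet blocks on the inherited Event until put() signals that work is available.

import gevent

queue = NotifyingQueue()

def consumer():
    # wait() is inherited from gevent's Event and returns once put() calls set().
    queue.wait()
    print("got item:", queue.get())

worker = gevent.spawn(consumer)
queue.put(42)
worker.join()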
mit
7,711,695,073,187,145,000
28.020408
77
0.580169
false
pwarren/AGDeviceControl
agdevicecontrol/tests/test_aggregator.py
1
9428
# AGDeviceControl # Copyright (C) 2005 The Australian National University # # This file is part of AGDeviceControl. # # AGDeviceControl is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # AGDeviceControl is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with AGDeviceControl; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA import os import random import types import agdevicecontrol from agdevicecontrol.server.aggregator import Aggregator from agdevicecontrol.server.configurator import Configurator from twisted.internet import defer, reactor from twisted.trial import unittest from twisted.spread import pb import agdevicecontrol.server.ports as ports from twisted.test.test_process import SignalMixin from agdevicecontrol.tests.subprocessprotocol import SubProcessProtocol configdata = """ # sample Aggregator.conf [DeviceServer1] host: localhost port: %s password: bkurk """ % ports.deviceserver class TestAggregator(SignalMixin, unittest.TestCase): def setUpClass(self): """Start a DeviceServer in a child process to test against""" self.deviceserverprocess = SubProcessProtocol() self.deviceserverprocess.waitOnStartUp( ['server.py', 'deviceserver.conf', '-n'], \ path=os.path.join(agdevicecontrol.path,'bin') ) if self.deviceserverprocess.running is False: raise unittest.SkipTest, "DeviceServer didn't start correctly, skipping tests" #wait for slow single CPU buildbots to catch up import time time.sleep(1) # use the config above conf = Configurator() conf.fromString(configdata) # can be set by timeout self.failure = False # safety timeout self.timeout = reactor.callLater(10, self.failed, "Aggregator failed to connect to all deviceservers ... failing") self.aggregator = Aggregator(conf) self.done = False while not self.done: print "Waiting for aggregator to connect to deviceservers" reactor.iterate(0.1) if self.aggregator.connected: self.succeeded() if self.failure: raise unittest.SkipTest, "Aggregator didn't connect to all deviceservers ... skipping tests" # FIXME: we really should handle missing and newly appearing deviceservers. # safety timeout self.timeout = reactor.callLater(10, self.failed, "Aggregator failed to map all deviceservers ... 
failing") self.aggregator.notifyOnMapped(self.succeeded) self.done = False while not self.done: print "Waiting for aggregator to map deviceservers" reactor.iterate(0.1) if self.failure: raise unittest.SkipTest, "Aggregator didn't start correctly, skipping tests" def tearDownClass(self): """Stop the DeviceServer running in a child process""" print "*** tearDownClass: ", self.deviceserverprocess.done self.deviceserverprocess.waitOnShutDown() def succeeded(self, *args): """Allow reactor iteration loop in test proper to exit and pass test""" self.done = True if self.timeout is not None: self.timeout.cancel() # safety timeout no longer necessary self.timeout = None self.lastargs = args # make persistent for later checks def failed(self, reason): """Allow reactor iteration loop in test proper to exit and fail test""" self.done = True self.failure = reason self.timeout.cancel() # safety timeout no longer necessary self.timeout = None def setUp(self): """I'm called at the very beginning of each test""" self.done = False self.failure = None self.timeout = None def tearDown(self): """I'm called at the end of each test""" if self.timeout: self.timeout.cancel() def timedOut(self): """I'm called when the safety timer expires indicating test probably won't complete""" print "timedOut callback, test did not complete" self.failed("Safety timeout callback ... test did not complete") reactor.crash() #---------- tests proper ------------------------------------ def test_handled_configurator(self): """Aggregator instantiated with a configurator rather than .conf filename""" assert 'DeviceServer1' in self.aggregator.config def test_password(self): """Aggregator should have random password""" assert type(self.aggregator.getPassword()) == type("") # ensure a second instance has differing password ... conf = Configurator() conf.fromString('') other = Aggregator(conf) assert other.getPassword() != self.aggregator.getPassword() def test_devicelist_as_deferred(self): """Return aggregated device list""" # safety timeout self.timeout = reactor.callLater(10, self.failed, "retrieving devicelist timed out ... failing") d = self.aggregator.getDeviceList() assert isinstance(d, defer.Deferred) d.addCallback(self.succeeded) # idle until code above triggers succeeded or timeout causes failure while not self.done: reactor.iterate(0.1) # will arrive here eventually when either succeeded or failed method has fired if self.failure: self.failed(self.failure) devicelist = self.lastargs[0] assert len(devicelist) == 2 assert 'Device1' in devicelist assert 'Device2' in devicelist def test_devicemap_as_deferred(self): """Return aggregated device map""" # safety timeout self.timeout = reactor.callLater(10, self.failed, "retrieving devicemap timed out ... 
failing") d = self.aggregator.getDeviceMap() assert isinstance(d, defer.Deferred) # caution: as this deferred is ready-to-go, the callback is called *immediately* d.addCallback(self.succeeded) # i.e., self.succeeded has now been called # idle until code above triggers succeeded or timeout causes failure while not self.done: reactor.iterate(0.1) # will arrive here eventually when either succeeded or failed method has fired if self.failure: self.failed(self.failure) devicemap = self.lastargs[0] print devicemap assert type(devicemap) == types.DictType assert len(devicemap) == 1 assert 'PseudoDevice' in devicemap assert len(devicemap['PseudoDevice']) == 2 assert 'Device1' in devicemap['PseudoDevice'] assert 'Device2' in devicemap['PseudoDevice'] def test_device_execute(self): """Proxy forward command to correct DeviceServer""" # safety timeout self.timeout = reactor.callLater(10, self.failed, "executing remote setParameter timed out ... failing") # 3-digit random integer value = int(random.random()*1000) # get a device key for use in next step self.done = False d = self.aggregator.getDeviceList() d.addCallback(self.succeeded) d.addErrback(self.failed) while not self.done: reactor.iterate(0.1) if self.failure: self.fail(self.failure) print print "DEBUG:" device = self.lastargs[0][0] print device.name # store number in 'remote' PseudoDevice d = self.aggregator.deviceExecute(device, 'setParameter', value) assert isinstance(d, defer.Deferred) d.addCallback(self.succeeded) # idle until code above triggers succeeded or timeout causes failure self.done = False while not self.done: reactor.iterate(0.1) # will arrive here eventually when either succeeded or failed method has fired if self.failure: self.failed(self.failure) # safety timeout self.timeout = reactor.callLater(10, self.failed, "executing remote getParameter timed out ... failing") # store number in 'remote' PseudoDevice d = self.aggregator.deviceExecute(device, 'getParameter') assert isinstance(d, defer.Deferred) d.addCallback(self.succeeded) # idle until code above triggers succeeded or timeout causes failure self.done = False while not self.done: reactor.iterate(0.1) # will arrive here eventually when either succeeded or failed method has fired if self.failure: self.failed(self.failure) returnvalue = self.lastargs[0] assert returnvalue == value if False: test_handled_configurator = True test_devicelist_as_deferred = True test_devicemap_as_deferred = True test_device_execute = True test_password = True
gpl-2.0
-369,536,809,628,326,600
30.851351
122
0.655176
false
vallemrv/tpvB3
tpv_for_eetop/valle_libs/valleorm/models/relatedfields.py
1
9703
# -*- coding: utf-8 -*- # @Author: Manuel Rodriguez <vallemrv> # @Date: 29-Aug-2017 # @Email: [email protected] # @Last modified by: valle # @Last modified time: 18-Feb-2018 # @License: Apache license vesion 2.0 import sys import inspect import importlib from constant import constant class RelationShip(object): def __init__(self, othermodel, **options): self.tipo_class = constant.TIPO_RELATION self.class_name = "ForeignKey" self.main_module = None self.related_class = None self.main_class = None self.field_related_name = None self.field_related_id = None self.on_delete = constant.CASCADE if type(othermodel) in (str, unicode): self.related_name = othermodel else: self.related_name = othermodel.__name__ self.related_class = othermodel for k, v in options.items(): setattr(self, k, v) def get_id_field_name(self): if self.field_related_name == None: return self.related_name.lower() + "_id" return self.field_related_name def set_id_field_name(self, value): self.field_related_name = value def get(self, **condition): pass field_name_id = property(get_id_field_name, set_id_field_name) class OneToManyField(RelationShip): def __init__(self, main_class, related_name, **kargs): super(OneToManyField, self).__init__(related_name, **kargs) self.class_name = "OneToManyField" self.main_class = main_class self.related_name = related_name if self.main_module == None: self.main_module = self.main_class.__module__ self.related_class = create_class_related(self.main_module, self.related_name) self.tb_name_main = self.main_class.get_db_table() if self.field_related_id == None: self.field_name_id = self.tb_name_main + "_id" def get(self, **condition): query = u"{0}={1}".format(self.field_name_id, self.main_class.id) if 'query' in condition: condition['query'] += " AND " + query else: condition['query'] = query return self.related_class.filter(**condition) def add(self, child): if self.main_class.id == -1: self.main_class.save() setattr(child, self.field_name_id, self.main_class.id) child.save() class ForeignKey(RelationShip): def __init__(self, othermodel, on_delete=constant.CASCADE, **kargs): super(ForeignKey, self).__init__(othermodel, **kargs) self.class_name = "ForeignKey" self.on_delete = on_delete def get_choices(self, **condition): return self.related_class.getAll(**condition) def get_sql_pk(self): sql = u"FOREIGN KEY({0}) REFERENCES {1}(id) %s" % self.on_delete sql = sql.format(self.field_name_id, self.related_name) return sql def get(self): if self.related_class == None: if self.main_module == None: self.main_module = self.main_class.__module__ self.related_class = create_class_related(self.main_module, self.related_name) reg = self.related_class(db_name=self.main_class.db_name) reg.load_by_pk(getattr(self.main_class, self.field_name_id)) return reg class ManyToManyField(RelationShip): def __init__(self, othermodel, db_table_nexo=None, **kargs): super(ManyToManyField, self).__init__(othermodel, **kargs) self.class_name = "ManyToManyField" self.db_table_nexo = db_table_nexo if self.main_class != None: if self.main_module == None: self.main_module = self.main_class.__module__ self.tb_name_main = self.main_class.get_db_table() self.related_class = create_class_related(self.main_module, self.related_name) self.tb_name_related = self.related_class.get_db_table() if self.field_related_id == None: self.field_name_id = self.tb_name_main + "_id" self.field_related_id = self.tb_name_related + "_id" def get_sql_tb_nexo(self): key = "PRIMARY KEY ({0}, {1})".format(self.field_name_id, self.field_related_id) frgKey = 
u"FOREIGN KEY({0}) REFERENCES {1}(id) ON DELETE CASCADE, " frgKey = frgKey.format(self.field_name_id, self.tb_name_main) frgKey += u"FOREIGN KEY({0}) REFERENCES {1}(id) ON DELETE CASCADE" frgKey = frgKey.format(self.field_related_id, self.tb_name_related) sql = u"CREATE TABLE IF NOT EXISTS {0} ({1}, {2} ,{3}, {4});" sql = sql.format(self.db_table_nexo, self.field_name_id+" INTEGER NOT NULL", self.field_related_id+" INTEGER NOT NULL ",key, frgKey) return sql def get(self, **condition): if "tb_nexo" in condition: self.db_table_nexo = condition["tb_nexo"] if "field_related_id" in condition: self.field_related_id = condition["field_related_id"] if "field_name_id" in condition: self.field_name_id = condition["field_name_id"] condition["columns"] = [self.tb_name_related+".*"] condition["joins"] = [(self.db_table_nexo + " ON "+ \ self.db_table_nexo+"."+self.field_related_id+\ "="+self.tb_name_related+".id")] query = self.field_name_id+"="+str(self.main_class.id) if 'query' in condition: condition["query"] += " AND " + query else: condition["query"] = query if self.related_class == None: if self.main_module == None: self.main_module = self.main_class.__module__ self.related_class = create_class_related(self.main_module, self.related_name) return self.related_class.filter(**condition) def add(self, *childs): for child in childs: child.save() cols = [self.field_name_id, self.field_related_id] values = [str(self.main_class.id), str(child.id)] sql = u"INSERT OR REPLACE INTO {0} ({1}) VALUES ({2});".format(self.db_table_nexo, ", ".join(cols), ", ".join(values)); self.main_class.execute(sql) def delete(self, child): sql = u"DELETE FROM {0} WHERE {1}={2} AND {3}={4};".format(self.db_table_nexo, self.field_name_id, child.id, self.field_related_id, self.main_class.id) self.main_class.execute(sql) class ManyToManyChild(RelationShip): def __init__(self, main_class, related_name, **kargs): super(ManyToManyChild, self).__init__(related_name, **kargs) self.class_name = "ManyToManyChild" self.main_class = main_class self.related_name = related_name if self.main_module == None: self.main_module = self.main_class.__module__ self.related_class = create_class_related(self.main_module, self.related_name) self.tb_name_main = self.main_class.get_db_table() self.tb_name_related = self.related_class.get_db_table() self.db_table_nexo = self.tb_name_related + '_' + self.tb_name_main if self.field_related_id == None: self.field_name_id = self.tb_name_main + "_id" self.field_related_id = self.tb_name_related + "_id" def get(self, **condition): if "tb_nexo" in condition: self.db_table_nexo = condition["tb_nexo"] if "field_related_id" in condition: self.field_related_id = condition["field_related_id"] if "field_name_id" in condition: self.field_name_id = condition["field_name_id"] condition["columns"] = [self.tb_name_related+".*"] condition["joins"] = [(self.db_table_nexo + " ON "+ \ self.db_table_nexo+"."+self.field_related_id+\ "="+self.tb_name_related+".id")] query = self.field_name_id+"="+str(self.main_class.id) if 'query' in condition: condition["query"] += " AND " + query else: condition["query"] = query return self.related_class.filter(**condition) def add(self, *childs): for child in childs: child.save() cols = [self.field_name_id, self.field_related_id] values = [str(self.main_class.id), str(child.id)] sql = u"INSERT OR REPLACE INTO {0} ({1}) VALUES ({2});".format(self.db_table_nexo, ", ".join(cols), ", ".join(values)); self.main_class.execute(sql) def delete(self, child): sql = u"DELETE FROM {0} WHERE {1}={2} AND 
{3}={4};".format(self.db_table_nexo, self.field_related_id, child.id, self.field_name_id, self.main_class.id) self.main_class.execute(sql) def create_class_related(module, class_name): module = ".".join(module.split(".")[:-1]) modulo = importlib.import_module(module) nclass = getattr(modulo, str(class_name)) return nclass
apache-2.0
-2,517,989,328,502,603,300
37.351779
99
0.547356
false
sassoftware/saspy
saspy/sasiocom.py
1
37140
# # Copyright SAS Institute # # Licensed under the Apache License, Version 2.0 (the License); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import datetime import csv import io import numbers import os import shlex import sys import warnings try: from win32com.client import dynamic except ImportError: pass try: import pandas as pd except ImportError: pass class SASConfigCOM(object): """ This object is not intended to be used directly. Instantiate a SASSession object instead. """ NO_OVERRIDE = ['kernel', 'sb'] def __init__(self, **kwargs): self._kernel = kwargs.get('kernel') session = kwargs['sb'] sascfg = session.sascfg.SAScfg name = session.sascfg.name cfg = getattr(sascfg, name) opts = getattr(sascfg, 'SAS_config_options', {}) outs = getattr(sascfg, 'SAS_output_options', {}) self.host = cfg.get('iomhost') self.port = cfg.get('iomport') self.user = cfg.get('omruser') self.pw = cfg.get('omrpw') self.authkey = cfg.get('authkey') self.class_id = cfg.get('class_id', '440196d4-90f0-11d0-9f41-00a024bb830c') self.provider = cfg.get('provider') self.encoding = cfg.get('encoding', '') self.output = outs.get('output', 'html5') self.verbose = opts.get('verbose', True) self.verbose = kwargs.get('verbose', self.verbose) self._lock = opts.get('lock_down', True) self._prompt = session.sascfg._prompt if self.authkey is not None: self._set_authinfo() for key, value in filter(lambda x: x[0] not in self.NO_OVERRIDE, kwargs.items()): self._try_override(key, value) def _set_authinfo(self): """ Attempt to set the session user's credentials based on provided key to read from ~/.authinfo file. See .authinfo documentation here: https://documentation.sas.com/api/docsets/authinfo/9.4/content/authinfo.pdf. This method supports a subset of the .authinfo spec, in accordance with other IO access methods. This method will only parse `user` and `password` arguments, but does support spaces in values if the value is quoted. Use python's `shlex` library to parse these values. """ if os.name == 'nt': authfile = os.path.expanduser(os.path.join('~', '_authinfo')) else: authfile = os.path.expanduser(os.path.join('~', '.authinfo')) try: with open(authfile, 'r') as f: # Take first matching line found parsed = (shlex.split(x, posix=False) for x in f.readlines()) authline = next(filter(lambda x: x[0] == self.authkey, parsed), None) except OSError: print('Error trying to read {}'.format(authfile)) authline = None if authline is None: print('Key {} not found in authinfo file: {}'.format(self.authkey, authfile)) elif len(authline) < 5: print('Incomplete authinfo credentials in {}; key: {}'.format(authfile, self.authkey)) else: # Override user/pw if previously set # `authline` is in the following format: # AUTHKEY username USERNAME password PASSWORD self.user = authline[2] self.pw = authline[4] def _try_override(self, attr, value): """ Attempt to override a configuration file option if `self._lock` is False. Otherwise, warn the user. :param attr: Configuration attribute. :param value: Configuration value. 
""" if self._lock is False: setattr(self, attr, value) else: err = "Param '{}' was ignored due to configuration restriction".format(attr) print(err, file=sys.stderr) class SASSessionCOM(object): """ Initiate a connection to a SAS server and provide access for Windows clients without the Java dependency. Utilizes available COM objects for client communication with the IOM interface. It may be possible to communicate with local SAS instances as well, although this is functionality is untested. A slight change may be required to the `_startsas` method to support local instances. """ SAS_APP = 'SASApp' HTML_RESULT_FILE = 'saspy_results.html' # SASObjectManager.Protocols Enum values PROTOCOL_COM = 0 PROTOCOL_IOM = 2 # SAS Date/Time/Datetime formats FMT_DEFAULT_DATE_NAME = 'E8601DA' FMT_DEFAULT_DATE_LENGTH = 10 FMT_DEFAULT_DATE_PRECISION = 0 FMT_DEFAULT_TIME_NAME = 'E8601TM' FMT_DEFAULT_TIME_LENGTH = 15 FMT_DEFAULT_TIME_PRECISION = 6 FMT_DEFAULT_DATETIME_NAME = 'E8601DT' FMT_DEFAULT_DATETIME_LENGTH = 26 FMT_DEFAULT_DATETIME_PRECISION = 6 # Pandas data types PD_NUM_TYPE = ('i', 'u', 'f', 'c') PD_STR_TYPE = ('S', 'U', 'V') PD_DT_TYPE = ('M') PD_BOOL_TYPE = ('b') # ADODB RecordSet CursorTypeEnum values CURSOR_UNSPECIFIED = -1 CURSOR_FORWARD = 0 CURSOR_KEYSET = 1 CURSOR_DYNAMIC = 2 CURSOR_STATIC = 3 # ADODB RecordSet LockTypeEnum values LOCK_UNSPECIFIED = -1 LOCK_READONLY = 1 LOCK_PESSIMISTIC = 2 LOCK_OPTIMISTIC = 3 LOCK_BATCH_OPTIMISTIC = 4 # ADODB RecordSet CommandTypeEnum values CMD_UNSPECIFIED = -1 CMD_TEXT = 1 CMD_TABLE = 2 CMD_STORED_PROC = 4 CMD_UNKNOWN = 8 CMD_FILE = 256 CMD_TABLE_DIRECT = 512 # ADODB Connection SchemaEnum values SCHEMA_COLUMNS = 4 SCHEMA_TABLES = 20 # ADODB ObjectStateEnum values STATE_CLOSED = 0 STATE_OPEN = 1 # FileService StreamOpenMode values STREAM_READ = 1 STREAM_WRITE = 2 def __init__(self, **kwargs): self._log = '' self.sascfg = SASConfigCOM(**kwargs) self._sb = kwargs.get('sb') self.pid = self._startsas() def __del__(self): if self.adodb.State == self.STATE_OPEN: self._endsas() def _startsas(self) -> str: """ Create a workspace and open a connection with SAS. :return [str]: """ if getattr(self, 'workspace', None) is not None: # Do not create a new connection return self.workspace.UniqueIdentifier factory = dynamic.Dispatch('SASObjectManager.ObjectFactoryMulti2') server = dynamic.Dispatch('SASObjectManager.ServerDef') self.keeper = dynamic.Dispatch('SASObjectManager.ObjectKeeper') self.adodb = dynamic.Dispatch('ADODB.Connection') if self.sascfg.host is None: # Create a local connection. server.MachineDNSName = '127.0.0.1' server.Port = 0 server.Protocol = self.PROTOCOL_COM user = None password = None else: # Create a remote connection. The following are required: # 1. host # 2. port # 3. 
class_id server.MachineDNSName = self.sascfg.host server.Port = self.sascfg.port server.Protocol = self.PROTOCOL_IOM server.ClassIdentifier = self.sascfg.class_id if self.sascfg.user is not None: user = self.sascfg.user else: user = self.sascfg._prompt('Username: ') if self.sascfg.pw is not None: password = self.sascfg.pw else: password = self.sascfg._prompt('Password: ', pw=True) self.workspace = factory.CreateObjectByServer(self.SAS_APP, True, server, user, password) self.keeper.AddObject(1, 'WorkspaceObject', self.workspace) self.adodb.Open('Provider={}; Data Source=iom-id://{}'.format( self.sascfg.provider, self.workspace.UniqueIdentifier)) ll = self.submit("options svgtitle='svgtitle'; options validvarname=any validmemname=extend pagesize=max nosyntaxcheck; ods graphics on;", "text") if self.sascfg.verbose: print("SAS Connection established. Workspace UniqueIdentifier is "+str(self.workspace.UniqueIdentifier)+"\n") return self.workspace.UniqueIdentifier def _endsas(self): """ Close a connection with SAS. """ self.adodb.Close() self.keeper.RemoveObject(self.workspace) self.workspace.Close() if self.sascfg.verbose: print("SAS Connection terminated. Workspace UniqueIdentifierid was "+str(self.pid)) def _getlst(self, buf: int=2048) -> str: """ Flush listing. :option buf [int]: Download buffer. Default 2048. :return [str]: """ flushed = self.workspace.LanguageService.FlushList(buf) result = flushed while flushed: flushed = self.workspace.LanguageService.FlushList(buf) result += flushed return result def _getlog(self, buf: int=2048) -> str: """ Flush log. :option buf [int]: Download buffer. Default 2048. :return [str]: """ flushed = self.workspace.LanguageService.FlushLog(buf) result = flushed while flushed: flushed = self.workspace.LanguageService.FlushLog(buf) result += flushed # Store flush result in running log self._log += result if result.count('ERROR:') > 0: warnings.warn("Noticed 'ERROR:' in LOG, you ought to take a look and see if there was a problem") self._sb.check_error_log = True return result def _getfile(self, fname: str, buf: int=2048, decode: bool=False) -> str: """ Use object file service to download a file from the provider. :param fname [str]: Filename. :option buf [int]: Download buffer. Default 2048. :option decode [bool]: Decode the byte stream. :return [str]: """ fobj = self.workspace.FileService.AssignFileref('outfile', 'DISK', fname, '', '') # Use binary stream to support text and image transfers. The binary # stream interface does not require a max line length, which allows # support of arbitrarily wide tables. stream = fobj[0].OpenBinaryStream(self.STREAM_READ) flushed = stream.Read(buf) result = bytes(flushed) while flushed: flushed = stream.Read(buf) result += bytes(flushed) stream.Close() self.workspace.FileService.DeassignFileref(fobj[0].FilerefName) if decode is True: result = result.decode(self.sascfg.encoding, errors='replace') return result def _gethtmlfn(self) -> str: """ Return the path of the output HTML file. This is the combination of the `workpath` attribute and `HTML_RESULT_FILE` constant. :return [str]: """ return self._sb.workpath + self.HTML_RESULT_FILE def _reset(self): """ Reset the LanguageService interface to its initial state with respect to token scanning. Use it to release the LanguageService from an error state associated with the execution of invalid syntax or incomplete program source. This primarily occurs when a statement is submitted without a trailing semicolon. 
""" self.workspace.LanguageService.Reset() def _tablepath(self, table: str, libref: str=None) -> str: """ Define a sas dataset path based on a table name and optional libref name. Will return a two-level or one-level path string based on the provided arguments. One-level names are of this form: `table`, while two-level names are of this form: `libref.table`. If libref is not defined, SAS will implicitly define the library to WORK or USER. The USER library needs to have been defined previously in SAS, otherwise WORK is the default option. If the `libref` parameter is any value that evaluates to `False`, the one-level path is returned. :param table [str]: SAS data set name. :option libref [str]: Optional library name. :return [str]: """ if not libref: path = "'{}'n".format(table.strip()) else: path = "{}.'{}'n".format(libref, table.strip()) return path def _schema(self, table: str, libref: str=None) -> dict: """ Request a table schema for a given `libref.table`. :param table [str]: Table name :option libref [str]: Library name. :return [dict]: """ #tablepath = self._tablepath(table, libref=libref) if not libref: tablepath = table else: tablepath = "{}.{}".format(libref, table) criteria = [None, None, tablepath] schema = self.adodb.OpenSchema(self.SCHEMA_COLUMNS, criteria) schema.MoveFirst() metadata = {} while not schema.EOF: col_info = {x.Name: x.Value for x in schema.Fields} if col_info['FORMAT_NAME'] in self._sb.sas_date_fmts: col_info['CONVERT'] = lambda x: self._sb.SAS_EPOCH + datetime.timedelta(days=x) if x else x elif col_info['FORMAT_NAME'] in self._sb.sas_datetime_fmts: col_info['CONVERT'] = lambda x: self._sb.SAS_EPOCH + datetime.timedelta(seconds=x) if x else x # elif FIXME TIME FORMATS else: col_info['CONVERT'] = lambda x: x metadata[col_info['COLUMN_NAME']] = col_info schema.MoveNext() schema.Close() return metadata def _prompt(self, key: str, hide: bool=False) -> tuple: """ Ask the user for input about a given key. :param key [str]: Key name. :option hide [bool]: Hide user keyboard input. :return [tuple]: """ input_ok = False while input_ok is False: val = self.sascfg._prompt('Enter value for macro variable {} '.format(key), pw=hide) if val is None: raise RuntimeError("No value for prompted macro variable provided.") if val: input_ok = True else: print('Input not valid.') return (key, val) def _asubmit(self, code: str, results: str='html'): """ Submit any SAS code. Does not return a result. :param code [str]: SAS statements to execute. """ # Support html ods if results.lower() == 'html': ods_open = """ ods listing close; ods {} (id=saspy_internal) options(bitmap_mode='inline') file="{}" device=svg style={}; ods graphics on / outputfmt=png; """.format(self.sascfg.output, self._gethtmlfn(), self._sb.HTML_Style) ods_close = """ ods {} (id=saspy_internal) close; ods listing; """.format(self.sascfg.output) else: ods_open = '' ods_close = '' # Submit program full_code = ods_open + code + ods_close self.workspace.LanguageService.Submit(full_code) def submit(self, code: str, results: str='html', prompt: dict=None, **kwargs) -> dict: """ Submit any SAS code. Returns log and listing as dictionary with keys LOG and LST. :param code [str]: SAS statements to execute. :option results [str]: Result format. Options: HTML, TEXT. Default HTML. :option prompt [dict]: Create macro variables from prompted keys. 
""" RESET = """;*';*";*/;quit;run;""" prompt = prompt if prompt is not None else {} printto = kwargs.pop('undo', False) macro_declare = '' for key, value in prompt.items(): macro_declare += '%let {} = {};\n'.format(*self._prompt(key, value)) # Submit program self._asubmit(RESET + macro_declare + code + RESET, results) # Retrieve listing and log log = self._getlog() if results.lower() == 'html': # Make the following replacements in HTML listing: # 1. Swap \x0c for \n # 2. Change body class selector # 3. Increase font size listing = self._getfile(self._gethtmlfn(), decode=True) \ .replace(chr(12), chr(10)) \ .replace('<body class="c body">', '<body class="l body">') \ .replace('font-size: x-small;', 'font-size: normal;') else: listing = self._getlst() # Invalid syntax will put the interface in to an error state. Reset # the LanguageService to prevent further errors. # FIXME: In the future, may only want to reset on ERROR. However, this # operation seems pretty lightweight, so calling `_reset()` on all # submits is not a burden. self._reset() if printto: self._asubmit("\nproc printto;run;\n", 'text') log += self._getlog() self._sb._lastlog = log return {'LOG': log, 'LST': listing} def saslog(self) -> str: """ Return the full SAS log. :return [str]: """ return self._log def exist(self, table: str, libref: str=None) -> bool: """ Determine if a `libref.table` exists. :param table [str]: Table name :option libref [str]: Library name. :return [bool]: """ #tablepath = self._tablepath(table, libref=libref) #criteria = [None, None, tablepath] #schema = self.adodb.OpenSchema(self.SCHEMA_COLUMNS, criteria) #exists = not schema.BOF #schema.Close() #return exists code = 'data _null_; e = exist("' if len(libref): code += libref+"." code += "'"+table.strip()+"'n"+'"'+");\n" code += 'v = exist("' if len(libref): code += libref+"." code += "'"+table.strip()+"'n"+'"'+", 'VIEW');\n if e or v then e = 1;\n" code += "te='TABLE_EXISTS='; put te e;run;\n" ll = self.submit(code, "text") l2 = ll['LOG'].rpartition("TABLE_EXISTS= ") l2 = l2[2].partition("\n") exists = int(l2[0]) return bool(exists) def read_sasdata(self, table: str, libref: str=None, dsopts: dict=None) -> tuple: """ Read any SAS dataset and return as a tuple of header, rows :param table [str]: Table name :option libref [str]: Library name. :option dsopts [dict]: Dataset options. :return [tuple]: """ TARGET = '_saspy_sd2df' EXPORT = """ data {tgt}; set {tbl} {dopt}; run; """ dsopts = self._sb._dsopts(dsopts) if dsopts is not None else '' tablepath = self._tablepath(table, libref=libref) recordset = dynamic.Dispatch('ADODB.RecordSet') # Create an intermediate dataset with `dsopts` applied export = EXPORT.format(tgt=TARGET, tbl=tablepath, dopt=dsopts) self.workspace.LanguageService.Submit(export) meta = self._schema(TARGET) # Connect RecordSet object to ADODB connection with params: # Cursor: Forward Only # Lock: Read Only # Command: Table Direct recordset.Open(TARGET, self.adodb, self.CURSOR_FORWARD, self.LOCK_READONLY, self.CMD_TABLE_DIRECT) recordset.MoveFirst() header = [x.Name for x in recordset.Fields] rows = [] while not recordset.EOF: rows.append([meta[x.Name]['CONVERT'](x.Value) for x in recordset.Fields]) recordset.MoveNext() recordset.Close() return (header, rows, meta) def read_csv(self, filepath: str, table: str, libref: str=None, nosub: bool=False, opts: dict=None): """ Submit an import job to the SAS workspace. :param filepath [str]: File URI. :param table [str]: Table name. :option libref [str]: Library name. 
:option nosob [bool]: Return the SAS code instead of executing it. :option opts [dict]: SAS PROC IMPORT options. """ opts = opts if opts is not None else {} filepath = 'url ' + filepath if filepath.lower().startswith('http') else filepath tablepath = self._tablepath(table, libref=libref) proc_code = """ filename csv_file "{}"; proc import datafile=csv_file out={} dbms=csv replace; {} run; """.format(filepath.replace('"', '""'), tablepath, self._sb._impopts(opts)) if nosub is True: return proc_code else: return self.submit(proc_code, 'text') def write_csv(self, filepath: str, table: str, libref: str=None, nosub: bool=True, dsopts: dict=None, opts: dict=None): """ Submit an export job to the SAS workspace. :param filepath [str]: File URI. :param table [str]: Table name. :option libref [str]: Library name. :option nosob [bool]: Return the SAS code instead of executing it. :option opts [dict]: SAS PROC IMPORT options. :option dsopts [dict]: SAS dataset options. """ opts = opts if opts is not None else {} dsopts = dsopts if dsopts is not None else {} tablepath = self._tablepath(table, libref=libref) proc_code = """ filename csv_file "{}"; proc export data={} {} outfile=csv_file dbms=csv replace; {} run; """.format(filepath.replace('"', '""'), tablepath, self._sb._dsopts(dsopts), self._sb._expopts(opts)) if nosub is True: return proc_code else: return self.submit(proc_code, 'text')['LOG'] def dataframe2sasdata(self, df: '<Pandas Data Frame object>', table: str ='a', libref: str ="", keep_outer_quotes: bool=False, embedded_newlines: bool=True, LF: str = '\x01', CR: str = '\x02', colsep: str = '\x03', colrep: str = ' ', datetimes: dict={}, outfmts: dict={}, labels: dict={}, outdsopts: dict={}, encode_errors = None, char_lengths = None, **kwargs): """ Create a SAS dataset from a pandas data frame. :param df [pd.DataFrame]: Pandas data frame containing data to write. :param table [str]: Table name. :option libref [str]: Library name. Default work. None of these options are used by this access method; they are needed for other access methods keep_outer_quotes - for character columns, have SAS keep any outer quotes instead of stripping them off. embedded_newlines - if any char columns have embedded CR or LF, set this to True to get them iported into the SAS data set LF - if embedded_newlines=True, the chacter to use for LF when transferring the data; defaults to '\x01' CR - if embedded_newlines=True, the chacter to use for CR when transferring the data; defaults to '\x02' colsep - the column seperator character used for streaming the delimmited data to SAS defaults to '\x03' colrep - the char to convert to for any embedded colsep, LF, CR chars in the data; defaults to ' ' datetimes - not implemented yet in this access method outfmts - not implemented yet in this access method labels - not implemented yet in this access method outdsopts - not implemented yet in this access method encode_errors - not implemented yet in this access method char_lengths - not implemented yet in this access method """ DATETIME_NAME = 'DATETIME26.6' DATETIME_FMT = '%Y-%m-%dT%H:%M:%S.%f' if self.sascfg.verbose: if keep_outer_quotes != False: print("'keep_outer_quotes=' is not used with this access method. option ignored.") if embedded_newlines != True: print("'embedded_newlines=' is not used with this access method. option ignored.") if LF != '\x01' or CR != '\x02' or colsep != '\x03': print("'LF=, CR= and colsep=' are not used with this access method. 
option(s) ignored.") if datetimes != {}: print("'datetimes=' is not used with this access method. option ignored.") if outfmts != {}: print("'outfmts=' is not used with this access method. option ignored.") if labels != {}: print("'labels=' is not used with this access method. option ignored.") if outdsopts != {}: print("'outdsopts=' is not used with this access method. option ignored.") if encode_errors: print("'encode_errors=' is not used with this access method. option ignored.") if char_lengths: print("'char_lengths=' is not used with this access method. option ignored.") tablepath = self._tablepath(table, libref=libref) if type(df.index) != pd.RangeIndex: warnings.warn("Note that Indexes are not transferred over as columns. Only actual coulmns are transferred") columns = [] formats = {} for i, name in enumerate(df.columns): if df[name].dtypes.kind in self.PD_NUM_TYPE: # Numeric type definition = "'{}'n num".format(name) formats[name] = lambda x: str(x) if pd.isnull(x) is False else 'NULL' elif df[name].dtypes.kind in self.PD_STR_TYPE: # Character type # NOTE: If a character string contains a single `'`, replace # it with `''`. This is the SAS equivalent to `\'`. length = df[name].map(len).max() definition = "'{}'n char({})".format(name, length) formats[name] = lambda x: "'{}'".format(x.replace("'", "''")) if pd.isnull(x) is False else 'NULL' elif df[name].dtypes.kind in self.PD_DT_TYPE: # Datetime type definition = "'{}'n num informat={} format={}".format(name, DATETIME_NAME, DATETIME_NAME) formats[name] = lambda x: "'{:{}}'DT".format(x, DATETIME_FMT) if pd.isnull(x) is False else 'NULL' else: # Default to character type # NOTE: If a character string contains a single `'`, replace # it with `''`. This is the SAS equivalent to `\'`. length = df[name].map(str).map(len).max() definition = "'{}'n char({})".format(name, length) formats[name] = lambda x: "'{}'".format(x.replace("'", "''")) if pd.isnull(x) is False else 'NULL' columns.append(definition) sql_values = [] for index, row in df.iterrows(): vals = [] for i, col in enumerate(row): func = formats[df.columns[i]] vals.append(func(col)) sql_values.append('values({})'.format(', '.join(vals))) sql_create = 'create table {} ({});'.format(tablepath, ', '.join(columns)) sql_insert = 'insert into {} {};'.format(tablepath, '\n'.join(sql_values)) self.adodb.Execute(sql_create) self.adodb.Execute(sql_insert) return None def sasdata2dataframe(self, table: str, libref: str=None, dsopts: dict=None, method: str='', **kwargs) -> 'pd.DataFrame': """ Create a pandas data frame from a SAS dataset. :param table [str]: Table name. :option libref [str]: Library name. :option dsopts [dict]: Dataset options. :option method [str]: Download method. :option tempkeep [bool]: Download the csv file if using the csv method. :option tempfile [str]: File path for the saved output file. :return [pd.DataFrame]: """ # strip off unused by this access method options from kwargs # so they can't be passes to panda later rowsep = kwargs.pop('rowsep', ' ') colsep = kwargs.pop('colsep', ' ') rowrep = kwargs.pop('rowrep', ' ') colrep = kwargs.pop('colrep', ' ') if method.upper() == 'DISK': print("This access method doesn't support the DISK method. Try CSV or MEMORY") return None if method.upper() == 'CSV': df = self.sasdata2dataframeCSV(table, libref, dsopts=dsopts, **kwargs) else: my_fmts = kwargs.pop('my_fmts', False) k_dts = kwargs.pop('dtype', None) if self.sascfg.verbose: if my_fmts != False: print("'my_fmts=' is not supported in this access method. 
option ignored.") if k_dts is not None: print("'dtype=' is only used with the CSV version of this method. option ignored.") header, rows, meta = self.read_sasdata(table, libref, dsopts=dsopts) df = pd.DataFrame.from_records(rows, columns=header, **kwargs) for col in meta.keys(): if meta[col]['FORMAT_NAME'] in self._sb.sas_date_fmts + self._sb.sas_datetime_fmts: df[col] = pd.to_datetime(df[col], errors='coerce') elif meta[col]['DATA_TYPE'] == 5: df[col] = pd.to_numeric(df[col], errors='coerce') return df def sasdata2dataframeCSV(self, table: str, libref: str ='', dsopts: dict = None, tempfile: str=None, tempkeep: bool=False, **kwargs) -> 'pd.DataFrame': """ Create a pandas data frame from a SAS dataset. :param table [str]: Table name. :option libref [str]: Library name. :option dsopts [dict]: Dataset options. :option opts [dict]: dictionary containing any of the following Proc Export options(delimiter, putnames) :option tempkeep [bool]: Download the csv file if using the csv method. :option tempfile [str]: File path for the saved output file. :return [pd.DataFrame]: """ FORMAT_STRING = '{column} {format}{length}.{precision}' EXPORT = """ data _saspy_sd2df; format {fmt}; set {tbl}; run; proc export data=_saspy_sd2df {dopt} outfile="{out}" dbms=csv replace; {exopts} run; """ k_dts = kwargs.get('dtype', None) my_fmts = kwargs.pop('my_fmts', False) if self.sascfg.verbose: if my_fmts != False: print("'my_fmts=' is not supported in this access method. option ignored.") sas_csv = '{}saspy_sd2df.csv'.format(self._sb.workpath) dopts = self._sb._dsopts(dsopts) if dsopts is not None else '' tablepath = self._tablepath(table, libref=libref) expopts = self._sb._expopts(kwargs.pop('opts', {})) # Convert any date format to one pandas can understand (ISO-8601). # Save a reference of the column name in a list so pandas can parse # the column during construction. datecols = [] fmtlist = [] meta = self._schema(table, libref) for name, col in meta.items(): if col['FORMAT_NAME'] in self._sb.sas_date_fmts: datecols.append(name) col_format = self.FMT_DEFAULT_DATE_NAME col_length = self.FMT_DEFAULT_DATE_LENGTH col_precis = self.FMT_DEFAULT_DATE_PRECISION elif col['FORMAT_NAME'] in self._sb.sas_datetime_fmts: datecols.append(name) col_format = self.FMT_DEFAULT_DATETIME_NAME col_length = self.FMT_DEFAULT_DATETIME_LENGTH col_precis = self.FMT_DEFAULT_DATETIME_PRECISION # elif FIXME TIME FORMATS else: col_format = col['FORMAT_NAME'] col_length = col['FORMAT_LENGTH'] col_precis = col['FORMAT_DECIMAL'] if col['FORMAT_NAME']: full_format = FORMAT_STRING.format( column=col['COLUMN_NAME'], format=col_format, length=col_length, precision=col_precis) fmtlist.append(full_format) export = EXPORT.format(fmt=' '.join(fmtlist), tbl=tablepath, dopt=dopts, exopts=expopts, out=sas_csv) # Use `LanguageService.Submit` instead of `submit` for a slight # performance bump. We don't need the log or listing here so skip # the wrapper function. 
self.workspace.LanguageService.Submit(export) outstring = self._getfile(sas_csv, decode=True) # Write temp file if requested by user if kwargs.get('tempkeep') is True and kwargs.get('tempfile') is not None: with open(kwargs['tempfile'], 'w') as f: f.write(outstring) df = pd.read_csv(io.StringIO(outstring), parse_dates=datecols, **kwargs) if k_dts is None: # don't override these if user provided their own dtypes for col in meta.keys(): if meta[col]['FORMAT_NAME'] in self._sb.sas_date_fmts + self._sb.sas_datetime_fmts: df[col] = pd.to_datetime(df[col], errors='coerce') return df def upload(self, local: str, remote: str, overwrite: bool=True, permission: str='', **kwargs): """ Upload a file to the SAS server. :param local [str]: Local filename. :param remote [str]: Local filename. :option overwrite [bool]: Overwrite the file if it exists. :option permission [str]: See SAS filename statement documentation. """ perms = "PERMISSION='{}'".format(permission) if permission else '' valid = self._sb.file_info(remote, quiet=True) if valid == {}: # Parameter `remote` references a directory. Default to using the # filename in `local` path. remote_file = remote + self._sb.hostsep + os.path.basename(local) elif valid is not None and overwrite is False: # Parameter `remote` references a file that exists but we cannot # overwrite it. # TODO: Raise exception here instead of returning dict return {'Success': False, 'LOG': 'File {} exists and overwrite was set to False. Upload was stopped.'.format(remote)} else: remote_file = remote with open(local, 'rb') as f: fobj = self.workspace.FileService.AssignFileref('infile', 'DISK', remote_file, perms, '') stream = fobj[0].OpenBinaryStream(self.STREAM_WRITE) stream.Write(f.read()) stream.Close() self.workspace.FileService.DeassignFileref(fobj[0].FilerefName) return {'Success': True, 'LOG': 'File successfully written using FileService.'} def download(self, local: str, remote: str, overwrite: bool=True, **kwargs): """ Download a file from the SAS server. :param local [str]: Local filename. :param remote [str]: Local filename. :option overwrite [bool]: Overwrite the file if it exists. """ valid = self._sb.file_info(remote, quiet=True) if valid is None: # Parameter `remote` references an invalid file path. # TODO: Raise exception here instead of returning dict return {'Success': False, 'LOG': 'File {} does not exist.'.format(remote)} elif valid == {}: # Parameter `remote` references a directory. # TODO: Raise exception here instead of returning dict return {'Success': False, 'LOG': 'File {} is a directory.'.format(remote)} if os.path.isdir(local) is True: # Parameter `local` references a directory. Default to using the # filename in `remote` path. local_file = os.path.join(local, remote.rpartition(self._sb.hostsep)[2]) else: local_file = local with open(local_file, 'wb') as f: f.write(self._getfile(remote)) return {'Success': True, 'LOG': 'File successfully read using FileService.'}
apache-2.0
6,462,494,287,132,911,000
38.135933
154
0.575229
false
CindyvdVries/News_Crawler
Sat2/sat/pipelines.py
1
2575
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.utils.conf import get_config
from scrapy.exceptions import DropItem
import pika.credentials
import pika
import json
import logging


class JsonWriterPipeline(object):

    def __init__(self):
        self.file = open('items.jl', 'wb')

    def process_item(self, item, spider):
        line = json.dumps(dict(item)) + "\n"
        self.file.write(line)
        return item


class DuplicatePipeline(object):

    def __init__(self):
        self.ids_seen = set()

    def process_item(self, item, spider):
        if item['id'] in self.ids_seen:
            raise DropItem("Duplicate item found: %s" % item)
        else:
            self.ids_seen.add(item['id'])
            return item


class RabbitMQPipeline(object):

    def __init__(self):
        self.logger = logging.getLogger(self.__class__.__name__)
        self.logger.info("Constructing rabbitmq logger")
        username = get_config().get('rabbitmq', 'username')
        password = get_config().get('rabbitmq', 'password')
        credentials = pika.credentials.PlainCredentials(
            username=username,
            password=password
        )
        host = get_config().get('rabbitmq', 'host')
        parameters = pika.ConnectionParameters(
            host=host,
            port=5672,
            virtual_host='/',
            credentials=credentials
        )
        connection = pika.BlockingConnection(
            parameters=parameters
        )
        channel = connection.channel()
        # we're publishing to two channels, the download request
        # so that a download queue can pick it up
        channel.queue_declare('crisis_download_requests')
        # and a fanout exchange to notify listeners that we've crawled something
        channel.exchange_declare(
            'crisis_crawl',
            type='fanout'
        )
        self.channel = channel

    def process_item(self, item, spider):
        self.logger.info('sending message')
        serialized = json.dumps(dict(item))
        # send to the work queue
        self.channel.basic_publish(
            exchange='',
            routing_key='crisis_download_requests',
            body='%s' % (serialized,)
        )
        # and to the channel
        self.channel.basic_publish(
            exchange='crisis_crawl',
            routing_key='',
            body='%s' % (serialized,)
        )
        return item
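The module's own header comment points at the ITEM_PIPELINES setting; a minimal sketch of how these pipelines could be registered in a Scrapy settings.py. The module path "sat.pipelines" is inferred from the row's path above, and the priority numbers are arbitrary placeholders.

# settings.py (sketch)
ITEM_PIPELINES = {
    'sat.pipelines.DuplicatePipeline': 100,
    'sat.pipelines.JsonWriterPipeline': 200,
    'sat.pipelines.RabbitMQPipeline': 300,
}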
gpl-3.0
1,274,635,759,553,769,500
30.024096
80
0.593398
false
ltn100/prompty
test/test_functionContainer.py
1
1265
#!/usr/bin/env python
# vim:set softtabstop=4 shiftwidth=4 tabstop=4 expandtab:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import sys
import os

from test import prompty
from test import UnitTestWrapper


class MyFunctions(prompty.functionBase.PromptyFunctions):

    def testFunc(self):
        return "This Is A Test"

    def _hiddenFunc(self):
        return "This is secret"


class FunctionContainerTests(UnitTestWrapper):

    def test_noname(self):
        c = prompty.functionContainer.FunctionContainer()
        self.assertRaises(TypeError, c._call)

    def test_extendFunctionContainer(self):
        c = prompty.functionContainer.FunctionContainer()
        # Import this module
        c.addFunctionsFromModule(sys.modules[__name__])
        self.assertEqual(r"This Is A Test", c._call("testFunc"))
        self.assertRaises(KeyError, c._call, "_hiddenFunc")

    def test_extendFunctionContainerFromDir(self):
        c = prompty.functionContainer.FunctionContainer()
        # Import this directory
        c.addFunctionsFromDir(os.path.dirname(sys.modules[__name__].__file__))
        self.assertEqual(r"This Is A Test", c._call("testFunc"))
mit
-4,404,599,834,782,620,000
31.435897
78
0.705138
false
PieterMostert/Lipgloss
model/serializers/oxideserializer.py
1
2233
import json

try:
    from lipgloss.core_data import Oxide
except:
    from ..lipgloss.core_data import Oxide


class OxideSerializer(object):
    """A class to support serializing/deserializing of a single oxide and
    dictionaries of oxides. Needs improvement"""

    @staticmethod
    def get_serializable_oxide(oxide):
        """A serializable oxide is one that can be serialized to JSON using
        the python json encoder."""
        serializable_oxide = {}
        serializable_oxide["molar_mass"] = oxide.molar_mass
        serializable_oxide["flux"] = oxide.flux
        serializable_oxide["min_threshhold"] = oxide.min_threshhold
        return serializable_oxide

    @staticmethod
    def serialize(oxide):
        """Serialize a single Oxide object to JSON."""
        return json.dumps(OxideSerializer.get_serializable_oxide(oxide), indent=4)

    @staticmethod
    def serialize_dict(oxide_dict):
        """Convert a dictionary of Oxide objects to serializable dictionary.
        Use json.dump(output, file) to save output to file"""
        serializable_dict = {}
        for index, oxide in oxide_dict.items():
            serializable_dict[index] = OxideSerializer.get_serializable_oxide(oxide)
        return serializable_dict

    @staticmethod
    def get_oxide(serialized_oxide):
        """Convert a serialized oxide (a dict) returned by the JSON decoder
        into a Oxide object."""
        oxide = Oxide(serialized_oxide["molar_mass"],
                      serialized_oxide["flux"],
                      serialized_oxide["min_threshhold"])
        return oxide

    @staticmethod
    def deserialize(json_str):
        """Deserialize a single oxide from JSON to a Oxide object."""
        serialized_oxide_dict = json.loads(json_str)
        return OxideSerializer.get_oxide(serialized_oxide_dict)

    @staticmethod
    def deserialize_dict(serialized_oxide_dict):
        """Deserialize a number of oxides from JSON to a dict containing Oxide
        objects, indexed by Oxide name."""
        oxide_dict = {}
        for i, serialized_oxide in serialized_oxide_dict.items():
            oxide_dict[i] = OxideSerializer.get_oxide(serialized_oxide)
        return oxide_dict
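A short round-trip sketch using only the API shown above. The numeric values are made-up illustration data, and the positional argument order for Oxide is assumed to match get_oxide() in the class.

# Hypothetical Oxide instance: (molar_mass, flux, min_threshhold), per get_oxide().
sio2 = Oxide(60.08, 0, 0.0)

json_str = OxideSerializer.serialize(sio2)
restored = OxideSerializer.deserialize(json_str)
assert restored.molar_mass == sio2.molar_mass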
gpl-3.0
1,879,315,643,800,368,000
39.6
119
0.653829
false
saginoam/RedisModuleTimeSeries
benchmark/redis_ts.py
1
1285
import random
import redis
import json

client = redis.Redis()

client.execute_command('TS.CREATEDOC', "tsdoctest", json.dumps({
    "interval": "hour",
    "timestamp": "2016:01:01 00:00:00",
    "key_fields": ["userId", "deviceId"],
    "ts_fields": ["pagesVisited", "storageUsed", "trafficUsed"]
}))


def doc(userId, deviceId, hour, minute):
    return json.dumps({
        "userId": userId,
        "deviceId": deviceId,
        "pagesVisited": random.randint(1, 10),
        "storageUsed": random.randint(1, 10),
        "trafficUsed": random.randint(1, 10),
        "timestamp": "2016:01:01 %.2d:%.2d:00" % (hour, minute)
    })


# Simulate a data stream such as logstash with input kafka output redis
for hour in range(0, 24):
    for minute in range(0, 60, 5):
        client.execute_command('TS.INSERTDOC', "tsdoctest", doc("user1", "deviceA", hour, minute))
        client.execute_command('TS.INSERTDOC', "tsdoctest", doc("user1", "deviceA", hour, minute))
        client.execute_command('TS.INSERTDOC', "tsdoctest", doc("user1", "deviceB", hour, minute))
        client.execute_command('TS.INSERTDOC', "tsdoctest", doc("user2", "deviceB", hour, minute))
        client.execute_command('TS.INSERTDOC', "tsdoctest", doc("user2", "deviceC", hour, minute))
mit
-8,531,883,941,304,618,000
39.15625
98
0.627237
false
FelixLoether/blog-project
tests/posts/test_edit.py
1
3272
from tests.conftest import get_token def edit_post(postsetup, token, title='title', content='content', tags='', action='save', url='/1/edit'): return postsetup.app.post(url, data={ 'token': token, 'title': title, 'content': content, 'tags': tags, 'action': action }, follow_redirects=True) def test_anonymous_cannot_get_edit_page(postsetup): r = postsetup.app.get('/1/edit', follow_redirects=True) assert r.status_code == 403 assert 'You need to be logged in to view that content' in r.data postsetup.done() def test_anonymous_cannot_edit_post(postsetup): postsetup.login() r = postsetup.app.get('/1/edit') token = get_token(r.data) postsetup.app.get('/logout') title = 'title-{0}'.format(token) r = edit_post(postsetup, token, title=title) assert r.status_code == 403 assert 'You need to be logged in to view that content' in r.data r = postsetup.app.get('/') assert title not in r.data postsetup.done() def test_anonymous_cannot_preview_post(postsetup): postsetup.login() r = postsetup.app.get('/1/edit') token = get_token(r.data) postsetup.app.get('/logout') r = edit_post(postsetup, token) assert r.status_code == 403 assert 'You need to be logged in to view that content' in r.data postsetup.done() def test_admin_can_get_edit_page(postsetup): postsetup.login() r = postsetup.app.get('/1/edit') assert r.status_code == 200 assert 'Create Post' in r.data postsetup.done() def test_admin_can_edit_post(postsetup): postsetup.login() r = postsetup.app.get('/1/edit') token = get_token(r.data) title = 'title-{0}'.format(token) r = edit_post(postsetup, token, title=title) assert r.status_code == 200 assert title in r.data assert 'Success' in r.data postsetup.done() def test_admin_can_preview_post(postsetup): postsetup.login() r = postsetup.app.get('/1/edit') token = get_token(r.data) title = 'title-{0}'.format(token) r = edit_post(postsetup, token, title=title, action='preview') assert r.status_code == 200 assert '<article>' in r.data assert title in r.data postsetup.done() def test_invalid_token_prevents_creation(postsetup): postsetup.login() r = edit_post(postsetup, 'invalid-token') assert 'Tokens did not match.' in r.data postsetup.done() def test_cannot_get_edit_page_for_nonexisting_post(postsetup): postsetup.login() r = postsetup.app.get('/2/edit') assert r.status_code == 404 assert 'That post does not exist.' in r.data postsetup.done() def test_cannot_edit_nonexisting_post(postsetup): postsetup.login() r = postsetup.app.get('/1/edit') token = get_token(r.data) r = edit_post(postsetup, token, url='/2/edit') assert r.status_code == 404 assert 'That post does not exist.' in r.data postsetup.done() def test_cannot_preview_nonexisting_post(postsetup): postsetup.login() r = postsetup.app.get('/1/edit') token = get_token(r.data) r = edit_post(postsetup, token, url='/2/edit', action='preview') assert r.status_code == 404 assert 'That post does not exist.' in r.data postsetup.done()
mit
-8,354,589,686,144,787,000
25.819672
74
0.648227
false
grammarware/slps
topics/recovery/hunter/getgrammar.py
1
4880
#!/Library/Frameworks/Python.framework/Versions/3.1/bin/python3 # -*- coding: utf-8 -*- import os, sys import xml.etree.ElementTree as ET sys.path.append(os.getcwd().split('projects')[0]+'projects/slps/shared/python') import BGF3 from functools import reduce debug = False #debug = True defaults = {'definition-separator-symbol':'|||||'} config = {} masked = {} always_terminals = [] always_nonterminals = [] ignore_tokens = [] ignore_lines = [] nonterminals_alphabet = ['-','_'] nonterminals_start = [] multiples = [] aliases = {} metasymbols = \ [ 'DEFINING-SYMBOL', 'TERMINATOR-SYMBOL', 'MULTIPLE-DEFINING-SYMBOL', 'DEFINITION-SEPARATOR-SYMBOL', 'START-GROUP-SYMBOL', 'END-GROUP-SYMBOL', 'START-OPTION-SYMBOL', 'END-OPTION-SYMBOL', 'START-REPETITION-STAR-SYMBOL', 'END-REPETITION-STAR-SYMBOL', 'START-REPETITION-PLUS-SYMBOL', 'END-REPETITION-PLUS-SYMBOL', 'START-SEPLIST-STAR-SYMBOL', 'END-SEPLIST-STAR-SYMBOL', 'START-SEPLIST-PLUS-SYMBOL', 'END-SEPLIST-PLUS-SYMBOL', 'POSTFIX-OPTION-SYMBOL', 'POSTFIX-REPETITION-STAR-SYMBOL', 'POSTFIX-REPETITION-PLUS-SYMBOL', ] specials = \ [ 'POSSIBLE-TERMINATOR-SYMBOL', 'CONCATENATE-SYMBOL', 'LINE-CONTINUATION-SYMBOL' 'START-TERMINAL-SYMBOL', 'END-TERMINAL-SYMBOL', 'START-NONTERMINAL-SYMBOL', 'END-NONTERMINAL-SYMBOL', 'NONTERMINAL-IF-CONTAINS', 'NONTERMINAL-IF-DEFINED', 'NONTERMINAL-IF-UPPERCASE', 'NONTERMINAL-IF-LOWERCASE', 'NONTERMINAL-IF-CAMELCASE', 'NONTERMINAL-IF-MIXEDCASE', 'TERMINAL-IF-UNDEFINED', 'TERMINAL-IF-UPPERCASE', 'TERMINAL-IF-LOWERCASE', 'TERMINAL-IF-CAMELCASE', 'TERMINAL-IF-MIXEDCASE', 'IGNORE-EXTRA-NEWLINES', 'GLUE-NONALPHANUMERIC-TERMINALS', ] specials.extend(metasymbols) def isAlpha(x): return reduce(lambda a,b:a and (b.isalpha() or b in nonterminals_alphabet),x,True) def readConfig(f): global debug cfg = ET.parse(f) for e in cfg.findall('*'): if e.tag == 'mask': if e.findall('terminal'): masked[e.findtext('token')] = e.findtext('terminal') elif e.findall('epsilon'): masked[e.findtext('token')] = 'EPSILON' else: print('Unknown masked token:',e.findtext('token')) elif e.tag == 'nonterminals-may-contain': for x in e.text: nonterminals_alphabet.append(x) elif e.tag == 'nonterminals-may-start-with': for x in e.text: nonterminals_start.append(x) elif e.tag == 'ignore': #config[e.tag] = '' for x in e.findall('*'): if x.tag == 'newline': ignore_tokens.append('\n') ignore_tokens.append('@@@0-0') elif x.tag == 'space': ignore_tokens.append(' ') elif x.tag == 'lines-containing': ignore_lines.append(x.text) elif x.tag == 'same-indentation': ignore_tokens.append('@@@1-1') else: ignore_tokens.append(x.text) elif e.tag == 'alias': for x in e.findall('*'): if x.tag not in aliases.keys(): aliases[x.tag] = [] aliases[x.tag].append(x.text) elif e.text: config[e.tag] = e.text.replace('\\n','\n') else: config[e.tag] = '' if e.tag in ('nonterminal-if-camelcase','nonterminal-if-mixedcase','nonterminal-if-uppercase','nonterminal-if-lowercase','nonterminal-if-contains','nonterminal-if-defined','decompose-symbols'): if e.text: config[e.tag] = e.text else: config[e.tag] = '' for x in e.findall('except'): always_terminals.append(x.text) if e.tag in ('terminal-if-camelcase','terminal-if-mixedcase','terminal-if-uppercase','terminal-if-lowercase','terminal-if-undefined'): config[e.tag] = '' for x in e.findall('except'): always_nonterminals.append(x.text) if debug: print('Ok',config) if __name__ == "__main__": if len(sys.argv) != 4: print('Usage:') print(' getgrammar.py input.txt config.edd output.src') sys.exit(-1) readConfig(sys.argv[2]) # default values for some metasymbols 
if 'terminator-symbol' not in config.keys(): config['terminator-symbol'] = ';' print('Terminator metasymbol not specified, we use the default value of ";".') if 'defining-symbol' not in config.keys(): config['defining-symbol'] = ':' print('Defining metasymbol not specified, we use the default value of ":".') f = open(sys.argv[1],'r') fragment = False lines = [] for line in f.readlines(): tline = line.strip() if fragment: lines.append(line) if tline == config['terminator-symbol']: fragment = False #lines.append('') if tline == '': print('An empty line inside a production rule: suspect that a terminal metasymbol has been forgotten.') fragment = False else: if tline[-len(config['defining-symbol']):] == config['defining-symbol'] and isAlpha(tline[:-len(config['defining-symbol'])]): #print('Found nonterminal',tline[:-len(config['defining-symbol'])]) lines.append(line) fragment = True f.close() f = open(sys.argv[3],'w') f.writelines(lines) f.close() print(len(lines),'lines extracted')
bsd-3-clause
7,472,728,045,584,283,000
28.93865
195
0.665779
false
kulbirsaini/pdfrw-fork
examples/rl1/booklet.py
1
1588
#!/usr/bin/env python ''' usage: booklet.py my.pdf Uses Form XObjects and reportlab to create booklet.my.pdf. Demonstrates use of pdfrw with reportlab. ''' import sys import os from reportlab.pdfgen.canvas import Canvas import find_pdfrw from pdfrw import PdfReader from pdfrw.buildxobj import pagexobj from pdfrw.toreportlab import makerl def read_and_double(inpfn): pages = PdfReader(inpfn, decompress=False).pages pages = [pagexobj(x) for x in pages] if len(pages) & 1: pages.append(pages[0]) # Sentinel -- get same size for back as front xobjs = [] while len(pages) > 2: xobjs.append((pages.pop(), pages.pop(0))) xobjs.append((pages.pop(0), pages.pop())) xobjs += [(x,) for x in pages] return xobjs def make_pdf(outfn, xobjpairs): canvas = Canvas(outfn) for xobjlist in xobjpairs: x = y = 0 for xobj in xobjlist: x += xobj.BBox[2] y = max(y, xobj.BBox[3]) canvas.setPageSize((x,y)) # Handle blank back page if len(xobjlist) > 1 and xobjlist[0] == xobjlist[-1]: xobjlist = xobjlist[:1] x = xobjlist[0].BBox[2] else: x = 0 y = 0 for xobj in xobjlist: canvas.saveState() canvas.translate(x, y) canvas.doForm(makerl(canvas, xobj)) canvas.restoreState() x += xobj.BBox[2] canvas.showPage() canvas.save() inpfn, = sys.argv[1:] outfn = 'booklet.' + os.path.basename(inpfn) make_pdf(outfn, read_and_double(inpfn))
mit
5,298,639,258,305,954,000
22.014493
77
0.595718
false
varunarya10/oslo.utils
oslo_utils/tests/test_reflection.py
1
8493
# -*- coding: utf-8 -*- # Copyright (C) 2012 Yahoo! Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslotest import base as test_base import six import testtools from oslo_utils import reflection if six.PY3: RUNTIME_ERROR_CLASSES = ['RuntimeError', 'Exception', 'BaseException', 'object'] else: RUNTIME_ERROR_CLASSES = ['RuntimeError', 'StandardError', 'Exception', 'BaseException', 'object'] def dummy_decorator(f): @six.wraps(f) def wrapper(*args, **kwargs): return f(*args, **kwargs) return wrapper def mere_function(a, b): pass def function_with_defs(a, b, optional=None): pass def function_with_kwargs(a, b, **kwargs): pass class Class(object): def method(self, c, d): pass @staticmethod def static_method(e, f): pass @classmethod def class_method(cls, g, h): pass class CallableClass(object): def __call__(self, i, j): pass class ClassWithInit(object): def __init__(self, k, l): pass class CallbackEqualityTest(test_base.BaseTestCase): def test_different_simple_callbacks(self): def a(): pass def b(): pass self.assertFalse(reflection.is_same_callback(a, b)) def test_static_instance_callbacks(self): class A(object): @staticmethod def b(a, b, c): pass a = A() b = A() self.assertTrue(reflection.is_same_callback(a.b, b.b)) def test_different_instance_callbacks(self): class A(object): def b(self): pass def __eq__(self, other): return True b = A() c = A() self.assertFalse(reflection.is_same_callback(b.b, c.b)) self.assertTrue(reflection.is_same_callback(b.b, c.b, strict=False)) class GetCallableNameTest(test_base.BaseTestCase): def test_mere_function(self): name = reflection.get_callable_name(mere_function) self.assertEqual('.'.join((__name__, 'mere_function')), name) def test_method(self): name = reflection.get_callable_name(Class.method) self.assertEqual('.'.join((__name__, 'Class', 'method')), name) def test_instance_method(self): name = reflection.get_callable_name(Class().method) self.assertEqual('.'.join((__name__, 'Class', 'method')), name) def test_static_method(self): name = reflection.get_callable_name(Class.static_method) if six.PY3: self.assertEqual('.'.join((__name__, 'Class', 'static_method')), name) else: # NOTE(imelnikov): static method are just functions, class name # is not recorded anywhere in them. 
self.assertEqual('.'.join((__name__, 'static_method')), name) def test_class_method(self): name = reflection.get_callable_name(Class.class_method) self.assertEqual('.'.join((__name__, 'Class', 'class_method')), name) def test_constructor(self): name = reflection.get_callable_name(Class) self.assertEqual('.'.join((__name__, 'Class')), name) def test_callable_class(self): name = reflection.get_callable_name(CallableClass()) self.assertEqual('.'.join((__name__, 'CallableClass')), name) def test_callable_class_call(self): name = reflection.get_callable_name(CallableClass().__call__) self.assertEqual('.'.join((__name__, 'CallableClass', '__call__')), name) # These extended/special case tests only work on python 3, due to python 2 # being broken/incorrect with regard to these special cases... @testtools.skipIf(not six.PY3, 'python 3.x is not currently available') class GetCallableNameTestExtended(test_base.BaseTestCase): # Tests items in http://legacy.python.org/dev/peps/pep-3155/ class InnerCallableClass(object): def __call__(self): pass def test_inner_callable_class(self): obj = self.InnerCallableClass() name = reflection.get_callable_name(obj.__call__) expected_name = '.'.join((__name__, 'GetCallableNameTestExtended', 'InnerCallableClass', '__call__')) self.assertEqual(expected_name, name) def test_inner_callable_function(self): def a(): def b(): pass return b name = reflection.get_callable_name(a()) expected_name = '.'.join((__name__, 'GetCallableNameTestExtended', 'test_inner_callable_function', '<locals>', 'a', '<locals>', 'b')) self.assertEqual(expected_name, name) def test_inner_class(self): obj = self.InnerCallableClass() name = reflection.get_callable_name(obj) expected_name = '.'.join((__name__, 'GetCallableNameTestExtended', 'InnerCallableClass')) self.assertEqual(expected_name, name) class GetCallableArgsTest(test_base.BaseTestCase): def test_mere_function(self): result = reflection.get_callable_args(mere_function) self.assertEqual(['a', 'b'], result) def test_function_with_defaults(self): result = reflection.get_callable_args(function_with_defs) self.assertEqual(['a', 'b', 'optional'], result) def test_required_only(self): result = reflection.get_callable_args(function_with_defs, required_only=True) self.assertEqual(['a', 'b'], result) def test_method(self): result = reflection.get_callable_args(Class.method) self.assertEqual(['self', 'c', 'd'], result) def test_instance_method(self): result = reflection.get_callable_args(Class().method) self.assertEqual(['c', 'd'], result) def test_class_method(self): result = reflection.get_callable_args(Class.class_method) self.assertEqual(['g', 'h'], result) def test_class_constructor(self): result = reflection.get_callable_args(ClassWithInit) self.assertEqual(['k', 'l'], result) def test_class_with_call(self): result = reflection.get_callable_args(CallableClass()) self.assertEqual(['i', 'j'], result) def test_decorators_work(self): @dummy_decorator def special_fun(x, y): pass result = reflection.get_callable_args(special_fun) self.assertEqual(['x', 'y'], result) class AcceptsKwargsTest(test_base.BaseTestCase): def test_no_kwargs(self): self.assertEqual(False, reflection.accepts_kwargs(mere_function)) def test_with_kwargs(self): self.assertEqual(True, reflection.accepts_kwargs(function_with_kwargs)) class GetClassNameTest(test_base.BaseTestCase): def test_std_exception(self): name = reflection.get_class_name(RuntimeError) self.assertEqual('RuntimeError', name) def test_class(self): name = reflection.get_class_name(Class) 
self.assertEqual('.'.join((__name__, 'Class')), name) def test_instance(self): name = reflection.get_class_name(Class()) self.assertEqual('.'.join((__name__, 'Class')), name) def test_int(self): name = reflection.get_class_name(42) self.assertEqual('int', name) class GetAllClassNamesTest(test_base.BaseTestCase): def test_std_class(self): names = list(reflection.get_all_class_names(RuntimeError)) self.assertEqual(RUNTIME_ERROR_CLASSES, names) def test_std_class_up_to(self): names = list(reflection.get_all_class_names(RuntimeError, up_to=Exception)) self.assertEqual(RUNTIME_ERROR_CLASSES[:-2], names)
apache-2.0
-899,403,400,105,379,200
29.44086
79
0.601554
false
yukaritan/qtbot3
qtbot3_service/plugins/achievements.py
1
2926
from util import irc from util.garbage import rainbow from util.handler_utils import prehook, get_value, set_value, get_target, cmdhook, fetch_all from qtbot3_common.types.message import Message disconnection_ladder = { 1: "Connection reset by peer", 5: "Connection reset by beer", 10: "Connection reset by queer", 25: "Connection reset by Cher", 50: "Connection reset by ...deer?", 100: "Connection reset by ... enough already. I don't know.. Gears?", 250: "Connection reset 250 times. Seriously?", 500: "You've lost your connection 500 times. Do you even internet?", 1000: "One thousand disconnects. A thousand. One, three zeros. Holy shit." } def get_achievement(message: Message, match: dict, nick: str, count: int) -> str: print("Achievement progress for {user}: {count}".format(count=count, **match)) if count in disconnection_ladder: print("Dealt achievement \"" + disconnection_ladder[count] + "\" to", match['nick']) if not 'target' in match or match['target'] is None: return target = get_target(message, nick) msg = "{nick} has unlocked an achievement: {desc}" msg = rainbow(msg.format(nick=match['nick'], desc=disconnection_ladder[count])) return irc.chat_message(target, msg) return None @prehook(':(?P<nick>[^\s]+)' '!(?P<user>[^\s]+)' ' QUIT' '( :(?P<message>.*))?') @prehook(':(?P<nick>[^\s]+)' '!(?P<user>[^\s]+)' ' PART' ' (?P<target>[^\s]+)' '( :(?P<message>.*))?') def achievement_prehook_part(message: Message, match: dict, nick: str): try: key = 'chiev_partcount_' + match['user'] print("old value:", get_value(key)) count = (get_value(key) or 0) + 1 print("new value:", count) set_value(key, count) return get_achievement(message, match, nick, count) except Exception as ex: print("achievement prehook exception:", ex) @prehook(':(?P<nick>[^\s]+)' '!(?P<user>[^\s]+)' ' JOIN' ' (?P<target>[^\s]+)') def achievement_prehook_join(message: Message, match: dict, nick: str): try: key = 'chiev_partcount_' + match['user'] count = get_value(key) or 0 return get_achievement(message, match, nick, count) except Exception as ex: print("achievement prehook exception:", ex) @cmdhook('aimbot (?P<nick>[^\s]+)') def achievement_cheat_codes(message: Message, match: dict, nick: str) -> str: fetched = fetch_all(keyfilter='user_', valuefilter=match['nick']) target = get_target(message, nick) output = [] for key in fetched: user = key.split('_', 1)[1] key = 'chiev_partcount_' + user count = get_value(key) or 0 msg = rainbow("%s has disconnected %d times" % (user, count)) output.append(irc.chat_message(target, msg)) return output
gpl-3.0
-309,478,902,671,180,200
32.25
92
0.598086
false
CobwebOrg/cobweb-django
core/migrations/0014_auto_20181026_1019.py
1
1216
# Generated by Django 2.1.2 on 2018-10-26 17:19 from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('core', '0013_user_terms_accepted'), ] operations = [ migrations.AddField( model_name='organization', name='created_at', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ), migrations.AddField( model_name='organization', name='updated_at', field=models.DateTimeField(auto_now=True), ), migrations.AddField( model_name='user', name='created_at', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ), migrations.AddField( model_name='user', name='updated_at', field=models.DateTimeField(auto_now=True), ), migrations.AlterField( model_name='user', name='terms_accepted', field=models.BooleanField(default=False), ), ]
mit
243,347,793,550,178,400
28.658537
93
0.570724
false
lmorchard/django-teamwork
teamwork/templatetags/teamwork_tags.py
1
3741
""" ``django-teamwork`` template tags, loaded like so: {% load teamwork_tags %} """ from __future__ import unicode_literals from django import template from django.contrib.auth import get_user_model from django.contrib.auth.models import Group, AnonymousUser from django.template import get_library from django.template import InvalidTemplateLibrary from django.template.defaulttags import LoadNode from ..shortcuts import build_policy_admin_links register = template.Library() class ObjectPermissionsNode(template.Node): def __init__(self, user_var, obj, context_var): self.user_var = template.Variable(user_var) self.obj = template.Variable(obj) self.context_var = context_var def render(self, context): user_var = self.user_var.resolve(context) if isinstance(user_var, get_user_model()): self.user = user_var elif isinstance(user_var, AnonymousUser): self.user = user_var else: raise Exception("User instance required (got %s)" % user_var.__class__) obj = self.obj.resolve(context) perms = self.user.get_all_permissions(obj) context[self.context_var] = perms return '' @register.tag def get_all_obj_permissions(parser, token): """ Get all of a user's permissions granted by an object. For example: {% get_all_obj_permissions user for obj as "context_var" %} """ bits = token.split_contents() format = '{% get_all_obj_permissions user for obj as "context_var" %}' if len(bits) != 6 or bits[2] != 'for' or bits[4] != 'as': raise template.TemplateSyntaxError("get_all_permissions tag should be in " "format: %s" % format) _, user_var, _, obj, _, context_var = bits if context_var[0] != context_var[-1] or context_var[0] not in ('"', "'"): raise template.TemplateSyntaxError( "get_all_obj_permissions tag's context_var argument should be " "quoted") context_var = context_var[1:-1] return ObjectPermissionsNode(user_var, obj, context_var) class PolicyAdminLinksNode(template.Node): def __init__(self, user_var, obj, context_var): self.user_var = template.Variable(user_var) self.obj = template.Variable(obj) self.context_var = context_var def render(self, context): user_var = self.user_var.resolve(context) if isinstance(user_var, get_user_model()): self.user = user_var elif isinstance(user_var, AnonymousUser): self.user = user_var else: raise Exception("User instance required (got %s)" % user_var.__class__) obj = self.obj.resolve(context) links = build_policy_admin_links(self.user, obj) context[self.context_var] = links return '' @register.tag def get_policy_admin_links(parser, token): """ Get a set of links to admin pages to manage policy for an object by a user {% policy_admin_links user for obj as "context_var" %} """ bits = token.split_contents() format = '{% policy_admin_links user for obj as "context_var" %}' if len(bits) != 6 or bits[2] != 'for' or bits[4] != 'as': raise template.TemplateSyntaxError("get_all_permissions tag should be in " "format: %s" % format) _, user_var, _, obj, _, context_var = bits if context_var[0] != context_var[-1] or context_var[0] not in ('"', "'"): raise template.TemplateSyntaxError( "policy_admin_links tag's context_var argument should be " "quoted") context_var = context_var[1:-1] return PolicyAdminLinksNode(user_var, obj, context_var)
mpl-2.0
7,748,176,117,772,263,000
31.25
82
0.624967
false
summychou/TBTracker
src/TBTracker_Gui/TBTracker_Gui_Button.py
1
4941
# -*- coding: utf-8 -*- from PyQt5.QtCore import QCoreApplication from PyQt5.QtGui import QIcon from PyQt5.QtWidgets import QPushButton ''' @author : Zhou Jian @email : [email protected] @version : V1.1 @date : 2018.04.22 ''' class BaseButton(QPushButton): ''' 基类按钮 ''' def __init__(self, name=""): super(BaseButton, self).__init__(name) class SearchButton(BaseButton): ''' 搜素按钮,继承自基类按钮 ''' def __init__(self): super(SearchButton, self).__init__(name="商品搜索") self.function_init() # 功能绑定 - def function_init(self): pass class AddButton(BaseButton): ''' 添加标签按钮,继承自基类按钮 ''' def __init__(self): super(AddButton, self).__init__(name="添加标签") self.function_init() # 功能绑定 - def function_init(self): pass class AttachButton(BaseButton): ''' 标注标签按钮,继承自基类按钮 ''' def __init__(self): super(AttachButton, self).__init__(name="标注标签") self.function_init() # 功能绑定 - def function_init(self): pass class ImportButton(BaseButton): ''' 导入数据按钮,继承自基类按钮 ''' def __init__(self): super(ImportButton, self).__init__(name="导入数据") self.function_init() # 功能绑定 - def function_init(self): pass class ExportButton(BaseButton): ''' 导出数据按钮,继承自基类按钮 ''' def __init__(self): super(ExportButton, self).__init__(name="导出数据") self.function_init() # 功能绑定 - def function_init(self): pass class InsertButton(BaseButton): ''' 添加数据按钮,继承自基类按钮 ''' def __init__(self): super(InsertButton, self).__init__(name="添加数据") self.function_init() # 功能绑定 - def function_init(self): pass class DeleteButton(BaseButton): ''' 删除数据按钮,继承自基类按钮 ''' def __init__(self): super(DeleteButton, self).__init__(name="删除数据") self.function_init() # 功能绑定 - def function_init(self): pass class ConfirmButton(BaseButton): ''' 确定按钮,继承自基类按钮 ''' def __init__(self): super(ConfirmButton, self).__init__(name="确定") self.function_init() # 功能绑定 - def function_init(self): pass class CancelButton(BaseButton): ''' 取消按钮,继承自基类按钮 ''' def __init__(self): super(CancelButton, self).__init__(name="取消") self.function_init() # 功能绑定 - def function_init(self): pass class GlobalSelectButton(BaseButton): ''' 全局按钮,继承自基类按钮 ''' def __init__(self): super(GlobalSelectButton, self).__init__(name="全局选择") self.function_init() # 功能绑定 - def function_init(self): pass class AllSelectButton(BaseButton): ''' 全选按钮,继承自基类按钮 ''' def __init__(self): super(AllSelectButton, self).__init__(name="全部选择") self.function_init() # 功能绑定 - def function_init(self): pass class ChangeConfigButton(BaseButton): ''' 更改配置按钮,继承自基类按钮 ''' def __init__(self): super(ChangeConfigButton, self).__init__(name="更改配置") self.function_init() # 功能绑定 - def function_init(self): pass class ManualUpdateButton(BaseButton): ''' 手动更新按钮,继承自基类按钮 ''' def __init__(self): super(ManualUpdateButton, self).__init__(name="手动更新") self.function_init() # 功能绑定 - def function_init(self): pass class SelectCommodityButton(BaseButton): ''' 选择商品按钮,继承自基类按钮 ''' def __init__(self): super(SelectCommodityButton, self).__init__(name="选择商品") self.function_init() # 功能绑定 - def function_init(self): pass class MonthlyDataButton(BaseButton): ''' 月份数据按钮,继承自基类按钮 ''' def __init__(self): super(MonthlyDataButton, self).__init__(name="月份数据") self.function_init() # 功能绑定 - def function_init(self): pass class YearlyDataButton(BaseButton): ''' 年份数据按钮,继承自基类按钮 ''' def __init__(self): super(YearlyDataButton, self).__init__(name="年份数据") self.function_init() # 功能绑定 - def function_init(self): pass
mit
4,804,963,411,780,969,000
17.836283
64
0.547099
false
AlexStarov/Shop
applications/discount/management/commands/processing_actions.py
1
6004
# -*- coding: utf-8 -*- from django.core.management.base import BaseCommand from applications.product.models import Category, Product from applications.discount.models import Action __author__ = 'Alex Starov' class Command(BaseCommand, ): def handle(self, *args, **options): try: action_category = Category.objects.get(url=u'акции', ) except Category.DoesNotExist: action_category = False """ Выключаем продукты из "АКЦИИ" срок действия акции которой уже подощёл к концу """ action_not_active = Action.objects.not_active() if action_not_active: print 'Action - NOT ACTIVE:', action_not_active for action in action_not_active: products_of_action = action.product_in_action.all() print 'All products:', products_of_action """ Если акция с авто окончанием, то заканчиваем еЁ. """ if action.auto_end: products_of_action = action.product_in_action.in_action() if len(products_of_action, ) > 0: print 'Product auto_end:', products_of_action for product in products_of_action: print 'Del product from Action: ', product """ Помечает товар как не учавствующий в акции """ if action_category: product.category.remove(action_category, ) product.in_action = False if action.auto_del_action_from_product: if action_category: product.action.remove(action, ) product.save() if action.auto_del: action.deleted = True action.save() action_active = Action.objects.active() if action_active: print 'Action - ACTIVE:', action_active for action in action_active: products_of_action = action.product_in_action.all() print 'All products:', products_of_action """ Если акция с автостартом, то мы еЁ стартуем. """ if action.auto_start: """ Включаем галочку 'Учавствует в акции' всем продуктам которые внесены в акцию исключая продукты 'отсутсвующие на складе' """ products_of_action = action.product_in_action.exclude(is_availability=4, ) if len(products_of_action, ) > 0: print 'Product auto_start:', products_of_action for product in products_of_action: """ Помечает товар как учавствующий в акции """ product.in_action = True """ Добавляем категорию 'Акция' в товар """ if action_category: product.category.add(action_category, ) product.save() """ Удаляем товары учавствующие в активной акции но при этом 'отсутсвующие на складе' """ products_remove_from_action = action.product_in_action.exclude(is_availability__lt=4, ) if len(products_remove_from_action, ) > 0: print 'Product auto_start remove:', products_remove_from_action for product in products_remove_from_action: """ Помечает товар как не учавствующий в акции """ product.in_action = False """ Удаляем категорию 'Акция' из товара """ if action_category: product.category.remove(action_category, ) product.save() """ Убираем галочку 'участвует в акции' всем продуктам у которых она почемуто установлена, но при этом отсутвует хоть какая то акция """ products = Product.objects.filter(in_action=True, action=None, ).update(in_action=False, ) print 'Товары удаленные из акции по причине вывода их из акции: ', products """ Убираем галочку 'участвует в акции' всем продуктам которые отсутсвуют на складе """ products = Product.objects.filter(in_action=True, is_availability=4, ).update(in_action=False, ) print 'Товары удаленные из акции по причине отсутсвия на складе: ', products """ Делаем активной акционную категорию, если есть хоть один акционный товар """ all_actions_products = action_category.products.all() if len(all_actions_products) != 0 and not action_category.is_active: action_category.is_active = True action_category.save() elif len(all_actions_products) == 0 and action_category.is_active: 
action_category.is_active = False action_category.save()
apache-2.0
-2,092,252,847,127,148,000
51.525253
109
0.527115
false
alfredhq/alfred
alfred/__main__.py
1
1238
#!/usr/bin/env python import os from argh import arg, ArghParser from argh.exceptions import CommandError from functools import wraps CONFIG = os.environ.get('ALFRED_CONFIG') def with_app(func): @wraps(func) @arg('--config', help='path to config') def wrapper(args): from alfred import create_app if not CONFIG and not args.config: raise CommandError('There is no config file specified') app = create_app(args.config or CONFIG) return func(app, args) return wrapper @arg('--host', default='127.0.0.1', help='the host') @arg('--port', default=5000, help='the port') @arg('--noreload', action='store_true', help='disable code reloader') @with_app def runserver(app, args): app.run(args.host, args.port, use_reloader=not args.noreload) @with_app def shell(app, args): from alfred.helpers import get_shell with app.test_request_context(): sh = get_shell() sh(app=app) @with_app def collectassets(app, args): from alfred.assets import gears gears.get_environment(app).save() def main(): parser = ArghParser() parser.add_commands([runserver, shell, collectassets]) parser.dispatch() if __name__ == '__main__': main()
isc
8,186,954,873,750,227,000
22.358491
69
0.659128
false
javiroman/rlink
dbconnector/dbconnector_test.py
1
2705
#!/usr/bin/env python """ (C) 2008-2009 Javi Roman <[email protected]> $Id$ """ import sys import traceback import Ice import socket import signal import syslog import time import os # # Important sanity tests. # slice_dir = os.getenv('ICEPY_HOME', '') if len(slice_dir) == 0 or not os.path.exists(os.path.join(slice_dir, 'slice')): slice_dir = os.getenv('ICE_HOME', '') if len(slice_dir) == 0 or not os.path.exists(os.path.join(slice_dir, 'slice')): slice_dir = os.path.join('/', 'usr', 'share') if not os.path.exists(os.path.join(slice_dir, 'slice')): print sys.argv[0] + ': Slice directory not found. Define ICEPY_HOME or ICE_HOME.' sys.exit(1) MAIN_CONFIG_DIR = os.getcwd() CONFIG_FILE = MAIN_CONFIG_DIR + "/" + "dbconnector_test.cfg" VERSION="0.0.1" Ice.loadSlice('-I' + slice_dir + '/slice ' + MAIN_CONFIG_DIR + "/" + "DBConnector.ice") import DBConnector class AppRlinkDBConnectorClient(Ice.Application): def __init__(self): print "<AppRlinkDBConnectorClient constructor>" def run(self, args): print "<Ice runtime running>" ic = self.communicator() properties = ic.getProperties() proxy = properties.getProperty('DBConnector.Proxy') if len(proxy) == 0: print " property `DBConnector.Proxy' not set" return False try: servant = \ DBConnector.DBInsertionPrx.checkedCast(ic.stringToProxy(proxy)) except Ice.NotRegisteredException: print "Execpcion no registrado!!!" traceback.print_exc() return -1 servant.pingTest("Hello World DBConnector!") if ic: try: self.communicator().destroy() except: traceback.print_exc() status=1 return 0 def main(): print "-------------------------------------------------------" version = "DBConnector Tester version %s" % VERSION print version print "-------------------------------------------------------" app = AppRlinkDBConnectorClient() if not os.path.exists(CONFIG_FILE): print "main configuration file missing ..." return 1 else: """ To overwrite Ice system variables we've to initialize InitializationData structure, before Ice initialization. The config_file (second parameter to app.main, has to be setted up to None. """ id = Ice.InitializationData() id.properties = Ice.createProperties() id.properties.load(CONFIG_FILE) return (app.main(sys.argv, None, id)) if __name__ == "__main__": sys.exit(main()) # vim: ts=4:sw=4:et:sts=4:ai:tw=80
gpl-2.0
-4,307,590,568,236,622,300
26.886598
87
0.586691
false
pragmaticcoders/horse
tests/integration/recommendation/test_smart_service.py
1
3551
from horse.models import User, Movie import pytest @pytest.fixture def service(app): return app.ctx.recommendations.smart def assert_recommendations(result, expected): def readable(lst): return [(item.pk, item.title) for item in lst] result_movies = [movie for (movie, weight) in result] assert readable(result_movies) == readable(expected) def test_liked_movie_is_not_included(service, movies_repo): user = User('root') other = User('other') movie_a = Movie('a') movie_b = Movie('b') movies_repo.store(movie_a) movies_repo.store(movie_b) user.add_to_liked_movies(movie_a) other.add_to_liked_movies(movie_b) result = service.recommend(user) assert_recommendations(result, [movie_b]) def test_followed_users_movie_is_more_influential(service, movies_repo): user = User('root') other_user = User('other') followed_user = User('followed') movie_a = Movie('a') movie_b = Movie('b') movies_repo.store(movie_a) movies_repo.store(movie_b) user.add_to_followed_users(followed_user) followed_user.add_to_liked_movies(movie_a) other_user.add_to_liked_movies(movie_b) result = service.recommend(user) assert_recommendations(result, [movie_a, movie_b]) def test_nested_follows_are_more_influential(service, movies_repo): movie_a = Movie('a') movie_b = Movie('b') movies_repo.store(movie_b) movies_repo.store(movie_a) user = User('root') followed_user_1 = User('followed 1') followed_user_2 = User('followed 2') user.add_to_followed_users(followed_user_1) user.add_to_followed_users(followed_user_2) followed_user_2.add_to_followed_users(followed_user_1) followed_user_1.add_to_liked_movies(movie_a) followed_user_2.add_to_liked_movies(movie_b) result = service.recommend(user) assert_recommendations(result, [movie_a, movie_b]) def test_similar_users_are_more_influential(service, movies_repo): movie_a = Movie('a') movie_b = Movie('b') movie_c = Movie('c') movies_repo.store(movie_a) movies_repo.store(movie_b) movies_repo.store(movie_c) user = User('root') followed_user_1 = User('followed 1') followed_user_2 = User('followed 2') user.add_to_followed_users(followed_user_1) user.add_to_followed_users(followed_user_2) followed_user_1.add_to_liked_movies(movie_a) followed_user_2.add_to_liked_movies(movie_b) followed_user_2.add_to_liked_movies(movie_c) # User shares a common movie with followed_user_2 user.add_to_liked_movies(movie_c) result = service.recommend(user) assert_recommendations(result, [movie_b, movie_a]) def test_globally_liked_movies_are_more_influential(service, movies_repo): user = User('root') movie_a = Movie('a') movie_b = Movie('b') movie_c = Movie('c') movies_repo.store(movie_a) movies_repo.store(movie_b) movies_repo.store(movie_c) user_a = User('a') user_b = User('b') user_a.add_to_liked_movies(movie_a) user_a.add_to_liked_movies(movie_b) user_b.add_to_liked_movies(movie_b) result = service.recommend(user) assert_recommendations(result, [movie_b, movie_a]) def test_movie_without_likes_is_not_recommended(service, movies_repo): user = User('root') movie_a = Movie('a') movie_b = Movie('b') movies_repo.store(movie_a) movies_repo.store(movie_b) user.add_to_liked_movies(movie_b) result = service.recommend(user) assert_recommendations(result, [])
mit
8,766,053,740,478,384,000
23.489655
74
0.666854
false
eliksir/mailmojo-python-sdk
test/test_newsletter_api.py
1
1774
# coding: utf-8 """ MailMojo API v1 of the MailMojo API # noqa: E501 OpenAPI spec version: 1.1.0 Contact: [email protected] Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import unittest import mailmojo_sdk from mailmojo_sdk.api.newsletter_api import NewsletterApi # noqa: E501 from mailmojo_sdk.rest import ApiException class TestNewsletterApi(unittest.TestCase): """NewsletterApi unit test stubs""" def setUp(self): self.api = mailmojo_sdk.api.newsletter_api.NewsletterApi() # noqa: E501 def tearDown(self): pass def test_cancel_newsletter(self): """Test case for cancel_newsletter Cancel a newsletter. # noqa: E501 """ pass def test_create_newsletter(self): """Test case for create_newsletter Create a newsletter draft. # noqa: E501 """ pass def test_get_newsletter_by_id(self): """Test case for get_newsletter_by_id Retrieve a newsletter by id. # noqa: E501 """ pass def test_get_newsletters(self): """Test case for get_newsletters Retrieve all newsletters. # noqa: E501 """ pass def test_send_newsletter(self): """Test case for send_newsletter Send a newsletter. # noqa: E501 """ pass def test_test_newsletter(self): """Test case for test_newsletter Send a test newsletter. # noqa: E501 """ pass def test_update_newsletter(self): """Test case for update_newsletter Update a newsletter draft partially. # noqa: E501 """ pass if __name__ == '__main__': unittest.main()
apache-2.0
-50,909,709,508,085,050
20.373494
80
0.60372
false
d120/pyfeedback
src/feedback/migrations/0043_auto_20190618_2221.py
1
8403
# -*- coding: utf-8 -*- # Generated by Django 1.11.21 on 2019-06-18 22:21 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('feedback', '0042_auto_20180608_1423'), ] operations = [ migrations.CreateModel( name='FragebogenUE2016', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('fach', models.CharField(blank=True, choices=[('inf', 'Informatik'), ('math', 'Mathematik'), ('ce', 'Computational Engineering'), ('ist', 'Informationssystemtechnik'), ('etit', 'Elektrotechnik'), ('psyit', 'Psychologie in IT'), ('winf', 'Wirtschaftsinformatik'), ('sonst', 'etwas anderes')], max_length=5)), ('abschluss', models.CharField(blank=True, choices=[('bsc', 'Bachelor'), ('msc', 'Master'), ('dipl', 'Diplom'), ('lehr', 'Lehramt'), ('sonst', 'anderer Abschluss')], max_length=5)), ('semester', models.CharField(blank=True, choices=[('1', '1'), ('2', '2'), ('3', '3'), ('4', '4'), ('5', '5'), ('6', '6'), ('7', '7'), ('8', '8'), ('9', '9'), ('10', '>=10')], max_length=4)), ('geschlecht', models.CharField(blank=True, choices=[('w', 'weiblich'), ('m', 'männlich'), ('s', 'sonstiges')], max_length=1)), ('studienberechtigung', models.CharField(blank=True, choices=[('d', 'Deutschland'), ('o', 'anderes Land')], max_length=1)), ('ue_wie_oft_besucht', models.PositiveSmallIntegerField(blank=True, null=True)), ('ue_besuch_ueberschneidung', models.CharField(blank=True, choices=[('j', 'ja'), ('n', 'nein')], max_length=1)), ('ue_besuch_qualitaet', models.CharField(blank=True, choices=[('j', 'ja'), ('n', 'nein')], max_length=1)), ('ue_besuch_verhaeltnisse', models.CharField(blank=True, choices=[('j', 'ja'), ('n', 'nein')], max_length=1)), ('ue_besuch_privat', models.CharField(blank=True, choices=[('j', 'ja'), ('n', 'nein')], max_length=1)), ('ue_besuch_elearning', models.CharField(blank=True, choices=[('j', 'ja'), ('n', 'nein')], max_length=1)), ('ue_besuch_zufrueh', models.CharField(blank=True, choices=[('j', 'ja'), ('n', 'nein')], max_length=1)), ('ue_besuch_sonstiges', models.CharField(blank=True, choices=[('j', 'ja'), ('n', 'nein')], max_length=1)), ('ue_3_1', models.PositiveSmallIntegerField(blank=True, null=True)), ('ue_3_2', models.PositiveSmallIntegerField(blank=True, null=True)), ('ue_3_3', models.PositiveSmallIntegerField(blank=True, null=True)), ('ue_3_4', models.PositiveSmallIntegerField(blank=True, null=True)), ('ue_3_5', models.PositiveSmallIntegerField(blank=True, null=True)), ('ue_3_6', models.PositiveSmallIntegerField(blank=True, null=True)), ('ue_3_7', models.PositiveSmallIntegerField(blank=True, null=True)), ('ue_3_8', models.PositiveSmallIntegerField(blank=True, null=True)), ('ue_4_1', models.PositiveSmallIntegerField(blank=True, null=True)), ('ue_4_2', models.PositiveSmallIntegerField(blank=True, null=True)), ('ue_4_3', models.PositiveSmallIntegerField(blank=True, null=True)), ('ue_4_4', models.PositiveSmallIntegerField(blank=True, null=True)), ('ue_4_5', models.PositiveSmallIntegerField(blank=True, null=True)), ('ue_4_6', models.PositiveSmallIntegerField(blank=True, null=True)), ('ue_4_7', models.PositiveSmallIntegerField(blank=True, null=True)), ('ue_4_8', models.PositiveSmallIntegerField(blank=True, null=True)), ('ue_4_9', models.PositiveSmallIntegerField(blank=True, null=True)), ('ue_4_10', models.CharField(blank=True, max_length=1)), ('ue_4_11', models.CharField(blank=True, max_length=1)), ('kennziffer', 
models.PositiveSmallIntegerField(blank=True, null=True)), ('ue_5_1', models.PositiveSmallIntegerField(blank=True, null=True)), ('ue_5_2', models.PositiveSmallIntegerField(blank=True, null=True)), ('ue_5_3', models.PositiveSmallIntegerField(blank=True, null=True)), ('ue_5_4', models.PositiveSmallIntegerField(blank=True, null=True)), ('ue_5_5', models.PositiveSmallIntegerField(blank=True, null=True)), ('ue_5_6', models.PositiveSmallIntegerField(blank=True, null=True)), ('ue_5_7', models.PositiveSmallIntegerField(blank=True, null=True)), ('ue_5_8', models.PositiveSmallIntegerField(blank=True, null=True)), ('ue_5_9', models.PositiveSmallIntegerField(blank=True, null=True)), ('ue_5_10', models.PositiveSmallIntegerField(blank=True, null=True)), ('ue_5_11', models.PositiveSmallIntegerField(blank=True, null=True)), ('ue_5_12', models.PositiveSmallIntegerField(blank=True, null=True)), ('ue_5_13', models.PositiveSmallIntegerField(blank=True, null=True)), ('ue_5_14', models.PositiveSmallIntegerField(blank=True, null=True)), ('ue_5_15', models.PositiveSmallIntegerField(blank=True, null=True)), ('ue_5_16', models.PositiveSmallIntegerField(blank=True, null=True)), ('ue_6_1', models.CharField(blank=True, choices=[('0', '0'), ('1', '0.5'), ('2', '1'), ('3', '2'), ('4', '3'), ('5', '4'), ('6', '5'), ('7', '>=5')], max_length=1)), ('ue_6_2', models.PositiveSmallIntegerField(blank=True, null=True)), ('ue_6_3', models.PositiveSmallIntegerField(blank=True, null=True)), ('veranstaltung', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='feedback.Veranstaltung')), ], options={ 'verbose_name': 'Übungsfragebogen 2016', 'verbose_name_plural': 'Übungfragebögen 2016', 'ordering': ['semester', 'veranstaltung'], }, ), migrations.AddField( model_name='ergebnis2016', name='ue_arbeitsbedingungen', field=models.FloatField(blank=True, null=True), ), migrations.AddField( model_name='ergebnis2016', name='ue_arbeitsbedingungen_count', field=models.PositiveIntegerField(default=0), ), migrations.AddField( model_name='ergebnis2016', name='ue_didaktik', field=models.FloatField(blank=True, null=True), ), migrations.AddField( model_name='ergebnis2016', name='ue_didaktik_count', field=models.PositiveIntegerField(default=0), ), migrations.AddField( model_name='ergebnis2016', name='ue_feedbackpreis', field=models.FloatField(blank=True, null=True), ), migrations.AddField( model_name='ergebnis2016', name='ue_feedbackpreis_count', field=models.PositiveIntegerField(default=0), ), migrations.AddField( model_name='ergebnis2016', name='ue_lernerfolg', field=models.FloatField(blank=True, null=True), ), migrations.AddField( model_name='ergebnis2016', name='ue_lernerfolg_count', field=models.PositiveIntegerField(default=0), ), migrations.AddField( model_name='ergebnis2016', name='ue_organisation', field=models.FloatField(blank=True, null=True), ), migrations.AddField( model_name='ergebnis2016', name='ue_organisation_count', field=models.PositiveIntegerField(default=0), ), migrations.AddField( model_name='ergebnis2016', name='ue_umgang', field=models.FloatField(blank=True, null=True), ), migrations.AddField( model_name='ergebnis2016', name='ue_umgang_count', field=models.PositiveIntegerField(default=0), ), ]
agpl-3.0
591,802,522,073,656,600
58.992857
324
0.57233
false
mskala/birdie
birdieapp/utils/media.py
1
4300
# -*- coding: utf-8 -*-

# Copyright (C) 2013-2014 Ivo Nunes/Vasco Nunes

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from PIL import Image, ImageDraw
from gi.repository import GdkPixbuf
from birdieapp.constants import BIRDIE_CACHE_PATH
import StringIO
import os


def resize_and_crop(img, size, crop_type='middle'):
    """
    Resize and crop an image to fit the specified size.
    """
    # Get current and desired ratio for the images
    img_ratio = img.size[0] / float(img.size[1])
    ratio = size[0] / float(size[1])
    # The image is scaled/cropped vertically or horizontally depending on the
    # ratio
    if ratio > img_ratio:
        img = img.resize(
            (size[0], size[0] * img.size[1] / img.size[0]), Image.ANTIALIAS)
        # Crop in the top, middle or bottom
        if crop_type == 'top':
            box = (0, 0, img.size[0], size[1])
        elif crop_type == 'middle':
            box = (0, (img.size[1] - size[1]) / 2, img.size[
                   0], (img.size[1] + size[1]) / 2)
        elif crop_type == 'bottom':
            box = (0, img.size[1] - size[1], img.size[0], img.size[1])
        else:
            raise ValueError('ERROR: invalid value for crop_type')
        img = img.crop(box)
    elif ratio < img_ratio:
        img = img.resize(
            (size[1] * img.size[0] / img.size[1], size[1]), Image.ANTIALIAS)
        # Crop in the top, middle or bottom
        if crop_type == 'top':
            box = (0, 0, size[0], img.size[1])
        elif crop_type == 'middle':
            box = ((img.size[0] - size[0]) / 2, 0, (
                img.size[0] + size[0]) / 2, img.size[1])
        elif crop_type == 'bottom':
            box = (img.size[0] - size[0], 0, img.size[0], img.size[1])
        else:
            raise ValueError('ERROR: invalid value for crop_type')
        img = img.crop(box)
    else:
        img = img.resize((size[0], size[1]), Image.ANTIALIAS)
    return img


def cropped_thumbnail(img):
    """Creates a centered cropped thumbnail GdkPixbuf of given image"""
    # thumbnail and crop
    try:
        im = Image.open(img)
        im = im.convert('RGBA')
        im = resize_and_crop(im, (318, 120))

        # Convert to GdkPixbuf
        buff = StringIO.StringIO()
        im.save(buff, 'ppm')
        contents = buff.getvalue()
        buff.close()
        loader = GdkPixbuf.PixbufLoader.new_with_type('pnm')
        loader.write(contents)
        pixbuf = loader.get_pixbuf()
        loader.close()
        return pixbuf
    except IOError:
        print("Invalid image file %s" % img)
        try:
            os.remove(img)
        except IOError:
            pass
        return None


def fit_image_screen(img, widget):
    pixbuf = GdkPixbuf.Pixbuf.new_from_file(img)
    screen_h = widget.get_screen().get_height()
    screen_w = widget.get_screen().get_width()

    # Scale down to fit the screen height, keeping the aspect ratio
    if pixbuf.get_height() >= screen_h - 100:
        factor = float(pixbuf.get_width()) / pixbuf.get_height()
        new_width = factor * (screen_h - 100)
        pixbuf = pixbuf.scale_simple(
            new_width, screen_h - 100, GdkPixbuf.InterpType.BILINEAR)
        return pixbuf

    # Scale down to fit the screen width, keeping the aspect ratio
    if pixbuf.get_width() >= screen_w:
        factor = float(pixbuf.get_height()) / pixbuf.get_width()
        new_height = factor * (screen_w - 100)
        pixbuf = pixbuf.scale_simple(
            screen_w - 100, new_height, GdkPixbuf.InterpType.BILINEAR)
        return pixbuf

    return pixbuf


def simple_resize(img_path, w, h):
    try:
        im = Image.open(img_path)
        img = im.resize((w, h), Image.ANTIALIAS)
        dest = BIRDIE_CACHE_PATH + os.path.basename(img_path) + ".jpg"
        img.save(dest)
        return dest
    except IOError:
        return None
gpl-3.0
2,320,095,578,948,772,000
33.126984
77
0.594651
false
wenxichen/tensorflow_yolo2
src/img_dataset/ilsvrc2017_cls.py
1
7175
"""ILSVRC 2017 Classicifation Dataset. DEPRECATED version. For the purpose of keeping history only. Use ilsvrc2017_cls_multithread.py instead. """ import os import cv2 import numpy as np import random import config as cfg class ilsvrc_cls: def __init__(self, image_set, rebuild=False, data_aug=True): self.name = 'ilsvrc_2017' self.devkit_path = cfg.ILSVRC_PATH self.data_path = self.devkit_path self.cache_path = cfg.CACHE_PATH self.batch_size = cfg.BATCH_SIZE self.image_size = cfg.IMAGE_SIZE self.image_set = image_set self.rebuild = rebuild self.data_aug = data_aug self.cursor = 0 self.load_classes() # self.gt_labels = None assert os.path.exists(self.devkit_path), \ 'VOCdevkit path does not exist: {}'.format(self.devkit_path) assert os.path.exists(self.data_path), \ 'Path does not exist: {}'.format(self.data_path) self.prepare() def prepare(self): """Create a list of ground truth that includes input path and label. """ if (self.image_set == "train"): imgset_fname = "train_cls.txt" else: imgset_fname = self.image_set + ".txt" imgset_file = os.path.join( self.data_path, 'ImageSets', 'CLS-LOC', imgset_fname) print('Processing gt_labels using ' + imgset_file) gt_labels = [] with open(imgset_file, 'r') as f: for line in f.readlines(): img_path = line.strip().split()[0] label = self.class_to_ind[img_path.split("/")[0]] imname = os.path.join( self.data_path, 'Data', 'CLS-LOC', self.image_set, img_path + ".JPEG") gt_labels.append( {'imname': imname, 'label': label}) random.shuffle(gt_labels) self.gt_labels = gt_labels def load_classes(self): """Use the folder name to get labels.""" if (self.image_set == "train"): img_folder = os.path.join( self.data_path, 'Data', 'CLS-LOC', 'train') print('Loading class info from ' + img_folder) self.classes = [item for item in os.listdir(img_folder) if os.path.isdir(os.path.join(img_folder, item))] self.num_class = len(self.classes) assert (self.num_class == 1000), "number of classes is not 1000!" self.class_to_ind = dict( list(zip(self.classes, list(range(self.num_class))))) def get(self): """Get shuffled images and labels according to batchsize. 
Return: images: 4D numpy array labels: 1D numpy array """ images = np.zeros( (self.batch_size, self.image_size, self.image_size, 3)) labels = np.zeros(self.batch_size) count = 0 while count < self.batch_size: imname = self.gt_labels[self.cursor]['imname'] images[count, :, :, :] = self.image_read(imname, data_aug=self.data_aug) labels[count] = self.gt_labels[self.cursor]['label'] count += 1 self.cursor += 1 if self.cursor >= len(self.gt_labels): random.shuffle(self.gt_labels) self.cursor = 0 return images, labels def image_read(self, imname, data_aug=False): image = cv2.imread(imname) ##################### # Data Augmentation # ##################### if data_aug: flip = bool(random.getrandbits(1)) rotate_deg = random.randint(0, 359) # 75% chance to do random crop # another 25% change in maintaining input at 224x224 # this help simplify the input processing for test, val # TODO: can make multiscale test input later random_crop_chance = random.randint(0, 3) too_small = False color_pert = bool(random.getrandbits(1)) if flip: image = image[:, ::-1, :] # assume color image rows, cols, _ = image.shape M = cv2.getRotationMatrix2D((cols / 2, rows / 2), rotate_deg, 1) image = cv2.warpAffine(image, M, (cols, rows)) # color perturbation if color_pert: hue_shift_sign = bool(random.getrandbits(1)) hue_shift = random.randint(0, 10) saturation_shift_sign = bool(random.getrandbits(1)) saturation_shift = random.randint(0, 10) hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV) # TODO: currently not sure what cv2 does to values # that are larger than the maximum. # It seems it does not cut at the max # nor normalize the whole by multiplying a factor. # need to expore this in more detail if hue_shift_sign: hsv[:, :, 0] += hue_shift else: hsv[:, :, 0] -= hue_shift if saturation_shift_sign: hsv[:, :, 1] += saturation_shift else: hsv[:, :, 1] -= saturation_shift image = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR) # random crop if random_crop_chance > 0: # current random crop upbound is 292 (1.3 x 224) short_side_len = random.randint( self.image_size, cfg.RAND_CROP_UPBOUND) short_side = min([cols, rows]) if short_side == cols: scaled_cols = short_side_len factor = float(short_side_len) / cols scaled_rows = int(rows * factor) else: scaled_rows = short_side_len factor = float(short_side_len) / rows scaled_cols = int(cols * factor) # print "scaled_cols and rows:", scaled_cols, scaled_rows if scaled_cols < 224 or scaled_rows < 224: too_small = True print "Image is too small,", imname else: image = cv2.resize(image, (scaled_cols, scaled_rows)) col_offset = random.randint(0, scaled_cols - self.image_size) row_offset = random.randint(0, scaled_rows - self.image_size) # print "col_offset and row_offset:", col_offset, row_offset image = image[row_offset:self.image_size + row_offset, col_offset:self.image_size + col_offset] # assuming still using image size 224x224 # print "image shape is", image.shape if random_crop_chance == 0 or too_small: image = cv2.resize(image, (self.image_size, self.image_size)) else: image = cv2.resize(image, (self.image_size, self.image_size)) image = image.astype(np.float32) image = (image / 255.0) * 2.0 - 1.0 return image
mit
-763,985,578,201,332,100
39.767045
90
0.522509
false
oss/rutgers-repository-utils
lib/repoclosure.py
1
11619
#!/usr/bin/python -t # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Library General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # seth vidal 2005 (c) etc etc #Read in the metadata of a series of repositories and check all the # dependencies in all packages for resolution. Print out the list of # packages with unresolved dependencies import sys import os import logging import yum import yum.Errors from yum.misc import getCacheDir from optparse import OptionParser import rpmUtils.arch import rpmUtils.updates from yum.constants import * from yum.packageSack import ListPackageSack def parseArgs(): usage = """ Read in the metadata of a series of repositories and check all the dependencies in all packages for resolution. Print out the list of packages with unresolved dependencies %s [-c <config file>] [-a <arch>] [-l <lookaside>] [-r <repoid>] [-r <repoid2>] """ % sys.argv[0] parser = OptionParser(usage=usage) parser.add_option("-c", "--config", default='/etc/yum.conf', help='config file to use (defaults to /etc/yum.conf)') parser.add_option("-a", "--arch", default=[], action='append', help='check packages of the given archs, can be specified multiple ' + 'times (default: current arch)') parser.add_option("--basearch", default=None, help="set the basearch for yum to run as") parser.add_option("-b", "--builddeps", default=False, action="store_true", help='check build dependencies only (needs source repos enabled)') parser.add_option("-l", "--lookaside", default=[], action='append', help="specify a lookaside repo id to query, can be specified multiple times") parser.add_option("-r", "--repoid", default=[], action='append', help="specify repo ids to query, can be specified multiple times (default is all enabled)") parser.add_option("-t", "--tempcache", default=False, action="store_true", help="Use a temp dir for storing/accessing yum-cache") parser.add_option("-q", "--quiet", default=0, action="store_true", help="quiet (no output to stderr)") parser.add_option("-n", "--newest", default=0, action="store_true", help="check only the newest packages in the repos") parser.add_option("--repofrompath", action="append", help="specify repoid & paths of additional repositories - unique repoid and path required, can be specified multiple times. Example. --repofrompath=myrepo,/path/to/repo") parser.add_option("-p", "--pkg", action="append", help="check closure for this package only") parser.add_option("-g", "--group", action="append", help="check closure for packages in this group only") (opts, args) = parser.parse_args() return (opts, args) # Note that this is a "real" API, used by spam-o-matic etc. # so we have to do at least some API guarantee stuff. 
class RepoClosure(yum.YumBase): def __init__(self, arch=[], config="/etc/yum.conf", builddeps=False, pkgonly=None, basearch=None, grouponly=None): yum.YumBase.__init__(self) if basearch: self.preconf.arch = basearch self.logger = logging.getLogger("yum.verbose.repoclosure") self.lookaside = [] self.builddeps = builddeps self.pkgonly = pkgonly self.grouponly = grouponly self.doConfigSetup(fn = config,init_plugins=False) self._rc_arches = arch if hasattr(self.repos, 'sqlite'): self.repos.sqlite = False self.repos._selectSackType() def evrTupletoVer(self,tup): """convert an evr tuple to a version string, return None if nothing to convert""" e, v, r = tup if v is None: return None val = v if e is not None: val = '%s:%s' % (e, v) if r is not None: val = '%s-%s' % (val, r) return val def readMetadata(self): self.doRepoSetup() archs = [] if not self._rc_arches: archs.extend(self.arch.archlist) else: for arch in self._rc_arches: archs.extend(self.arch.get_arch_list(arch)) if self.builddeps and 'src' not in archs: archs.append('src') self.doSackSetup(archs) for repo in self.repos.listEnabled(): self.repos.populateSack(which=[repo.id], mdtype='filelists') def getBrokenDeps(self, newest=False): unresolved = {} resolved = {} pkgs = self.pkgSack if newest: pkgs = self.pkgSack.returnNewestByNameArch() mypkgSack = ListPackageSack(pkgs) pkgtuplist = mypkgSack.simplePkgList() # toss out any of the obsoleted pkgs so we can't depsolve with them self.up = rpmUtils.updates.Updates([], pkgtuplist) self.up.rawobsoletes = mypkgSack.returnObsoletes() for pkg in pkgs: fo = self.up.checkForObsolete([pkg.pkgtup]) if fo: # useful debug to make sure the obsoletes is sane #print "ignoring obsolete pkg %s" % pkg #for i in fo[pkg.pkgtup]: # print i self.pkgSack.delPackage(pkg) # we've deleted items so remake the pkgs pkgs = self.pkgSack.returnNewestByNameArch() pkgtuplist = mypkgSack.simplePkgList() if self.builddeps: pkgs = filter(lambda x: x.arch == 'src', pkgs) pkglist = self.pkgonly if self.grouponly: if not pkglist: pkglist = [] for group in self.grouponly: groupobj = self.comps.return_group(group) if not groupobj: continue pkglist.extend(groupobj.packages) if pkglist: pkgs = filter(lambda x: x.name in pkglist, pkgs) for pkg in pkgs: if pkg.repoid in self.lookaside: # don't attempt to resolve dependancy issues for # packages from lookaside repositories continue for (req, flags, (reqe, reqv, reqr)) in pkg.returnPrco('requires'): if req.startswith('rpmlib'): continue # ignore rpmlib deps ver = self.evrTupletoVer((reqe, reqv, reqr)) if (req,flags,ver) in resolved: continue try: resolve_sack = self.whatProvides(req, flags, ver) except yum.Errors.RepoError, e: pass if len(resolve_sack) < 1: if pkg not in unresolved: unresolved[pkg] = [] unresolved[pkg].append((req, flags, ver)) continue if newest: resolved_by_newest = False for po in resolve_sack:# look through and make sure all our answers are newest-only if po.pkgtup in pkgtuplist: resolved_by_newest = True break if resolved_by_newest: resolved[(req,flags,ver)] = 1 else: if pkg not in unresolved: unresolved[pkg] = [] unresolved[pkg].append((req, flags, ver)) return unresolved def main(): (opts, cruft) = parseArgs() my = RepoClosure(arch=opts.arch, config=opts.config, builddeps=opts.builddeps, pkgonly=opts.pkg, grouponly=opts.group, basearch=opts.basearch) if opts.repofrompath: # setup the fake repos for repo in opts.repofrompath: repoid,repopath = tuple(repo.split(',')) if repopath.startswith('http') or repopath.startswith('ftp') or repopath.startswith('file:'): baseurl 
= repopath else: repopath = os.path.abspath(repopath) baseurl = 'file://' + repopath newrepo = yum.yumRepo.YumRepository(repoid) newrepo.name = repopath newrepo.baseurl = baseurl newrepo.basecachedir = my.conf.cachedir newrepo.metadata_expire = 0 newrepo.timestamp_check = False my.repos.add(newrepo) my.repos.enableRepo(newrepo.id) my.logger.info( "Added %s repo from %s" % (repoid,repopath)) if opts.repoid: for repo in my.repos.repos.values(): if ((repo.id not in opts.repoid) and (repo.id not in opts.lookaside)): repo.disable() else: repo.enable() if opts.lookaside: my.lookaside = opts.lookaside if os.geteuid() != 0 or opts.tempcache: cachedir = getCacheDir() if cachedir is None: my.logger.error("Error: Could not make cachedir, exiting") sys.exit(50) my.repos.setCacheDir(cachedir) if not opts.quiet: my.logger.info('Reading in repository metadata - please wait....') try: my.readMetadata() except yum.Errors.RepoError, e: my.logger.info(e) my.logger.info('Some dependencies may not be complete for this repository') my.logger.info('Run as root to get all dependencies or use -t to enable a user temp cache') if not opts.quiet: my.logger.info('Checking Dependencies') baddeps = my.getBrokenDeps(opts.newest) if opts.newest: num = len(my.pkgSack.returnNewestByNameArch()) else: num = len(my.pkgSack) repos = my.repos.listEnabled() if not opts.quiet: my.logger.info('Repos looked at: %s' % len(repos)) for repo in repos: my.logger.info(' %s' % repo) my.logger.info('Num Packages in Repos: %s' % num) pkgs = baddeps.keys() def sortbyname(a,b): return cmp(a.__str__(),b.__str__()) pkgs.sort(sortbyname) for pkg in pkgs: my.logger.info('package: %s from %s\n unresolved deps: ' % (pkg, pkg.repoid)) for (n, f, v) in baddeps[pkg]: req = '%s' % n if f: flag = LETTERFLAGS[f] req = '%s %s'% (req, flag) if v: req = '%s %s' % (req, v) my.logger.info(' %s' % req) if __name__ == "__main__": try: main() except (yum.Errors.YumBaseError, ValueError), e: print >> sys.stderr, str(e) sys.exit(1)
gpl-2.0
6,160,217,929,008,276,000
36.846906
192
0.560117
false
dominikgiermala/properties-editor
src/properties_editor.py
1
5314
import os import sublime import sublime_plugin from .lib.pyjavaproperties import Properties class AddEditPropertiesCommand(sublime_plugin.WindowCommand): def run(self, paths = []): # TODO: validate if *.properties file self.paths = paths self.window.show_input_panel("Properties to add/edit:", '', self.on_properties_put, None, None) def on_properties_put(self, properties_string): if properties_string and properties_string.strip() and '=' in properties_string: self.properties = {} for property_string in properties_string.split('\n'): key_value = property_string.split('=', 1) if key_value[0] and key_value[1]: self.properties[key_value[0]] = key_value[1] self.edit_properties(self.properties) def edit_properties(self, properties): files_without_key = {} files_with_key = {} for key in properties: files_with_key[key] = [] files_without_key[key] = [] for file in self.paths: p = Properties() p.load(open(file, encoding='latin-1', mode='r')) for key, value in properties.items(): if p.getProperty(key): files_with_key[key].append(os.path.basename(file)) else: files_without_key[key].append(os.path.basename(file)) p[key] = value p.store(open(file, encoding='latin-1', mode='w')) self.display_confirmation_message(files_without_key, files_with_key) def display_confirmation_message(self, files_without_key, files_with_key): confirmation_message = "" for key, value in self.properties.items(): confirmation_message += "Property " + key + "=" + value + " was: " if files_without_key[key]: confirmation_message += "\nAdded in files:\n" + "\n".join(files_without_key[key]) if files_with_key[key]: confirmation_message += "\n\nEdited in files:\n" + "\n".join(files_with_key[key]) confirmation_message += "\n\n" sublime.message_dialog(confirmation_message) class RemovePropertyCommand(sublime_plugin.WindowCommand): def run(self, paths = []): # TODO: validate if *.properties file self.paths = paths self.window.show_input_panel("Property key to remove:", '', self.on_key_put, None, None) def on_key_put(self, key): if key and key.strip(): self.key = key self.remove_property(key, self.paths) def remove_property(self, key, paths): files_without_key = [] files_with_key = [] for file in self.paths: p = Properties() p.load(open(file)) if p.getProperty(key): p.removeProperty(key) files_with_key.append(os.path.basename(file)) p.store(open(file, 'w')) else: files_without_key.append(os.path.basename(file)) self.display_confirmation_message(files_without_key, files_with_key) def display_confirmation_message(self, files_without_key, files_with_key): confirmation_message = "Property with key " + self.key + " was: " if files_with_key: confirmation_message += "\nRemoved in files:\n" + "\n".join(files_with_key) if files_without_key: confirmation_message += "\n\nNot found in files:\n" + "\n".join(files_without_key) if files_without_key: sublime.error_message(confirmation_message) else: sublime.message_dialog(confirmation_message) class RenameKeyCommand(sublime_plugin.WindowCommand): def run(self, paths = []): # TODO: validate if *.properties file self.paths = paths self.window.show_input_panel("Key to rename:", '', self.on_old_key_put, None, None) def on_old_key_put(self, old_key): if old_key and old_key.strip(): self.old_key = old_key self.window.show_input_panel("New key:", '', self.on_new_key_put, None, None) def on_new_key_put(self, new_key): if new_key and new_key.strip(): self.new_key = new_key self.rename_key(self.old_key, self.new_key) def rename_key(self, old_key, new_key): files_without_old_key = [] files_with_new_key = [] 
files_with_renamed_key = [] for file in self.paths: p = Properties() p.load(open(file)) if p.getProperty(old_key): if not p.getProperty(new_key): p[new_key] = p[old_key] p.removeProperty(old_key) files_with_renamed_key.append(os.path.basename(file)) else: files_with_new_key.append(os.path.basename(file)) else: files_without_old_key.append(os.path.basename(file)) p.store(open(file, 'w')) self.display_confirmation_message(files_without_old_key, files_with_new_key, files_with_renamed_key) def display_confirmation_message(self, files_without_old_key, files_with_new_key, files_with_renamed_key): confirmation_message = "Key " + self.old_key + " was: " if files_with_renamed_key: confirmation_message += "\nRenamed in files:\n" + "\n".join(files_with_renamed_key) if files_without_old_key: confirmation_message += "\n\nNot found in files:\n" + "\n".join(files_without_old_key) if files_with_new_key: confirmation_message += "\n\nKey " + self.new_key + " already exists in files:\n" + "\n".join(files_with_new_key) if files_without_old_key or files_with_new_key: sublime.error_message(confirmation_message) else: sublime.message_dialog(confirmation_message)
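# A minimal sketch of the pyjavaproperties round-trip the commands above rely
# on; the path and key below are placeholders, and the latin-1 encoding mirrors
# the add/edit command.
def _sketch_round_trip(path):
    p = Properties()
    p.load(open(path, encoding='latin-1', mode='r'))
    if not p.getProperty('greeting'):
        # add the property only if it is not present yet
        p['greeting'] = 'hello'
    p.store(open(path, encoding='latin-1', mode='w'))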
mit
-4,607,190,421,713,108,000
38.664179
119
0.649417
false
SIPp/pysipp
pysipp/launch.py
1
5708
""" Launchers for invoking SIPp user agents """ import subprocess import os import shlex import select import threading import signal import time from . import utils from pprint import pformat from collections import OrderedDict, namedtuple log = utils.get_logger() Streams = namedtuple("Streams", "stdout stderr") class TimeoutError(Exception): "SIPp process timeout exception" class PopenRunner(object): """Run a sequence of SIPp agents asynchronously. If any process terminates with a non-zero exit code, immediately kill all remaining processes and collect std streams. Adheres to an interface similar to `multiprocessing.pool.AsyncResult`. """ def __init__( self, subprocmod=subprocess, osmod=os, poller=select.epoll, ): # these could optionally be rpyc proxy objs self.spm = subprocmod self.osm = osmod self.poller = poller() # collector thread placeholder self._waiter = None # store proc results self._procs = OrderedDict() def __call__(self, cmds, block=True, rate=300, **kwargs): if self._waiter and self._waiter.is_alive(): raise RuntimeError("Not all processes from a prior run have completed") if self._procs: raise RuntimeError( "Process results have not been cleared from previous run" ) sp = self.spm os = self.osm DEVNULL = open(os.devnull, "wb") fds2procs = OrderedDict() # run agent commands in sequence for cmd in cmds: log.debug('launching cmd:\n"{}"\n'.format(cmd)) proc = sp.Popen(shlex.split(cmd), stdout=DEVNULL, stderr=sp.PIPE) fd = proc.stderr.fileno() log.debug("registering fd '{}' for pid '{}'".format(fd, proc.pid)) fds2procs[fd] = self._procs[cmd] = proc # register for stderr hangup events self.poller.register(proc.stderr.fileno(), select.EPOLLHUP) # limit launch rate time.sleep(1.0 / rate) # launch waiter self._waiter = threading.Thread(target=self._wait, args=(fds2procs,)) self._waiter.daemon = True self._waiter.start() return self.get(**kwargs) if block else self._procs def _wait(self, fds2procs): log.debug("started waiter for procs {}".format(fds2procs)) signalled = None left = len(fds2procs) collected = 0 while collected < left: pairs = self.poller.poll() # wait on hangup events log.debug("received hangup for pairs '{}'".format(pairs)) for fd, status in pairs: collected += 1 proc = fds2procs[fd] # attach streams so they can be read more then once log.debug("collecting streams for {}".format(proc)) proc.streams = Streams(*proc.communicate()) # timeout=2)) if proc.returncode != 0 and not signalled: # stop all other agents if there is a failure signalled = self.stop() log.debug("terminating waiter thread") def get(self, timeout=180): """Block up to `timeout` seconds for all agents to complete. 
Either return (cmd, proc) pairs or raise `TimeoutError` on timeout """ if self._waiter.is_alive(): self._waiter.join(timeout=timeout) if self._waiter.is_alive(): # kill them mfin SIPps signalled = self.stop() self._waiter.join(timeout=10) if self._waiter.is_alive(): # try to stop a few more times for _ in range(3): signalled = self.stop() self._waiter.join(timeout=1) if self._waiter.is_alive(): # some procs failed to terminate via signalling raise RuntimeError("Unable to kill all agents!?") # all procs were killed by SIGUSR1 raise TimeoutError( "pids '{}' failed to complete after '{}' seconds".format( pformat([p.pid for p in signalled.values()]), timeout ) ) return self._procs def stop(self): """Stop all agents with SIGUSR1 as per SIPp's signal handling""" return self._signalall(signal.SIGUSR1) def terminate(self): """Kill all agents with SIGTERM""" return self._signalall(signal.SIGTERM) def _signalall(self, signum): signalled = OrderedDict() for cmd, proc in self.iterprocs(): proc.send_signal(signum) log.warn( "sent signal '{}' to cmd '{}' with pid '{}'".format( signum, cmd, proc.pid ) ) signalled[cmd] = proc return signalled def iterprocs(self): """Iterate all processes which are still alive yielding (cmd, proc) pairs """ return ( (cmd, proc) for cmd, proc in self._procs.items() if proc and proc.poll() is None ) def is_alive(self): """Return bool indicating whether some agents are still alive""" return any(self.iterprocs()) def ready(self): """Return bool indicating whether all agents have completed""" return not self.is_alive() def clear(self): """Clear all processes from the last run""" assert self.ready(), "Not all processes have completed" self._procs.clear()
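# A minimal usage sketch for PopenRunner, assuming `sipp` is on the PATH; the
# two scenario commands below are placeholders, not commands built by pysipp.
def _sketch_run_agents():
    runner = PopenRunner()
    procs = runner(
        ["sipp -sn uas -p 5060", "sipp -sn uac 127.0.0.1:5060"],
        block=True,
        timeout=60,  # forwarded to get()
    )
    for cmd, proc in procs.items():
        # proc.streams was attached by the waiter thread after completion
        log.info("%s exited with %s", cmd, proc.returncode)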
gpl-2.0
-7,029,735,260,306,038,000
32.576471
83
0.563595
false
Khan/pyobjc-framework-FSEvents
setup.py
1
1152
'''
Wrappers for the "FSEvents" API in Mac OS X. The functions in this framework
allow you to reliably observe changes to the filesystem, even when your
program is not running all the time.

These wrappers don't include documentation, please check Apple's documentation
for information on how to use this framework and PyObjC's documentation for
general tips and tricks regarding the translation between Python and
(Objective-)C frameworks.
'''
from pyobjc_setup import setup, Extension

setup(
    min_os_level='10.5',
    name='pyobjc-framework-FSEvents',
    version="2.5.1",
    description="Wrappers for the framework FSEvents on Mac OS X",
    packages=["FSEvents"],
    # setup_requires doesn't like git links, so we just have to
    # pip install these first:
    # setup_requires = [
    #     'https://github.com/Khan/pyobjc-core/tarball/master',
    # ],
    dependency_links=[
        'https://github.com/Khan/pyobjc-core/tarball/master',
        'https://github.com/Khan/pyobjc-framework-Cocoa/tarball/master',
    ],
    ext_modules=[
        Extension("FSEvents._callbacks", ["Modules/_callbacks.m"]),
    ],
)
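# Once built and installed (for example with `pip install .` from this
# directory), the wrappers are imported as a plain package -- the name comes
# from the packages= list above:
#
#     import FSEvents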
mit
-8,843,281,425,080,310,000
33.909091
76
0.684896
false
maxmind/GeoIP2-python
tests/webservice_test.py
1
12944
#!/usr/bin/env python # -*- coding: utf-8 -*- import asyncio import copy import ipaddress import json import sys from typing import cast, Dict import unittest sys.path.append("..") # httpretty currently doesn't work, but mocket with the compat interface # does. from mocket import Mocket # type: ignore from mocket.plugins.httpretty import httpretty, httprettified # type: ignore import geoip2 from geoip2.errors import ( AddressNotFoundError, AuthenticationError, GeoIP2Error, HTTPError, InvalidRequestError, OutOfQueriesError, PermissionRequiredError, ) from geoip2.webservice import AsyncClient, Client class TestBaseClient(unittest.TestCase): base_uri = "https://geoip.maxmind.com/geoip/v2.1/" country = { "continent": {"code": "NA", "geoname_id": 42, "names": {"en": "North America"}}, "country": { "geoname_id": 1, "iso_code": "US", "names": {"en": "United States of America"}, }, "maxmind": {"queries_remaining": 11}, "registered_country": { "geoname_id": 2, "is_in_european_union": True, "iso_code": "DE", "names": {"en": "Germany"}, }, "traits": {"ip_address": "1.2.3.4", "network": "1.2.3.0/24"}, } # this is not a comprehensive representation of the # JSON from the server insights = cast(Dict, copy.deepcopy(country)) insights["traits"]["user_count"] = 2 insights["traits"]["static_ip_score"] = 1.3 def _content_type(self, endpoint): return ( "application/vnd.maxmind.com-" + endpoint + "+json; charset=UTF-8; version=1.0" ) @httprettified def test_country_ok(self): httpretty.register_uri( httpretty.GET, self.base_uri + "country/1.2.3.4", body=json.dumps(self.country), status=200, content_type=self._content_type("country"), ) country = self.run_client(self.client.country("1.2.3.4")) self.assertEqual( type(country), geoip2.models.Country, "return value of client.country" ) self.assertEqual(country.continent.geoname_id, 42, "continent geoname_id is 42") self.assertEqual(country.continent.code, "NA", "continent code is NA") self.assertEqual( country.continent.name, "North America", "continent name is North America" ) self.assertEqual(country.country.geoname_id, 1, "country geoname_id is 1") self.assertIs( country.country.is_in_european_union, False, "country is_in_european_union is False", ) self.assertEqual(country.country.iso_code, "US", "country iso_code is US") self.assertEqual( country.country.names, {"en": "United States of America"}, "country names" ) self.assertEqual( country.country.name, "United States of America", "country name is United States of America", ) self.assertEqual( country.maxmind.queries_remaining, 11, "queries_remaining is 11" ) self.assertIs( country.registered_country.is_in_european_union, True, "registered_country is_in_european_union is True", ) self.assertEqual( country.traits.network, ipaddress.ip_network("1.2.3.0/24"), "network" ) self.assertEqual(country.raw, self.country, "raw response is correct") @httprettified def test_me(self): httpretty.register_uri( httpretty.GET, self.base_uri + "country/me", body=json.dumps(self.country), status=200, content_type=self._content_type("country"), ) implicit_me = self.run_client(self.client.country()) self.assertEqual( type(implicit_me), geoip2.models.Country, "country() returns Country object" ) explicit_me = self.run_client(self.client.country()) self.assertEqual( type(explicit_me), geoip2.models.Country, "country('me') returns Country object", ) @httprettified def test_200_error(self): httpretty.register_uri( httpretty.GET, self.base_uri + "country/1.1.1.1", body="", status=200, content_type=self._content_type("country"), ) with 
self.assertRaisesRegex( GeoIP2Error, "could not decode the response as JSON" ): self.run_client(self.client.country("1.1.1.1")) @httprettified def test_bad_ip_address(self): with self.assertRaisesRegex( ValueError, "'1.2.3' does not appear to be an IPv4 " "or IPv6 address" ): self.run_client(self.client.country("1.2.3")) @httprettified def test_no_body_error(self): httpretty.register_uri( httpretty.GET, self.base_uri + "country/" + "1.2.3.7", body="", status=400, content_type=self._content_type("country"), ) with self.assertRaisesRegex( HTTPError, "Received a 400 error for .* with no body" ): self.run_client(self.client.country("1.2.3.7")) @httprettified def test_weird_body_error(self): httpretty.register_uri( httpretty.GET, self.base_uri + "country/" + "1.2.3.8", body='{"wierd": 42}', status=400, content_type=self._content_type("country"), ) with self.assertRaisesRegex( HTTPError, "Response contains JSON but it does not " "specify code or error keys", ): self.run_client(self.client.country("1.2.3.8")) @httprettified def test_bad_body_error(self): httpretty.register_uri( httpretty.GET, self.base_uri + "country/" + "1.2.3.9", body="bad body", status=400, content_type=self._content_type("country"), ) with self.assertRaisesRegex( HTTPError, "it did not include the expected JSON body" ): self.run_client(self.client.country("1.2.3.9")) @httprettified def test_500_error(self): httpretty.register_uri( httpretty.GET, self.base_uri + "country/" + "1.2.3.10", status=500 ) with self.assertRaisesRegex(HTTPError, r"Received a server error \(500\) for"): self.run_client(self.client.country("1.2.3.10")) @httprettified def test_300_error(self): httpretty.register_uri( httpretty.GET, self.base_uri + "country/" + "1.2.3.11", status=300, content_type=self._content_type("country"), ) with self.assertRaisesRegex( HTTPError, r"Received a very surprising HTTP status \(300\) for" ): self.run_client(self.client.country("1.2.3.11")) @httprettified def test_ip_address_required(self): self._test_error(400, "IP_ADDRESS_REQUIRED", InvalidRequestError) @httprettified def test_ip_address_not_found(self): self._test_error(404, "IP_ADDRESS_NOT_FOUND", AddressNotFoundError) @httprettified def test_ip_address_reserved(self): self._test_error(400, "IP_ADDRESS_RESERVED", AddressNotFoundError) @httprettified def test_permission_required(self): self._test_error(403, "PERMISSION_REQUIRED", PermissionRequiredError) @httprettified def test_auth_invalid(self): self._test_error(400, "AUTHORIZATION_INVALID", AuthenticationError) @httprettified def test_license_key_required(self): self._test_error(401, "LICENSE_KEY_REQUIRED", AuthenticationError) @httprettified def test_account_id_required(self): self._test_error(401, "ACCOUNT_ID_REQUIRED", AuthenticationError) @httprettified def test_user_id_required(self): self._test_error(401, "USER_ID_REQUIRED", AuthenticationError) @httprettified def test_account_id_unkown(self): self._test_error(401, "ACCOUNT_ID_UNKNOWN", AuthenticationError) @httprettified def test_user_id_unkown(self): self._test_error(401, "USER_ID_UNKNOWN", AuthenticationError) @httprettified def test_out_of_queries_error(self): self._test_error(402, "OUT_OF_QUERIES", OutOfQueriesError) def _test_error(self, status, error_code, error_class): msg = "Some error message" body = {"error": msg, "code": error_code} httpretty.register_uri( httpretty.GET, self.base_uri + "country/1.2.3.18", body=json.dumps(body), status=status, content_type=self._content_type("country"), ) with self.assertRaisesRegex(error_class, msg): 
self.run_client(self.client.country("1.2.3.18")) @httprettified def test_unknown_error(self): msg = "Unknown error type" ip = "1.2.3.19" body = {"error": msg, "code": "UNKNOWN_TYPE"} httpretty.register_uri( httpretty.GET, self.base_uri + "country/" + ip, body=json.dumps(body), status=400, content_type=self._content_type("country"), ) with self.assertRaisesRegex(InvalidRequestError, msg): self.run_client(self.client.country(ip)) @httprettified def test_request(self): httpretty.register_uri( httpretty.GET, self.base_uri + "country/" + "1.2.3.4", body=json.dumps(self.country), status=200, content_type=self._content_type("country"), ) self.run_client(self.client.country("1.2.3.4")) request = httpretty.last_request self.assertEqual( request.path, "/geoip/v2.1/country/1.2.3.4", "correct URI is used" ) self.assertEqual( request.headers["Accept"], "application/json", "correct Accept header" ) self.assertRegex( request.headers["User-Agent"], "^GeoIP2-Python-Client/", "Correct User-Agent", ) self.assertEqual( request.headers["Authorization"], "Basic NDI6YWJjZGVmMTIzNDU2", "correct auth", ) @httprettified def test_city_ok(self): httpretty.register_uri( httpretty.GET, self.base_uri + "city/" + "1.2.3.4", body=json.dumps(self.country), status=200, content_type=self._content_type("city"), ) city = self.run_client(self.client.city("1.2.3.4")) self.assertEqual(type(city), geoip2.models.City, "return value of client.city") self.assertEqual( city.traits.network, ipaddress.ip_network("1.2.3.0/24"), "network" ) @httprettified def test_insights_ok(self): httpretty.register_uri( httpretty.GET, self.base_uri + "insights/1.2.3.4", body=json.dumps(self.insights), status=200, content_type=self._content_type("country"), ) insights = self.run_client(self.client.insights("1.2.3.4")) self.assertEqual( type(insights), geoip2.models.Insights, "return value of client.insights" ) self.assertEqual( insights.traits.network, ipaddress.ip_network("1.2.3.0/24"), "network" ) self.assertEqual(insights.traits.static_ip_score, 1.3, "static_ip_score is 1.3") self.assertEqual(insights.traits.user_count, 2, "user_count is 2") def test_named_constructor_args(self): id = 47 key = "1234567890ab" client = self.client_class(account_id=id, license_key=key) self.assertEqual(client._account_id, str(id)) self.assertEqual(client._license_key, key) def test_missing_constructor_args(self): with self.assertRaises(TypeError): self.client_class(license_key="1234567890ab") with self.assertRaises(TypeError): self.client_class("47") class TestClient(TestBaseClient): def setUp(self): self.client_class = Client self.client = Client(42, "abcdef123456") def run_client(self, v): return v class TestAsyncClient(TestBaseClient): def setUp(self): self._loop = asyncio.new_event_loop() self.client_class = AsyncClient self.client = AsyncClient(42, "abcdef123456") def tearDown(self): self._loop.run_until_complete(self.client.close()) self._loop.close() def run_client(self, v): return self._loop.run_until_complete(v) del TestBaseClient if __name__ == "__main__": unittest.main()
apache-2.0
-4,090,489,454,764,451,300
32.020408
88
0.584518
false
pbl-cloud/paas-manager
paas_manager/app/util/gmail.py
1
1115
import sys import smtplib from email.mime.text import MIMEText from email.utils import formatdate from ... import config def create_message(from_addr, to_addr, subject, message, encoding): body = MIMEText(message, 'plain', encoding) body['Subject'] = subject body['From'] = from_addr body['To'] = to_addr body['Date'] = formatdate() return body def send_via_gmail(from_addr, to_addr, body): s = smtplib.SMTP('smtp.gmail.com', 587) s.ehlo() s.starttls() s.ehlo() s.login( config['gmail']['user'], config['gmail']['password']) s.sendmail(from_addr, [to_addr], body.as_string()) s.close() def gmail(message, to_addr): body = create_message( config['gmail']['user'], to_addr, '[Notification]', message, 'utf8') send_via_gmail(config['gmail']['user'], to_addr, body) return if __name__ == '__main__': argvs = sys.argv argc = len(argvs) if (argc < 3): print('USAGE: python gmail.py address message') raise SystemExit(0) else: to_addr = argvs[1] message = argvs[2] gmail(message, to_addr)
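# A minimal sketch of calling the helper above directly instead of via the CLI;
# the recipient address and message text are placeholders.
def _sketch_notify():
    gmail('deployment finished', '[email protected]')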
mit
-1,493,291,774,116,415,200
24.340909
76
0.612556
false
Azure/azure-sdk-for-python
sdk/network/azure-mgmt-network/azure/mgmt/network/v2016_12_01/aio/operations/_bgp_service_communities_operations.py
1
5069
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar import warnings from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest from azure.mgmt.core.exceptions import ARMErrorFormat from ... import models as _models T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class BgpServiceCommunitiesOperations: """BgpServiceCommunitiesOperations async operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.network.v2016_12_01.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config def list( self, **kwargs ) -> AsyncIterable["_models.BgpServiceCommunityListResult"]: """Gets all the available bgp service communities. 
:keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either BgpServiceCommunityListResult or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2016_12_01.models.BgpServiceCommunityListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.BgpServiceCommunityListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2016-12-01" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('BgpServiceCommunityListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/bgpServiceCommunities'} # type: ignore
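# A minimal usage sketch, assuming an already-constructed aio
# NetworkManagementClient for this API version; the attribute name
# `bgp_service_communities` and the `name` field are inferred from the
# operation-group class above and the standard ARM resource shape.
async def _sketch_print_communities(client) -> None:
    async for community in client.bgp_service_communities.list():
        print(community.name)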
mit
-1,186,094,966,996,550,100
45.935185
133
0.648846
false
jkomiyama/duelingbanditlib
gather.py
1
1151
#!/usr/bin/env python # coding:utf-8 #a tool for merging multiple simulation results import sys,os,re def avg(elems): return sum(elems)/float(len(elems)) def splitavg(splits): l = len(splits[0]) for sp in splits: if len(sp) != l: print "split size not match" sys.exit() sums = [0 for i in range(l)] for sp in splits: for i in range(l): sums[i] += float(sp[i]) return map(lambda i:i/len(splits), sums) def gather(filenames): lines_files = [] for afile in filenames: lines_files.append([line.strip() for line in file(afile, "r").readlines() if len(line)>0]) l = 0 for i in range(len(lines_files)-1): if len(lines_files[i]) != len(lines_files[i+1]): print "line num does not match!" sys.exit() while l < len(lines_files[0]): if len(lines_files[0][l])==0: pass elif lines_files[0][l][0]=="#": print lines_files[0][l] else: splits = [lines_files[i][l].split(" ") for i in range(len(lines_files))] avgs = splitavg(splits) avgs[0] = int(avgs[0]) print " ".join(map(str, avgs)) l+=1 if __name__ == "__main__": gather(sys.argv[1:])
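# A minimal usage sketch; the result files below are placeholders and must all
# have the same number of lines, as enforced by gather() above.
#
#   python gather.py run1.txt run2.txt run3.txt > averaged.txt
#
# or, equivalently, from Python code:
#
#   gather(["run1.txt", "run2.txt", "run3.txt"])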
mit
-3,247,382,617,199,091,000
24.577778
94
0.591659
false
dofeldsc/vivo_uos
my_pump/test/test_vivopump.py
1
51767
#!/usr/bin/env/python # coding=utf-8 """ test_vivopump.py -- Test cases for vivopump """ import sys sys.path.insert(0, "/var/fis_data/vivo-pump") import unittest from pump.vivopump import new_uri, read_csv, write_csv, vivo_query, write_update_def, \ read_csv_fp, write_csv_fp, get_vivo_ufid, get_vivo_authors, get_vivo_types, get_vivo_sponsorid, \ make_update_query, read_update_def, make_rdf_term, get_graph, \ InvalidDefException, PathLengthException, parse_pages, parse_date_parts from pump.pump import Pump __author__ = "Michael Conlon" __copyright__ = "Copyright 2016 (c) Michael Conlon" __license__ = "New BSD license" __version__ = "1.00" QUERY_PARMS = {'queryuri': 'http://localhost:8080/vivo/api/sparqlQuery', 'username': '[email protected]', 'password': 'test_admin', 'uriprefix': 'http://fis.virtuos.uos.de/vivo/individual/n', 'prefix': ('PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\n' 'PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n' 'PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>\n' 'PREFIX owl: <http://www.w3.org/2002/07/owl#>\n' 'PREFIX vitro: <http://vitro.mannlib.cornell.edu/ns/vitro/0.7#>\n' 'PREFIX bibo: <http://purl.org/ontology/bibo/>\n' 'PREFIX event: <http://purl.org/NET/c4dm/event.owl#>\n' 'PREFIX foaf: <http://xmlns.com/foaf/0.1/>\n' 'PREFIX obo: <http://purl.obolibrary.org/obo/>\n' 'PREFIX skos: <http://www.w3.org/2004/02/skos/core#>\n' 'PREFIX uf: <http://vivo.school.edu/ontology/uf-extension#>\n' 'PREFIX ufVivo: <http://vivo.school.edu/ontology/uf-extension#>\n' 'PREFIX vitrop: <http://vitro.mannlib.cornell.edu/ns/vitro/public#>\n' 'PREFIX vivo: <http://vivoweb.org/ontology/core#>\n' )} class ReplaceInitialsCase(unittest.TestCase): def test_replace_initials_default(self): from pump.vivopump import replace_initials t = replace_initials('This is A. test') self.assertEqual(t, 'This is A test') def test_replace_initials_two(self): from pump.vivopump import replace_initials t = replace_initials('This is A. B. test') self.assertEqual(t, 'This is A B test') def test_replace_initials_consecutive_dots(self): from pump.vivopump import replace_initials t = replace_initials('This is A.. B. test') self.assertEqual(t, 'This is A. B test') def test_replace_initials_consecutive_initials(self): from pump.vivopump import replace_initials t = replace_initials('This is A.B. 
test') self.assertEqual(t, 'This is AB test') class ParsePagesCase(unittest.TestCase): def test_parse_pages_default(self): [a, b] = parse_pages('30-55') print a, b self.assertEqual(a, '30') self.assertEqual(b, '55') def test_parse_pages_no_end(self): [a, b] = parse_pages('30') print a, b self.assertEqual(a, '30') self.assertEqual(b, '') class ParseDatePartsCase(unittest.TestCase): def test_parse_date_parts_default(self): date = parse_date_parts('AUG', '2014') print date self.assertEqual(date, '2014-08-01T00:00:00') def test_parse_date_parts_with_day(self): date = parse_date_parts('AUG 15', '2014') print date self.assertEqual(date, '2014-08-15T00:00:00') def test_parse_date_parts_with_months(self): date = parse_date_parts('JUL-AUG', '2014') print date self.assertEqual(date, '2014-07-01T00:00:00') class NewUriTestCase(unittest.TestCase): def test_new_uri_default(self): uri = new_uri(QUERY_PARMS) print uri self.assertTrue(len(uri) > 0) def test_new_uri_prefix(self): parms = QUERY_PARMS parms['uriprefix'] = 'http://my.vivo.edu/date' uri = new_uri(parms) print uri self.assertTrue(uri.startswith('http://my.vivo.edu')) class ReadUpdateDefTestCase(unittest.TestCase): def test_read_normal_def(self): update_def = read_update_def('data/grant_def.json', prefix=QUERY_PARMS['prefix']) print update_def self.assertTrue(update_def.keys() == ['entity_def', 'column_defs']) def test_substitution(self): from rdflib import URIRef update_def = read_update_def('data/pump_def.json', prefix=QUERY_PARMS['prefix']) self.assertTrue(update_def['entity_def']['type']) == \ URIRef(u'http://vivoweb.org/ontology/core#Building') def test_invalid_multiple_def(self): with self.assertRaises(InvalidDefException): update_def = read_update_def('data/grant_invalid_multiple_def.json', prefix=QUERY_PARMS['prefix']) print update_def def test_valid_closure_object_def(self): update_def = read_update_def('data/mentoring_def.json', prefix=QUERY_PARMS['prefix']) self.assertTrue(set(update_def.keys()) == set(['entity_def', 'column_defs', 'closure_defs'])) def test_invalid_closure_object_def(self): with self.assertRaises(InvalidDefException): update_def = read_update_def('data/grant_invalid_closure_object_def.json', prefix=QUERY_PARMS['prefix']) print update_def def test_novalue_def(self): with self.assertRaises(InvalidDefException): update_def = read_update_def('data/person_novalue_def.json', prefix=QUERY_PARMS['prefix']) print update_def def test_closure_not_in_column_def(self): with self.assertRaises(InvalidDefException): update_def = read_update_def('data/grant_invalid_closure_def.json', prefix=QUERY_PARMS['prefix']) print update_def def test_pathlength_def(self): with self.assertRaises(PathLengthException): p = Pump('data/grant_invalid_path_length_def.json') n = p.get() print n def test_update_def_order(self): update_def = read_update_def('data/grant_def.json', prefix=QUERY_PARMS['prefix']) self.assertEqual(update_def['entity_def']['order'][0:4], [u'deptid', u'direct_costs', u'cois', u'end_date']) class MakeUpdateQueryTestCase(unittest.TestCase): def test_make_query(self): update_def = read_update_def('../examples/education//education_def.json', prefix=QUERY_PARMS['prefix']) print update_def for column_name, path in update_def['column_defs'].items(): update_query = make_update_query(update_def['entity_def']['entity_sparql'], path) print update_query self.assertTrue(len(update_query) > 0) class MakeRdfTermTestCase(unittest.TestCase): def test_uriref_case(self): from rdflib import URIRef input_dict = { "type": "uri", "value": 
"http://vivo.school.edu/individual/n531532305" } rdf_term = make_rdf_term(input_dict) print rdf_term self.assertTrue(type(rdf_term) is URIRef) class GetGraphTestCase(unittest.TestCase): def test_normal_case(self): update_def = read_update_def('data/grant_def.json', prefix=QUERY_PARMS['prefix']) a = get_graph(update_def, QUERY_PARMS) for (s, p, o) in a.triples((None, None, None)): print s, p, o self.assertTrue(len(a) == 29) class ReadCSVTestCase(unittest.TestCase): def test_read_csv_keys(self): data = read_csv("data/extension.txt", delimiter='\t') print data self.assertTrue(data.keys() == range(1, 74)) def test_sorted_csv(self): data = read_csv("data/extension.txt", delimiter='\t') sdata = {} order = sorted(data, key=lambda rown: data[rown]['name'], reverse=True) row = 1 for o in order: sdata[row] = data[o] row += 1 print sdata def test_read_csv_minimal(self): data = read_csv("data/minimal.txt", delimiter='|') data_string = "{1: {u'overview': u'None', u'uri': u'http://vivo.school.edu/individual/n7023304'}}" self.assertEqual(data_string, str(data)) def test_read_csv_fp(self): fp = open("data/minimal.txt", 'rU') data = read_csv_fp(fp, delimiter='|') fp.close() data_string = "{1: {u'overview': u'None', u'uri': u'http://vivo.school.edu/individual/n7023304'}}" self.assertEqual(data_string, str(data)) class WriteCSVTestCase(unittest.TestCase): def test_write_csv(self): data = read_csv("data/buildings.txt", delimiter='\t') write_csv("data/buildings_out.txt", data, delimiter='\t') data2 = read_csv("data/buildings.txt", delimiter='\t') self.assertTrue(data == data2) def test_write_csv_fp(self): data = read_csv("data/buildings.txt", delimiter='\t') fp = open('data/buildings_out.txt', 'w') write_csv_fp(fp, data, delimiter='\t') fp.close() data2 = read_csv("data/buildings.txt", delimiter='\t') self.assertTrue(data == data2) class VIVOQueryTestCase(unittest.TestCase): def test_vivo_query(self): result = vivo_query(""" SELECT ?label WHERE { <http://vivo.school.edu/individual/n1133> rdfs:label ?label } """, QUERY_PARMS) print result self.assertTrue(len(result) > 0) def test_bad_request(self): from SPARQLWrapper import SPARQLExceptions with self.assertRaises(SPARQLExceptions.QueryBadFormed): result = vivo_query(""" SEWECT ?label WHERE { <http://vivo.school.edu/individual/n25562> rdfs:label ?label } """, QUERY_PARMS) print result class VIVOGetTypesTestCase(unittest.TestCase): def test_vivo_get_types(self): result = get_vivo_types("?uri a foaf:Person .", QUERY_PARMS) self.assertTrue(len(result) > 0) class VIVOGetUFIDTestCase(unittest.TestCase): def test_vivo_get_ufid(self): result = get_vivo_ufid(QUERY_PARMS) self.assertTrue(len(result) > 0) class VIVOGetAuthorsTestCase(unittest.TestCase): def test_vivo_get_authors(self): result = get_vivo_authors(QUERY_PARMS) self.assertTrue(len(result) > 0) class VIVOGetSponsorsTestCase(unittest.TestCase): def test_vivo_get_sponsorid(self): result = get_vivo_sponsorid(QUERY_PARMS) print len(result) self.assertTrue(len(result) > 0) class WriteUpdateDefTestCase(unittest.TestCase): def test_create_file(self): import os.path update_def = "{}" filename = "__write_update_def_test_create_file.json" write_update_def(update_def, filename) self.assertTrue(os.path.isfile(filename)) os.remove(filename) class PumpTestCase(unittest.TestCase): def test_pump_serialize(self): p = Pump("data/pump_def.json") self.assertTrue(p.serialize().startswith("{\"entity_def\":")) def test_pump_filename(self): p = Pump("data/building_def.json") self.assertTrue("vivo:Building" in p.serialize()) def 
test_pump_summarize(self): p = Pump("data/building_def.json") result = p.summarize() print result self.assertTrue("Pump Summary for data/building" in result) def test_pump_test(self): p = Pump("data/building_def.json") result = p.test() print result self.assertTrue("Test end" in result) def test_pump_get_default_filename(self): import os p = Pump("data/building_def.json") filename = p.out_filename p.get() self.assertTrue(os.path.isfile(filename)) os.remove(filename) def test_pump_get(self): p = Pump("data/building_def.json") n_rows = p.get() print n_rows self.assertEqual(2, n_rows) def test_pump_update(self): p = Pump("data/building_def.json") p.out_filename = "data/pump_data.txt" [add, sub] = p.update() self.assertEqual(2, len(add)) self.assertEqual(2, len(sub)) class PumpGetTestCase(unittest.TestCase): def test_get_no_filter(self): p = Pump() p.filter = False n_rows = p.get() self.assertEqual(2, n_rows) def test_get_filter(self): p = Pump("data/building_def.json") p.out_filename = "data/buildings_filtered.txt" n_rows = p.get() self.assertEqual(2, n_rows) def test_boolean_get(self): from pump.vivopump import read_csv p = Pump("data/faculty_boolean_def.json") p.get() data = read_csv('pump_data.txt', delimiter='\t') nfac = 0 for row, vals in data.items(): if vals['faculty'] == '1': nfac += 1 self.assertEqual(5, nfac) class PumpUpdateCallTestCase(unittest.TestCase): def test_default_usage(self): p = Pump() self.assertTrue("pump_def.json" in p.summarize()) # Using the default definition def test_no_update_file(self): p = Pump() p.out_filename = 'data/no_update_file.txt' with self.assertRaises(IOError): p.update() def test_normal_inject(self): p = Pump() p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n8984374104', u'abbreviation': u'None'}} p.update() self.assertTrue("8984374104" in str(p.update_data)) # Using the injected data, not default def test_empty_column_defs(self): Pump("data/building_empty_column_def.json") self.assertTrue(True) # No error thrown reading def def test_missing_uri_column_inject(self): p = Pump() p.update_data = {1: {u'overview': u'None'}} with self.assertRaises(KeyError): p.update() def test_inject_empty_original_graph(self): from rdflib import Graph, URIRef p = Pump() p.original_graph = Graph() p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n8984374104', u'abbreviation': u'None'}} [add, sub] = p.update() self.assertTrue( len(add) == 1 and len(sub) == 0 and (None, URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#type"), URIRef("http://vivoweb.org/ontology/core#Building")) in add) class MakeRdfTermFromSourceTestCase(unittest.TestCase): def test_empty(self): from rdflib import Literal from pump.vivopump import make_rdf_term_from_source step_def = { "object": { "literal": True, "datatype": "xsd:string" } } source_val = '' rdf_term = make_rdf_term_from_source(source_val, step_def) self.assertTrue(unicode(rdf_term) == unicode(Literal(''))) def test_empty_compare(self): from rdflib import Literal from pump.vivopump import make_rdf_term_from_source step_def = { "object": { "literal": True, "datatype": "xsd:string" } } source_val = '' rdf_term = make_rdf_term_from_source(source_val, step_def) self.assertTrue(rdf_term == Literal('', datatype="http://www.w3.org/2001/XMLSchema#string")) def test_lang(self): from rdflib import Literal from pump.vivopump import make_rdf_term_from_source step_def = { "object": { "literal": True, "lang": "fr" } } source_val = 'a' rdf_term = make_rdf_term_from_source(source_val, step_def) self.assertTrue(rdf_term 
== Literal('a', lang="fr")) def test_ref(self): from rdflib import URIRef from pump.vivopump import make_rdf_term_from_source step_def = { "object": { "literal": False } } source_val = 'http://any' rdf_term = make_rdf_term_from_source(source_val, step_def) self.assertTrue(rdf_term == URIRef("http://any")) def test_ref_string(self): from rdflib import URIRef from pump.vivopump import make_rdf_term_from_source step_def = { "object": { "literal": False } } source_val = 'http://any' rdf_term = make_rdf_term_from_source(source_val, step_def) self.assertTrue(unicode(rdf_term) == unicode(URIRef("http://any"))) class PumpUpdateLiteralsTestCase(unittest.TestCase): def test_add_unicode(self): from rdflib import URIRef, Literal from testgraph import TestGraph p = Pump("data/person_def.json") p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n710', u'name': u'ქართული'}} p.original_graph = TestGraph() [add, sub] = p.update() self.assertTrue( len(add) == 1 and len(sub) == 0 and (URIRef("http://vivo.school.edu/individual/n710"), URIRef("http://www.w3.org/2000/01/rdf-schema#label"), Literal("ქართული")) in add) def test_change_unicode(self): from rdflib import URIRef, Literal from testgraph import TestGraph p = Pump("data/person_def.json") p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n711', u'name': u'বিষ্ণুপ্রিয়া মণিপুরী'}} p.original_graph = TestGraph() [add, sub] = p.update() self.assertTrue( len(add) == 1 and len(sub) == 1 and (URIRef("http://vivo.school.edu/individual/n711"), URIRef("http://www.w3.org/2000/01/rdf-schema#label"), Literal("বিষ্ণুপ্রিয়া মণিপুরী")) in add and (URIRef("http://vivo.school.edu/individual/n711"), URIRef("http://www.w3.org/2000/01/rdf-schema#label"), Literal("Ελληνικά")) in sub) def test_delete_unicode(self): from rdflib import URIRef, Literal from testgraph import TestGraph p = Pump("data/person_def.json") p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n711', u'name': u'None'}} p.original_graph = TestGraph() [add, sub] = p.update() self.assertTrue( len(add) == 0 and len(sub) == 1 and (URIRef("http://vivo.school.edu/individual/n711"), URIRef("http://www.w3.org/2000/01/rdf-schema#label"), Literal("Ελληνικά")) in sub) def test_change_with_datatype(self): from rdflib import URIRef, Literal, XSD from testgraph import TestGraph p = Pump("data/building_def.json") p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n1001011525', u'abbreviation': u'PH9'}} p.original_graph = TestGraph() [add, sub] = p.update() self.assertTrue( len(add) == 1 and len(sub) == 1 and (URIRef("http://vivo.school.edu/individual/n1001011525"), URIRef("http://vivoweb.org/ontology/core#abbreviation"), Literal("PH9", datatype=XSD.string)) in add) def test_change_with_lang(self): from rdflib import URIRef, Literal from testgraph import TestGraph p = Pump("data/building_def.json") p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n1001011525', u'name': u'Building 42'}} p.original_graph = TestGraph() [add, sub] = p.update() self.assertTrue( len(add) == 1 and len(sub) == 1 and (URIRef("http://vivo.school.edu/individual/n1001011525"), URIRef("http://www.w3.org/2000/01/rdf-schema#label"), Literal("Building 42", lang="en-US")) in add) def test_add_without_datatype(self): from rdflib import URIRef, Literal from testgraph import TestGraph p = Pump("data/building_def.json") p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n1001011525', u'url': u'http://a'}} p.original_graph = TestGraph() [add, sub] = p.update() 
self.assertTrue( len(add) == 4 and len(sub) == 0 and (None, URIRef("http://vivoweb.org/ontology/core#linkURI"), Literal("http://a")) in add) def test_change_without_lang(self): from rdflib import URIRef, Literal from testgraph import TestGraph p = Pump("data/org_def.json") p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n2525', u'name': u'Ad ver tising'}} p.original_graph = TestGraph() [add, sub] = p.update() self.assertTrue( len(add) == 1 and len(sub) == 1 and (URIRef("http://vivo.school.edu/individual/n2525"), URIRef("http://www.w3.org/2000/01/rdf-schema#label"), Literal("Ad ver tising")) in add) class PumpUpdateTwoTestCase(unittest.TestCase): # Test various scenarios of a length two path multiple predicate/single leaf def test_blank_to_empty(self): from testgraph import TestGraph p = Pump("data/grant_pi_def.json") p.original_graph = TestGraph() p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n44', u'pis': u''}} [add, sub] = p.update() self.assertTrue( len(add) == 0 and len(sub) == 0) def test_none_to_empty(self): from testgraph import TestGraph p = Pump("data/grant_pi_def.json") p.original_graph = TestGraph() p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n44', u'pis': u'None'}} [add, sub] = p.update() self.assertTrue( len(add) == 0 and len(sub) == 0) def test_add_one_to_empty(self): from rdflib import URIRef from testgraph import TestGraph p = Pump("data/grant_pi_def.json") p.original_graph = TestGraph() p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n44', u'pis': u'http://vivo.school.edu/individual/n1133'}} [add, sub] = p.update() self.assertTrue( len(add) == 3 and len(sub) == 0 and (URIRef("http://vivo.school.edu/individual/n44"), URIRef("http://vivoweb.org/ontology/core#relates"), None) in add) def test_add_two_to_empty(self): from rdflib import URIRef from testgraph import TestGraph p = Pump("data/grant_pi_def.json") p.original_graph = TestGraph() p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n44', u'pis': u'http://vivo.school.edu/individual/n1133;http://vivo.school.edu/individual/n3413'}} [add, sub] = p.update() self.assertTrue( len(add) == 6 and len(sub) == 0 and (URIRef("http://vivo.school.edu/individual/n44"), URIRef("http://vivoweb.org/ontology/core#relates"), None) in add) def test_blank_to_two(self): from testgraph import TestGraph p = Pump("data/grant_pi_def.json") p.original_graph = TestGraph() p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n45', u'pis': u''}} [add, sub] = p.update() self.assertTrue( len(add) == 0 and len(sub) == 0) def test_none_to_two(self): from testgraph import TestGraph p = Pump("data/grant_pi_def.json") p.original_graph = TestGraph() p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n45', u'pis': u'None'}} [add, sub] = p.update() self.assertTrue( len(add) == 0 and len(sub) == 6) def test_add_existing_to_two(self): from testgraph import TestGraph p = Pump("data/grant_pi_def.json") p.original_graph = TestGraph() p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n45', u'pis': u'http://vivo.school.edu/individual/n1133'}} [add, sub] = p.update() self.assertTrue( len(add) == 0 and len(sub) == 3) def test_add_two_existing_to_two(self): from testgraph import TestGraph p = Pump("data/grant_pi_def.json") p.original_graph = TestGraph() p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n45', u'pis': u'http://vivo.school.edu/individual/n1133;' 'http://vivo.school.edu/individual/n3413'}} [add, sub] = p.update() 
self.assertTrue( len(add) == 0 and len(sub) == 0) def test_add_one_new_to_two(self): from testgraph import TestGraph p = Pump("data/grant_pi_def.json") p.original_graph = TestGraph() p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n45', u'pis': u'http://vivo.school.edu/individual/n1134'}} [add, sub] = p.update() self.assertTrue( len(add) == 3 and len(sub) == 6) def test_add_one_new_one_existing_to_two(self): from testgraph import TestGraph p = Pump("data/grant_pi_def.json") p.original_graph = TestGraph() p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n45', u'pis': u'http://vivo.school.edu/individual/n1133;' 'http://vivo.school.edu/individual/n3414'}} [add, sub] = p.update() self.assertTrue( len(add) == 3 and len(sub) == 3) def test_add_two_new_to_two(self): from testgraph import TestGraph p = Pump("data/grant_pi_def.json") p.original_graph = TestGraph() p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n45', u'pis': u'http://vivo.school.edu/individual/n1134;' 'http://vivo.school.edu/individual/n3414'}} [add, sub] = p.update() self.assertTrue( len(add) == 6 and len(sub) == 6) def test_add_two_new_two_existing_to_two(self): from testgraph import TestGraph p = Pump("data/grant_pi_def.json") p.original_graph = TestGraph() p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n45', u'pis': u'http://vivo.school.edu/individual/n1133;' 'http://vivo.school.edu/individual/n1134;' 'http://vivo.school.edu/individual/n3413;' 'http://vivo.school.edu/individual/n3414;'}} [add, sub] = p.update() self.assertTrue( len(add) == 6 and len(sub) == 0) class PumpUpdateDataTestCase(unittest.TestCase): def test_unique_one_add(self): from rdflib import URIRef, Literal from testgraph import TestGraph p = Pump() p.original_graph = TestGraph() p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n2525', u'abbreviation': u'PH9'}} [add, sub] = p.update() self.assertTrue( len(add) == 1 and len(sub) == 0 and (URIRef("http://vivo.school.edu/individual/n2525"), URIRef("http://vivoweb.org/ontology/core#abbreviation"), Literal("PH9")) in add) def test_unique_one_change(self): from rdflib import URIRef, Literal, XSD from testgraph import TestGraph p = Pump() p.original_graph = TestGraph() p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n1001011525', u'abbreviation': u'JWR2'}} [add, sub] = p.update() self.assertTrue( len(add) == 1 and (URIRef("http://vivo.school.edu/individual/n1001011525"), URIRef("http://vivoweb.org/ontology/core#abbreviation"), Literal("JWR2")) in add and len(sub) == 1 and (URIRef("http://vivo.school.edu/individual/n1001011525"), URIRef("http://vivoweb.org/ontology/core#abbreviation"), Literal("JWRU", datatype=XSD.string)) in sub) def test_unique_one_delete(self): from rdflib import URIRef, Literal, XSD from testgraph import TestGraph p = Pump() p.original_graph = TestGraph() p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n1001011525', u'abbreviation': u'None'}} [add, sub] = p.update() self.assertTrue( len(add) == 0 and len(sub) == 1 and (URIRef("http://vivo.school.edu/individual/n1001011525"), URIRef("http://vivoweb.org/ontology/core#abbreviation"), Literal("JWRU", datatype=XSD.string)) in sub) def test_unique_two_add(self): from rdflib import URIRef from testgraph import TestGraph p = Pump("data/grant_dates_def.json") p.original_graph = TestGraph() # In this example, dates are enumerated p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n44', u'start_date': u'2006'}} [add, sub] = 
p.update() self.assertTrue( len(add) == 3 and len(sub) == 0 and (URIRef("http://vivo.school.edu/individual/n44"), URIRef("http://vivoweb.org/ontology/core#dateTimeInterval"), None) in add) def test_unique_two_change(self): from rdflib import URIRef from testgraph import TestGraph p = Pump("data/grant_dates_def.json") p.original_graph = TestGraph() # In this example, dates are enumerated p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n125', u'end_date': u'2006'}} [add, sub] = p.update() self.assertTrue( len(add) == 1 and len(sub) == 1 and (URIRef("http://vivo.school.edu/individual/n126"), URIRef("http://vivoweb.org/ontology/core#end"), None) in add and (URIRef("http://vivo.school.edu/individual/n126"), URIRef("http://vivoweb.org/ontology/core#end"), None) in sub) def test_unique_two_delete(self): from rdflib import URIRef from testgraph import TestGraph p = Pump("data/grant_dates_def.json") p.original_graph = TestGraph() # In this example, dates are enumerated p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n125', u'end_date': u'None'}} [add, sub] = p.update() self.assertTrue( len(add) == 0 and len(sub) == 1 and (URIRef("http://vivo.school.edu/individual/n126"), URIRef("http://vivoweb.org/ontology/core#end"), None) in sub) def test_unique_three_add_fullpath(self): from rdflib import URIRef, Literal, XSD from testgraph import TestGraph p = Pump("data/org_def.json") p.original_graph = TestGraph() # Add a zip code to an org without an address, so a full path will need # to be created p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n2525', u'zip': u'32653'}} [add, sub] = p.update() self.assertTrue( len(add) == 5 and len(sub) == 0 and (None, URIRef("http://www.w3.org/2006/vcard/ns#postalCode"), Literal("32653", datatype=XSD.string)) in add) def test_unique_three_add_to_existing(self): from rdflib import URIRef, Literal, XSD from testgraph import TestGraph # Add a zip code to an address already exists, the zip needs to be # added to the existing address p = Pump("data/org_def.json") p.original_graph = TestGraph() p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n3535', u'zip': u'32653'}} [add, sub] = p.update() self.assertTrue( len(add) == 1 and len(sub) == 0 and (None, URIRef("http://www.w3.org/2006/vcard/ns#postalCode"), Literal("32653", datatype=XSD.string)) in add) def test_unique_three_change(self): from rdflib import URIRef, Literal, XSD from testgraph import TestGraph # Change the zip code on an existing address p = Pump("data/org_def.json") p.original_graph = TestGraph() p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n4545', u'zip': u'32653'}} [add, sub] = p.update() self.assertTrue( len(add) == 1 and len(sub) == 1 and (None, URIRef("http://www.w3.org/2006/vcard/ns#postalCode"), Literal("32653", datatype=XSD.string)) in add and (None, URIRef("http://www.w3.org/2006/vcard/ns#postalCode"), Literal("32604", datatype=XSD.string)) in sub) def test_unique_three_delete(self): from rdflib import URIRef, Literal, XSD from testgraph import TestGraph # Delete the zip code on an existing address p = Pump("data/org_def.json") p.original_graph = TestGraph() p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n4545', u'zip': u'None'}} [add, sub] = p.update() self.assertTrue( len(add) == 0 and len(sub) == 1 and (None, URIRef("http://www.w3.org/2006/vcard/ns#postalCode"), Literal("32604", datatype=XSD.string)) in sub) def test_unique_three_delete_not_found(self): from testgraph import TestGraph # Delete the 
zip code from an existing address that doesn't have a zip code. p = Pump("data/org_def.json") p.original_graph = TestGraph() p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n3535', u'zip': u'None'}} [add, sub] = p.update() self.assertTrue(len(add) == 0 and len(sub) == 0) def test_unique_three_add(self): from rdflib import URIRef, Literal, XSD from testgraph import TestGraph # Add a start date to a grant. There is no date time interval, so a full path will need to be created p = Pump("data/grant_def.json") p.original_graph = TestGraph() p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n44', u'start_date': u'2015-03-01'}} [add, sub] = p.update() self.assertTrue( len(add) == 5 and len(sub) == 0 and (None, URIRef("http://vivoweb.org/ontology/core#dateTime"), Literal("2015-03-01", datatype=XSD.datetime)) in add) def test_unique_three_add_partial_path(self): from rdflib import URIRef, Literal, XSD from testgraph import TestGraph p = Pump("data/grant_def.json") p.original_graph = TestGraph() p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n55', u'start_date': u'2006-03-01'}} [add, sub] = p.update() self.assertTrue( len(add) == 3 and len(sub) == 0 and (None, URIRef("http://vivoweb.org/ontology/core#dateTime"), Literal("2006-03-01", datatype=XSD.datetime)) in add) def test_unique_three_change_datetime(self): from rdflib import URIRef, Literal, XSD from testgraph import TestGraph p = Pump("data/grant_def.json") p.original_graph = TestGraph() # WARNING. This test passes by changing the start date value on an existing datetime. # Not sure if this is the desired behavior. p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n125', u'start_date': u'2006-03-02'}} [add, sub] = p.update() self.assertTrue( len(add) == 1 and len(sub) == 1 and (None, URIRef("http://vivoweb.org/ontology/core#dateTime"), Literal("2006-03-02", datatype=XSD.datetime)) in add and (None, URIRef("http://vivoweb.org/ontology/core#dateTime"), Literal("2010-04-01", datatype=XSD.datetime)) in sub) def test_unique_three_delete_datetime(self): from rdflib import URIRef, Literal, XSD from testgraph import TestGraph # WARNING: Delete start date value from existing datetime interval. 
This may not be the desirable data # management action p = Pump("data/grant_def.json") p.original_graph = TestGraph() p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n125', u'start_date': u'None'}} [add, sub] = p.update() self.assertTrue( len(add) == 0 and len(sub) == 1 and (None, URIRef("http://vivoweb.org/ontology/core#dateTime"), Literal("2010-04-01", datatype=XSD.datetime)) in sub) def test_multiple_one_add(self): from rdflib import URIRef from testgraph import TestGraph # Add multiple values for an attribute to an entity that has no values for the attribute p = Pump("data/person_def.json") p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n1723097935', u'research_areas': u'http://vivo.school.edu/individual/n2551317090;http://vivo.school.edu/individual/n157098'}} p.original_graph = TestGraph() [add, sub] = p.update() self.assertTrue( len(add) == 2 and len(sub) == 0 and (URIRef("http://vivo.school.edu/individual/n1723097935"), URIRef("http://vivoweb.org/ontology/core#hasResearchArea"), URIRef("http://vivo.school.edu/individual/n2551317090")) in add and (URIRef("http://vivo.school.edu/individual/n1723097935"), URIRef("http://vivoweb.org/ontology/core#hasResearchArea"), URIRef("http://vivo.school.edu/individual/n157098")) in add) def test_multiple_one_change_nothing(self): from testgraph import TestGraph # Do nothing if the multiple values specified match those in VIVO p = Pump("data/person_def.json") p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n1723097935', u'types': u'fac;person'}} p.original_graph = TestGraph() [add, sub] = p.update() self.assertTrue(len(add) == 0 and len(sub) == 0) def test_multiple_one_change(self): from testgraph import TestGraph from rdflib import URIRef # Change the set of values adding one and removing another p = Pump("data/person_def.json") p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n1723097935', u'types': u'person;pd'}} p.original_graph = TestGraph() [add, sub] = p.update() self.assertTrue(len(add) == 1 and len(sub) == 1 and (URIRef("http://vivo.school.edu/individual/n1723097935"), URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#type"), URIRef("http://vivoweb.org/ontology/core#Postdoc")) in add and (URIRef("http://vivo.school.edu/individual/n1723097935"), URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#type"), URIRef("http://vivoweb.org/ontology/core#FacultyMember")) in sub) def test_multiple_one_delete(self): from testgraph import TestGraph # Empty the set of values p = Pump("data/person_def.json") p.original_graph = TestGraph() p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n25674', u'research_areas': u'None'}} [add, sub] = p.update() self.assertTrue(len(add) == 0 and len(sub) == 4) class UpdateURITestCase(unittest.TestCase): def test_uri_not_found(self): from testgraph import TestGraph # Use the URI when not found p = Pump("data/person_def.json") p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n1723097936', u'types': u'fac;person'}} p.original_graph = TestGraph() [add, sub] = p.update() self.assertTrue(len(add) == 2 and len(sub) == 0) def test_uri_is_blank(self): from testgraph import TestGraph # Use the URI when not found p = Pump("data/person_def.json") p.update_data = {1: {u'uri': u' ', u'types': u'fac;person'}} p.original_graph = TestGraph() [add, sub] = p.update() self.assertTrue(len(add) == 2 and len(sub) == 0) def test_uri_is_invalid(self): from testgraph import TestGraph # Use the URI when not found p = Pump("data/person_def.json") 
p.update_data = {1: {u'uri': u'not a uri', u'types': u'fac;person'}} p.original_graph = TestGraph() with self.assertRaises(Exception): [add, sub] = p.update() print add, sub class BooleanColumnTestCase(unittest.TestCase): def test_summarize(self): from testgraph import TestGraph p = Pump("data/person_def.json") p.original_graph = TestGraph() print p.update_def p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n1723097935', u'any1': u'1'}} print p.summarize() # No exception thrown by summarize def test_add(self): from testgraph import TestGraph from rdflib import URIRef p = Pump("data/person_def.json") p.original_graph = TestGraph() print p.update_def p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n1723097935', u'any1': u'y'}} [add, sub] = p.update() self.assertTrue(len(add) == 1 and len(sub) == 0 and (URIRef("http://vivo.school.edu/individual/n1723097935"), URIRef("http://vivoweb.org/ontology/core#hasResearchArea"), URIRef("http://any1")) in add) def test_remove(self): from testgraph import TestGraph from rdflib import URIRef p = Pump("data/person_def.json") p.original_graph = TestGraph() p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n25674', u'any1': u'n'}} [add, sub] = p.update() self.assertTrue(len(add) == 0 and len(sub) == 1 and (URIRef("http://vivo.school.edu/individual/n25674"), URIRef("http://vivoweb.org/ontology/core#hasResearchArea"), URIRef("http://any1")) in sub) class ClosureTestCase(unittest.TestCase): def test_read_closure(self): Pump("data/teaching_def.json") self.assertTrue(True) # No exception thrown when reading a def with a closure def test_normal_closure(self): from testgraph import TestGraph from rdflib import URIRef p = Pump("data/teaching_def.json") p.original_graph = TestGraph() p.update_data = {1: {u'uri': u'', u'instructor': 'http://orcid.org/0000-0002-1305-8447', u'course': 'Introduction to Statistics', u'start_date': '2012', u'end_date': '2013'}} [add, sub] = p.update() self.assertTrue(len(add) == 8 and len(sub) == 0 and (URIRef("http://vivo.school.edu/individual/n25674"), URIRef("http://purl.obolibrary.org/obo/BFO_0000056"), URIRef("http://vivo.school.edu/individual/n7501")) in add) class PumpMergeTestCase(unittest.TestCase): def test_show_merge(self): from testgraph import TestGraph p = Pump("data/person_def.json") p.original_graph = TestGraph() p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n25674', u'action': u'b'}, 2: {u'uri': u'http://vivo.school.edu/individual/n709', u'action': u'a1'}, 3: {u'uri': u'http://vivo.school.edu/individual/n710', u'action': u''}, 4: {u'uri': u'http://vivo.school.edu/individual/n1723097935', u'action': u'a1'}, 5: {u'uri': u'http://vivo.school.edu/individual/n2084211328', u'action': u'a'}, 6: {u'uri': u'http://vivo.school.edu/individual/n708', u'action': u'b1'}, 7: {u'uri': u'http://vivo.school.edu/individual/n711', u'action': u'a1'}, } [add, sub] = p.update() self.assertTrue(len(add) == 2 and len(sub) == 6) def test_no_primary_merge(self): from testgraph import TestGraph p = Pump("data/person_def.json") p.original_graph = TestGraph() p.update_data = {3: {u'uri': u'http://vivo.school.edu/individual/n710', u'action': u''}, 4: {u'uri': u'http://vivo.school.edu/individual/n1723097935', u'action': u'a1'}, 7: {u'uri': u'http://vivo.school.edu/individual/n711', u'action': u'a1'}, } [add, sub] = p.update() self.assertTrue(len(add) == 0 and len(sub) == 0) def test_no_secondary_merge(self): from testgraph import TestGraph p = Pump("data/person_def.json") p.original_graph 
= TestGraph() p.update_data = {3: {u'uri': u'http://vivo.school.edu/individual/n710', u'action': u''}, 4: {u'uri': u'http://vivo.school.edu/individual/n1723097935', u'action': u'a'}, 7: {u'uri': u'http://vivo.school.edu/individual/n711', u'action': u'a'}, } [add, sub] = p.update() self.assertTrue(len(add) == 0 and len(sub) == 0) class PumpRemoveTestCase(unittest.TestCase): def test_uri_not_found_case(self): from testgraph import TestGraph p = Pump("data/person_def.json") p.original_graph = TestGraph() p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n20845', u'action': u'remove'}} [add, sub] = p.update() self.assertTrue(len(add) == 0 and len(sub) == 0) def test_single_uri_case(self): from testgraph import TestGraph p = Pump("data/person_def.json") p.original_graph = TestGraph() p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n2084211328', u'action': u'Remove'}} [add, sub] = p.update() self.assertTrue(len(add) == 0 and len(sub) == 1) def test_large_case(self): from testgraph import TestGraph p = Pump("data/person_def.json") p.original_graph = TestGraph() p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n25674', u'action': u'REMOVE'}} [add, sub] = p.update() self.assertTrue(len(add) == 0 and len(sub) == 8) def test_not_found(self): from rdflib import Graph p = Pump("data/person_def.json") p.original_graph = Graph() # empty graph p.update_data = {1: {u'uri': u'http://vivo.school.edu/individual/n12345678', u'action': u'Remove'}} [add, sub] = p.update() self.assertTrue(len(add) == 0 and len(sub) == 0) class PumpEnumTestCase(unittest.TestCase): def test_normal_case(self): p = Pump("data/person_def.json") summary = p.summarize() self.assertTrue(summary.find('people_types') > -1) class CreateEnumTestCase(unittest.TestCase): def test_normal_case(self): from pump.vivopump import create_enum import os filename = "data/__test_create_enum.txt" query = "select ?short ?vivo where {?vivo a foaf:Person . ?vivo rdfs:label ?short .} ORDER BY ?short" create_enum(filename, query, QUERY_PARMS) self.assertTrue(os.path.isfile(filename)) os.remove(filename) class PubMedTest(unittest.TestCase): def test_get_person_vivo_pmids(self): from pubmed.pubmed import get_person_vivo_pmids result = get_person_vivo_pmids("http://vivo.school.edu/individual/n1133", QUERY_PARMS) print result self.assertTrue(len("PMIDList") > 0) def test_get_catalyst_pmids(self): from pubmed.pubmed import get_catalyst_pmids result = get_catalyst_pmids(first="Michael", middle="", last="Conlon", email=["[email protected]", "[email protected]"], affiliation=["%university of florida%", "%ufl.edu%"]) print result self.assertTrue(len(result) > 0) def test_catalyst_getpmids_xml(self): from pubmed.pubmed import catalyst_getpmids_xml result = catalyst_getpmids_xml(first="David", middle="R", last="Nelson", email=["[email protected]"], affiliation=["%University of Florida%"]) print result self.assertTrue(result.find("PMIDList") > 0) if __name__ == "__main__": unittest.main()
gpl-3.0
-1,623,289,697,235,954,200
41.088835
142
0.548826
false
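The test record above repeatedly exercises one pattern: build a Pump from a JSON definition, assign update_data keyed by row number, and call update() to obtain the triples to add and subtract. A minimal sketch of that flow, assuming the vivo-pump package layout used in those tests; the definition file, URI and column values are illustrative only:

# Sketch of the update flow exercised by the tests above (assumed vivo-pump API).
from pump.vivopump import Pump

p = Pump("data/grant_def.json")  # JSON definition mapping columns to RDF paths
p.update_data = {
    1: {u'uri': u'http://vivo.school.edu/individual/n125',
        u'start_date': u'2006-03-02'},  # change one attribute of one entity
}
[add, sub] = p.update()  # triples to add to and remove from the original graph
print(len(add), len(sub))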
jadsonjs/DataScience
python/arrays_dimesion.py
1
2664
# Consider the case where you have one sequence of multiple time steps and one feature.
from numpy import array

data = array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
# We can then use the reshape() function on the NumPy array to reshape this one-dimensional array
# into a three-dimensional array with 1 sample, 10 time steps, and 1 feature at each time step.
data = data.reshape((1, 10, 1))
print(data.shape)

# Consider the case where you have multiple parallel series as input for your model.
# For example, this could be two parallel series of 10 values:
# series 1: 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0
# series 2: 1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1
data = array([
    [0.1, 1.0],
    [0.2, 0.9],
    [0.3, 0.8],
    [0.4, 0.7],
    [0.5, 0.6],
    [0.6, 0.5],
    [0.7, 0.4],
    [0.8, 0.3],
    [0.9, 0.2],
    [1.0, 0.1]])
# This data can be framed as 1 sample with 10 time steps and 2 features.
# It can be reshaped as a 3D array as follows:
# model = Sequential()
# model.add(LSTM(32, input_shape=(10, 2)))
# model.add(Dense(1))
data = data.reshape(1, 10, 2)
print(data.shape)

# Here, we have 25 samples, 200 time steps per sample, and 1 feature.
# The original snippet used an undefined length `n` and implicitly assumed a
# 5,000-value series; both are defined here so the example runs as written.
n = 5000
data = array([i / float(n) for i in range(n)])
# split into samples (e.g. 5000/200 = 25)
samples = list()
length = 200
# step over the 5,000 in jumps of 200
for i in range(0, n, length):
    # grab from i to i + 200
    sample = data[i:i + length]
    samples.append(sample)
print(len(samples))
data = array(samples)
print(data.shape)
# reshape into [samples, timesteps, features]
# expect [25, 200, 1]
data = data.reshape((len(samples), length, 1))
print(data.shape)

# https://machinelearningmastery.com/reshape-input-data-long-short-term-memory-networks-keras/
#
# For a feed-forward network, your input has the shape (number of samples, number of features).
# With an LSTM/RNN, you add a time dimension, and your input shape becomes
# (number of samples, number of timesteps, number of features). This is in the documentation.
# So if your feature dimension is 5, and you have 2 timesteps, your input could look like
# [ [
#     [1, 2, 3, 4, 5],
#     [2, 3, 4, 5, 6]
#   ],
#   [
#     [2, 4, 6, 8, 0],
#     [9, 8, 7, 6, 5]
#   ]
# ]
# Your output shape depends on how you configure the net. If your LSTM/RNN has
# return_sequences=False, you'll have one label per sequence; if you set
# return_sequences=True, you'll have one label per timestep.
# So in the example, [ [[1,2,3,4,5], [2,3,4,5,6]], [[2,4,6,8,0], [9,8,7,6,5]] ]
# the input_shape is (2, 2, 5).
# And a 'sequence' is '[[1,2,3,4,5], [2,3,4,5,6]]' I assume, and it has 2 timesteps.
apache-2.0
-1,277,704,131,615,162,400
25.64
139
0.631381
false
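The record above explains how to reshape series data into the [samples, timesteps, features] layout an LSTM expects. As a complement, here is a minimal sketch of feeding such an array into the Sequential/LSTM model outlined in the file's comments; it assumes Keras 2.x is installed, and the toy target y and the layer sizes are invented for illustration:

from numpy import array
from keras.models import Sequential
from keras.layers import LSTM, Dense

n, length = 5000, 200
series = array([i / float(n) for i in range(n)])
x = series.reshape((n // length, length, 1))  # (25 samples, 200 timesteps, 1 feature)
y = array([sample[-1] for sample in x])       # toy target: last value of each sample

model = Sequential()
model.add(LSTM(32, input_shape=(length, 1)))  # input_shape = (timesteps, features)
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')
model.fit(x, y, epochs=1, verbose=0)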
aldebaran/qibuild
python/qisys/actions/__init__.py
1
1279
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2021 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license (see the COPYING file).
""" Common tools for actions """
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function

import sys

import qisys
import qisys.command
from qisys import ui


def foreach(projects, cmd, ignore_errors=True):
    """ Execute the command on every project.

    :param ignore_errors: when True, failures are collected and reported at the
        end instead of stopping at the first failing project

    """
    errors = list()
    ui.info(ui.green, "Running `%s` on every project" % " ".join(cmd))
    for i, project in enumerate(projects):
        ui.info_count(i, len(projects), ui.blue, project.src)
        command = cmd[:]
        try:
            qisys.command.call(command, cwd=project.path)
        except qisys.command.CommandFailedException:
            if ignore_errors:
                errors.append(project)
                continue
            else:
                raise
    if not errors:
        return
    print()
    ui.info(ui.red, "Command failed on the following projects:")
    for project in errors:
        ui.info(ui.green, " * ", ui.reset, ui.blue, project.src)
    sys.exit(1)
bsd-3-clause
2,079,718,214,788,950,800
30.195122
84
0.632525
false
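The qisys record above defines foreach(), which runs one command across many projects and only relies on each project exposing src and path attributes. A hypothetical usage sketch with stand-in project objects (a real caller would obtain them from a qisys worktree):

import collections

Project = collections.namedtuple("Project", ["src", "path"])
projects = [Project(src="lib/foo", path="/work/lib/foo"),
            Project(src="app/bar", path="/work/app/bar")]

# Run `git status --short` in every project, collecting failures instead of aborting:
foreach(projects, ["git", "status", "--short"], ignore_errors=True)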
SegundoBob/GNXrepeats
hrngpS2.py
1
1831
#!/usr/bin/python
#coding=utf-8
#@+leo-ver=5-thin
#@+node:bob07.20140715160011.1575: * @file hrngpS2.py
#@@first
#@@first
#@@language python
#@@tabwidth -4

import os
import sys

import leo.core.leoBridge as leoBridge

from leo_lib import lib_leo03

#@+others
#@+node:bob07.20140715160011.1576: ** gnxRepeats()
def gnxRepeats(cmdrx, infoList):
    hrnGnx = cmdrx.hiddenRootNode.gnx
    gnxDict = {hrnGnx: cmdrx.hiddenRootNode.h}
    errorFlag = False
    for vnode in lib_leo03.bffvWalk(cmdrx):
        hdr = '"{0}"'.format(vnode.h)
        if vnode.gnx in gnxDict:
            errorFlag = True
            hdr = '"{0}" {1}'.format(gnxDict[vnode.gnx], hdr)
        gnxDict[vnode.gnx] = hdr
    infoList.append('Error: {0}'.format(errorFlag))
    gnxList = gnxDict.keys()
    gnxList.sort()
    for gnx in gnxList:
        infoList.append('{gnx} {hdrs}'.format(gnx=gnx, hdrs=gnxDict[gnx]))
#@-others

TestDir = 'hidden_root_tsts'


def main():
    infoList = list()
    fpn1 = sys.argv[1]
    bridge = leoBridge.controller(gui='nullGui', verbose=False,
                                  loadPlugins=False, readSettings=False)
    leoG = bridge.globals()
    infoList.append('After bridge create: {0}'.format(leoG.app.nodeIndices.lastIndex))
    cmdr1 = bridge.openLeoFile(fpn1)
    infoList.append('After {fpn} open: {idx}'.format(fpn=fpn1, idx=leoG.app.nodeIndices.lastIndex))
    rp = cmdr1.rootPosition()
    posx = rp.insertAfter()
    posx.h = '{cnt} - {idx}'.format(cnt=2, idx=1)
    infoList.append('After adding 1 vnode: {idx}'.format(fpn=fpn1, idx=leoG.app.nodeIndices.lastIndex))
    gnxRepeats(cmdr1, infoList)
    cmdr1.save()
    cmdr1.close()
    fpnError = os.path.join(TestDir, 'SlaveLog.txt')
    fdError = open(fpnError, 'w')
    fdError.write('\n'.join(infoList) + '\n')
    fdError.close()


if __name__ == "__main__":
    main()
#@-leo
mit
6,549,371,217,770,367,000
27.609375
103
0.647187
false
SINGROUP/pycp2k
pycp2k/classes/_opt_ri_basis3.py
1
1543
from pycp2k.inputsection import InputSection


class _opt_ri_basis3(InputSection):
    def __init__(self):
        InputSection.__init__(self)
        self.Delta_i_rel = None
        self.Delta_ri = None
        self.Eps_deriv = None
        self.Max_iter = None
        self.Num_func = None
        self.Basis_size = None
        self._name = "OPT_RI_BASIS"
        self._keywords = {'Num_func': 'NUM_FUNC', 'Delta_i_rel': 'DELTA_I_REL', 'Basis_size': 'BASIS_SIZE', 'Delta_ri': 'DELTA_RI', 'Eps_deriv': 'EPS_DERIV', 'Max_iter': 'MAX_ITER'}
        self._aliases = {'Max_num_iter': 'Max_iter', 'Dri': 'Delta_ri', 'Di_rel': 'Delta_i_rel', 'Eps_num_deriv': 'Eps_deriv'}

    @property
    def Di_rel(self):
        """
        See documentation for Delta_i_rel
        """
        return self.Delta_i_rel

    @property
    def Dri(self):
        """
        See documentation for Delta_ri
        """
        return self.Delta_ri

    @property
    def Eps_num_deriv(self):
        """
        See documentation for Eps_deriv
        """
        return self.Eps_deriv

    @property
    def Max_num_iter(self):
        """
        See documentation for Max_iter
        """
        return self.Max_iter

    @Di_rel.setter
    def Di_rel(self, value):
        self.Delta_i_rel = value

    @Dri.setter
    def Dri(self, value):
        self.Delta_ri = value

    @Eps_num_deriv.setter
    def Eps_num_deriv(self, value):
        self.Eps_deriv = value

    @Max_num_iter.setter
    def Max_num_iter(self, value):
        self.Max_iter = value
lgpl-3.0
7,370,216,220,963,202,000
24.716667
181
0.552819
false
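The generated class above exposes each CP2K keyword both under its canonical attribute and under shorter aliases wired through property getters and setters. A small sketch of how those aliases resolve (assumes pycp2k is importable; the values are arbitrary):

section = _opt_ri_basis3()
section.Max_num_iter = 50   # alias setter stores the value in Max_iter
section.Dri = 1.0e-4        # alias for Delta_ri
print(section.Max_iter, section.Delta_ri)  # -> 50 0.0001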
uglyfruitcake/Axelrod
axelrod/tests/unit/test_cooperator.py
1
1499
"""Test for the cooperator strategy.""" import axelrod from .test_player import TestPlayer C, D = axelrod.Actions.C, axelrod.Actions.D class TestCooperator(TestPlayer): name = "Cooperator" player = axelrod.Cooperator expected_classifier = { 'memory_depth': 0, 'stochastic': False, 'inspects_source': False, 'manipulates_source': False, 'manipulates_source': False, 'manipulates_state': False } def test_strategy(self): """Starts by cooperating.""" self.first_play_test(C) def test_effect_of_strategy(self): """Simply does the opposite to what the strategy did last time.""" self.markov_test([C, C, C, C]) class TestTrickyCooperator(TestPlayer): name = "Tricky Cooperator" player = axelrod.TrickyCooperator expected_classifier = { 'memory_depth': 10, 'stochastic': False, 'inspects_source': False, 'manipulates_source': False, 'manipulates_state': False } def test_strategy(self): """Starts by cooperating.""" self.first_play_test(C) def test_effect_of_strategy(self): """Test if it tries to trick opponent""" self.responses_test([C, C, C], [C, C, C], [D]) self.responses_test([C, C, C, D, D], [C, C, C, C, D], [C]) history = [C, C, C, D, D] + [C] * 11 opponent_histroy = [C, C, C, C, D] + [D] + [C] * 10 self.responses_test(history, opponent_histroy,[D])
mit
-220,543,415,429,763,360
27.283019
74
0.588392
false
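The test record above checks that Cooperator always cooperates and that TrickyCooperator occasionally defects to probe its opponent. A hedged sketch of pitting the two strategies against each other with the library's Match helper (assumes a reasonably recent Axelrod release where axelrod.Match takes a pair of players and a number of turns):

import axelrod

players = (axelrod.Cooperator(), axelrod.TrickyCooperator())
match = axelrod.Match(players, turns=10)
print(match.play())  # list of (action, action) tuples, one per turn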
souravbadami/oppia
core/storage/base_model/gae_models.py
1
36051
# Copyright 2014 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Base model class.""" from constants import constants from core.platform import models import utils from google.appengine.datastore import datastore_query from google.appengine.ext import ndb transaction_services = models.Registry.import_transaction_services() # The delimiter used to separate the version number from the model instance # id. To get the instance id from a snapshot id, use Python's rfind() # method to find the location of this delimiter. _VERSION_DELIMITER = '-' # Constants used for generating ids. MAX_RETRIES = 10 RAND_RANGE = (1 << 30) - 1 ID_LENGTH = 12 class BaseModel(ndb.Model): """Base model for all persistent object storage classes.""" # When this entity was first created. This can be overwritten and # set explicitly. created_on = ndb.DateTimeProperty(auto_now_add=True, indexed=True) # When this entity was last updated. This cannot be set directly. last_updated = ndb.DateTimeProperty(auto_now=True, indexed=True) # Whether the current version of the model instance is deleted. deleted = ndb.BooleanProperty(indexed=True, default=False) @property def id(self): """A unique id for this model instance.""" return self.key.id() def _pre_put_hook(self): """This is run before model instances are saved to the datastore. Subclasses of BaseModel should override this method. """ pass class EntityNotFoundError(Exception): """Raised when no entity for a given id exists in the datastore.""" pass @staticmethod def export_data(user_id): """This method should be implemented by subclasses. Args: user_id: str. The ID of the user whose data should be exported. Raises: NotImplementedError: The method is not overwritten in derived classes. """ raise NotImplementedError @classmethod def get(cls, entity_id, strict=True): """Gets an entity by id. Args: entity_id: str. strict: bool. Whether to fail noisily if no entity with the given id exists in the datastore. Default is True. Returns: None, if strict == False and no undeleted entity with the given id exists in the datastore. Otherwise, the entity instance that corresponds to the given id. Raises: base_models.BaseModel.EntityNotFoundError: if strict == True and no undeleted entity with the given id exists in the datastore. """ entity = cls.get_by_id(entity_id) if entity and entity.deleted: entity = None if strict and entity is None: raise cls.EntityNotFoundError( 'Entity for class %s with id %s not found' % (cls.__name__, entity_id)) return entity @classmethod def get_multi(cls, entity_ids, include_deleted=False): """Gets list of entities by list of ids. Args: entity_ids: list(str). include_deleted: bool. Whether to include deleted entities in the return list. Default is False. Returns: list(*|None). A list that contains model instances that match the corresponding entity_ids in the input list. If an instance is not found, or it has been deleted and include_deleted is False, then the corresponding entry is None. 
""" entity_keys = [] none_argument_indices = [] for index, entity_id in enumerate(entity_ids): if entity_id: entity_keys.append(ndb.Key(cls, entity_id)) else: none_argument_indices.append(index) entities = ndb.get_multi(entity_keys) for index in none_argument_indices: entities.insert(index, None) if not include_deleted: for i in xrange(len(entities)): if entities[i] and entities[i].deleted: entities[i] = None return entities @classmethod def put_multi(cls, entities): """Stores the given ndb.Model instances. Args: entities: list(ndb.Model). """ ndb.put_multi(entities) @classmethod def delete_multi(cls, entities): """Deletes the given ndb.Model instances. Args: entities: list(ndb.Model). """ keys = [entity.key for entity in entities] ndb.delete_multi(keys) def delete(self): """Deletes this instance.""" super(BaseModel, self).key.delete() @classmethod def get_all(cls, include_deleted=False): """Gets iterable of all entities of this class. Args: include_deleted: bool. If True, then entities that have been marked deleted are returned as well. Defaults to False. Returns: iterable. Filterable iterable of all entities of this class. """ query = cls.query() if not include_deleted: query = query.filter(cls.deleted == False) # pylint: disable=singleton-comparison return query @classmethod def get_new_id(cls, entity_name): """Gets a new id for an entity, based on its name. The returned id is guaranteed to be unique among all instances of this entity. Args: entity_name: The name of the entity. Coerced to a utf-8 encoded string. Defaults to ''. Returns: str. New unique id for this entity class. Raises: Exception: An ID cannot be generated within a reasonable number of attempts. """ try: entity_name = unicode(entity_name).encode(encoding='utf-8') except Exception: entity_name = '' for _ in range(MAX_RETRIES): new_id = utils.convert_to_hash( '%s%s' % (entity_name, utils.get_random_int(RAND_RANGE)), ID_LENGTH) if not cls.get_by_id(new_id): return new_id raise Exception('New id generator is producing too many collisions.') @classmethod def _fetch_page_sorted_by_last_updated( cls, query, page_size, urlsafe_start_cursor): """Fetches a page of entities sorted by their last_updated attribute in descending order (newly updated first). Args: query: ndb.Query. page_size: int. The maximum number of entities to be returned. urlsafe_start_cursor: str or None. If provided, the list of returned entities starts from this datastore cursor. Otherwise, the returned entities start from the beginning of the full list of entities. Returns: 3-tuple of (results, cursor, more) as described in fetch_page() at: https://developers.google.com/appengine/docs/python/ndb/queryclass, where: results: List of query results. cursor: str or None. A query cursor pointing to the next batch of results. If there are no more results, this will be None. more: bool. If True, there are (probably) more results after this batch. If False, there are no further results after this batch. """ if urlsafe_start_cursor: start_cursor = datastore_query.Cursor(urlsafe=urlsafe_start_cursor) else: start_cursor = None result = query.order(-cls.last_updated).fetch_page( page_size, start_cursor=start_cursor) return ( result[0], (result[1].urlsafe() if result[1] else None), result[2]) class BaseCommitLogEntryModel(BaseModel): """Base Model for the models that store the log of commits to a construct. """ # Update superclass model to make these properties indexed. 
created_on = ndb.DateTimeProperty(auto_now_add=True, indexed=True) last_updated = ndb.DateTimeProperty(auto_now=True, indexed=True) # The id of the user. user_id = ndb.StringProperty(indexed=True, required=True) # The username of the user, at the time of the edit. username = ndb.StringProperty(indexed=True, required=True) # The type of the commit: 'create', 'revert', 'edit', 'delete'. commit_type = ndb.StringProperty(indexed=True, required=True) # The commit message. commit_message = ndb.TextProperty(indexed=False) # The commit_cmds dict for this commit. commit_cmds = ndb.JsonProperty(indexed=False, required=True) # The status of the entity after the edit event ('private', 'public'). post_commit_status = ndb.StringProperty(indexed=True, required=True) # Whether the entity is community-owned after the edit event. post_commit_community_owned = ndb.BooleanProperty(indexed=True) # Whether the entity is private after the edit event. Having a # separate field for this makes queries faster, since an equality query # on this property is faster than an inequality query on # post_commit_status. post_commit_is_private = ndb.BooleanProperty(indexed=True) # The version number of the model after this commit. version = ndb.IntegerProperty() @classmethod def create( cls, entity_id, version, committer_id, committer_username, commit_type, commit_message, commit_cmds, status, community_owned): """This method returns an instance of the CommitLogEntryModel for a construct with the common fields filled. Args: entity_id: str. The ID of the construct corresponding to this commit log entry model (e.g. the exp_id for an exploration, the story_id for a story, etc.). version: int. The version number of the model after the commit. committer_id: str. The user_id of the user who committed the change. committer_username: str. The username of the user who committed the change. commit_type: str. The type of commit. Possible values are in core.storage.base_models.COMMIT_TYPE_CHOICES. commit_message: str. The commit description message. commit_cmds: list(dict). A list of commands, describing changes made in this model, which should give sufficient information to reconstruct the commit. Each dict always contains: cmd: str. Unique command. and then additional arguments for that command. status: str. The status of the entity after the commit. community_owned: bool. Whether the entity is community_owned after the commit. Returns: CommitLogEntryModel. Returns the respective CommitLogEntryModel instance of the construct from which this is called. """ return cls( id=cls._get_instance_id(entity_id, version), user_id=committer_id, username=committer_username, commit_type=commit_type, commit_message=commit_message, commit_cmds=commit_cmds, version=version, post_commit_status=status, post_commit_community_owned=community_owned, post_commit_is_private=( status == constants.ACTIVITY_STATUS_PRIVATE) ) @classmethod def _get_instance_id(cls, target_entity_id, version): """This method should be implemented in the inherited classes. Args: target_entity_id: str. The ID of the construct corresponding to this commit log entry model (e.g. the exp_id for an exploration, the story_id for a story, etc.). version: int. The version number of the model after the commit. Raises: NotImplementedError: The method is not overwritten in derived classes. """ raise NotImplementedError @classmethod def get_all_commits(cls, page_size, urlsafe_start_cursor): """Fetches a list of all the commits sorted by their last updated attribute. Args: page_size: int. 
The maximum number of entities to be returned. urlsafe_start_cursor: str or None. If provided, the list of returned entities starts from this datastore cursor. Otherwise, the returned entities start from the beginning of the full list of entities. Returns: 3-tuple of (results, cursor, more) as described in fetch_page() at: https://developers.google.com/appengine/docs/python/ndb/queryclass, where: results: List of query results. cursor: str or None. A query cursor pointing to the next batch of results. If there are no more results, this might be None. more: bool. If True, there are (probably) more results after this batch. If False, there are no further results after this batch. """ return cls._fetch_page_sorted_by_last_updated( cls.query(), page_size, urlsafe_start_cursor) @classmethod def get_commit(cls, target_entity_id, version): """Returns the commit corresponding to an instance id and version number. Args: target_entity_id: str. The ID of the construct corresponding to this commit log entry model (e.g. the exp_id for an exploration, the story_id for a story, etc.). version: int. The version number of the instance after the commit. Returns: BaseCommitLogEntryModel. The commit with the target entity id and version number. """ commit_id = cls._get_instance_id(target_entity_id, version) return cls.get_by_id(commit_id) class VersionedModel(BaseModel): """Model that handles storage of the version history of model instances. To use this class, you must declare a SNAPSHOT_METADATA_CLASS and a SNAPSHOT_CONTENT_CLASS. The former must contain the String fields 'committer_id', 'commit_type' and 'commit_message', and a JSON field for the Python list of dicts, 'commit_cmds'. The latter must contain the JSON field 'content'. The item that is being versioned must be serializable to a JSON blob. Note that commit() should be used for VersionedModels, as opposed to put() for direct subclasses of BaseModel. """ # The class designated as the snapshot model. This should be a subclass of # BaseSnapshotMetadataModel. SNAPSHOT_METADATA_CLASS = None # The class designated as the snapshot content model. This should be a # subclass of BaseSnapshotContentModel. SNAPSHOT_CONTENT_CLASS = None # Whether reverting is allowed. Default is False. ALLOW_REVERT = False # IMPORTANT: Subclasses should only overwrite things above this line. # The possible commit types. _COMMIT_TYPE_CREATE = 'create' _COMMIT_TYPE_REVERT = 'revert' _COMMIT_TYPE_EDIT = 'edit' _COMMIT_TYPE_DELETE = 'delete' # A list containing the possible commit types. COMMIT_TYPE_CHOICES = [ _COMMIT_TYPE_CREATE, _COMMIT_TYPE_REVERT, _COMMIT_TYPE_EDIT, _COMMIT_TYPE_DELETE ] # The reserved prefix for keys that are automatically inserted into a # commit_cmd dict by this model. _AUTOGENERATED_PREFIX = 'AUTO' # The command string for a revert commit. CMD_REVERT_COMMIT = '%s_revert_version_number' % _AUTOGENERATED_PREFIX # The command string for a delete commit. CMD_DELETE_COMMIT = '%s_mark_deleted' % _AUTOGENERATED_PREFIX # The current version number of this instance. In each PUT operation, # this number is incremented and a snapshot of the modified instance is # stored in the snapshot metadata and content models. The snapshot # version number starts at 1 when the model instance is first created. # All data in this instance represents the version at HEAD; data about the # previous versions is stored in the snapshot models. 
version = ndb.IntegerProperty(default=0) def _require_not_marked_deleted(self): """Checks whether the model instance is deleted.""" if self.deleted: raise Exception('This model instance has been deleted.') def _compute_snapshot(self): """Generates a snapshot (dict) from the model property values.""" return self.to_dict(exclude=['created_on', 'last_updated']) def _reconstitute(self, snapshot_dict): """Populates the model instance with the snapshot. Args: snapshot_dict: dict(str, *). The snapshot with the model property values. Returns: VersionedModel. The instance of the VersionedModel class populated with the the snapshot. """ self.populate(**snapshot_dict) return self def _reconstitute_from_snapshot_id(self, snapshot_id): """Gets a reconstituted instance of this model class, based on the given snapshot id. Args: snapshot_id: str. Returns: VersionedModel. Reconstituted instance. """ snapshot_model = self.SNAPSHOT_CONTENT_CLASS.get(snapshot_id) snapshot_dict = snapshot_model.content reconstituted_model = self._reconstitute(snapshot_dict) # TODO(sll): The 'created_on' and 'last_updated' values here will be # slightly different from the values the entity model would have had, # since they correspond to the corresponding fields for the snapshot # content model instead. Figure out whether this is a problem or not, # and whether we need to record the contents of those fields in the # actual entity model (in which case we also need a way to deal with # old snapshots that don't have this information). reconstituted_model.created_on = snapshot_model.created_on reconstituted_model.last_updated = snapshot_model.last_updated return reconstituted_model @classmethod def _get_snapshot_id(cls, instance_id, version_number): """Gets a unique snapshot id for this instance and version. Args: instance_id: str. version_number: int. Returns: str. The unique snapshot id corresponding to the given instance and version. """ return '%s%s%s' % ( instance_id, _VERSION_DELIMITER, version_number) def _trusted_commit( self, committer_id, commit_type, commit_message, commit_cmds): """Evaluates and executes commit. Main function for all commit types. Args: committer_id: str. The user_id of the user who committed the change. commit_type: str. Unique identifier of commit type. Possible values are in COMMIT_TYPE_CHOICES. commit_message: str. commit_cmds: list(dict). A list of commands, describing changes made in this model, should give sufficient information to reconstruct the commit. Dict always contains: cmd: str. Unique command. And then additional arguments for that command. For example: {'cmd': 'AUTO_revert_version_number' 'version_number': 4} Raises: Exception: No snapshot metadata class has been defined. Exception: No snapshot content class has been defined. Exception: commit_cmds is not a list of dicts. 
""" if self.SNAPSHOT_METADATA_CLASS is None: raise Exception('No snapshot metadata class defined.') if self.SNAPSHOT_CONTENT_CLASS is None: raise Exception('No snapshot content class defined.') if not isinstance(commit_cmds, list): raise Exception( 'Expected commit_cmds to be a list of dicts, received %s' % commit_cmds) self.version += 1 snapshot = self._compute_snapshot() snapshot_id = self._get_snapshot_id(self.id, self.version) snapshot_metadata_instance = self.SNAPSHOT_METADATA_CLASS( # pylint: disable=not-callable id=snapshot_id, committer_id=committer_id, commit_type=commit_type, commit_message=commit_message, commit_cmds=commit_cmds) snapshot_content_instance = self.SNAPSHOT_CONTENT_CLASS( # pylint: disable=not-callable id=snapshot_id, content=snapshot) transaction_services.run_in_transaction( ndb.put_multi, [snapshot_metadata_instance, snapshot_content_instance, self]) def delete(self, committer_id, commit_message, force_deletion=False): """Deletes this model instance. Args: committer_id: str. The user_id of the user who committed the change. commit_message: str. force_deletion: bool. If True this model is deleted completely from storage, otherwise it is only marked as deleted. Default is False. Raises: Exception: This model instance has been already deleted. """ if force_deletion: current_version = self.version version_numbers = [str(num + 1) for num in range(current_version)] snapshot_ids = [ self._get_snapshot_id(self.id, version_number) for version_number in version_numbers] metadata_keys = [ ndb.Key(self.SNAPSHOT_METADATA_CLASS, snapshot_id) for snapshot_id in snapshot_ids] ndb.delete_multi(metadata_keys) content_keys = [ ndb.Key(self.SNAPSHOT_CONTENT_CLASS, snapshot_id) for snapshot_id in snapshot_ids] ndb.delete_multi(content_keys) super(VersionedModel, self).delete() else: self._require_not_marked_deleted() # pylint: disable=protected-access self.deleted = True commit_cmds = [{ 'cmd': self.CMD_DELETE_COMMIT }] self._trusted_commit( committer_id, self._COMMIT_TYPE_DELETE, commit_message, commit_cmds) def put(self, *args, **kwargs): """For VersionedModels, this method is replaced with commit().""" raise NotImplementedError def commit(self, committer_id, commit_message, commit_cmds): """Saves a version snapshot and updates the model. Args: committer_id: str. The user_id of the user who committed the change. commit_message: str. commit_cmds: list(dict). A list of commands, describing changes made in this model, should give sufficient information to reconstruct the commit. Dict always contains: cmd: str. Unique command. And then additional arguments for that command. For example: {'cmd': 'AUTO_revert_version_number' 'version_number': 4} Raises: Exception: This model instance has been already deleted. Exception: commit_cmd is in invalid format. """ self._require_not_marked_deleted() for item in commit_cmds: if not isinstance(item, dict): raise Exception( 'Expected commit_cmds to be a list of dicts, received %s' % commit_cmds) for commit_cmd in commit_cmds: if 'cmd' not in commit_cmd: raise Exception( 'Invalid commit_cmd: %s. Expected a \'cmd\' key.' % commit_cmd) if commit_cmd['cmd'].startswith(self._AUTOGENERATED_PREFIX): raise Exception( 'Invalid change list command: %s' % commit_cmd['cmd']) commit_type = ( self._COMMIT_TYPE_CREATE if self.version == 0 else self._COMMIT_TYPE_EDIT) self._trusted_commit( committer_id, commit_type, commit_message, commit_cmds) @classmethod def revert(cls, model, committer_id, commit_message, version_number): """Reverts model to previous version. 
Args: model: VersionedModel. committer_id: str. The user_id of the user who committed the change. commit_message: str. version_number: int. Version to revert to. Raises: Exception: This model instance has been deleted. Exception: Reverting is not allowed on this model. """ model._require_not_marked_deleted() # pylint: disable=protected-access if not model.ALLOW_REVERT: raise Exception( 'Reverting objects of type %s is not allowed.' % model.__class__.__name__) commit_cmds = [{ 'cmd': model.CMD_REVERT_COMMIT, 'version_number': version_number }] # Do not overwrite the version number. current_version = model.version # If a new property is introduced after a certain version of a model, # the property should be its default value when an old snapshot of the # model is applied during reversion. E.g. states_schema_version in # ExplorationModel may be added after some version of a saved # exploration. If that exploration is reverted to a version that does # not have a states_schema_version property, it should revert to the # default states_schema_version value rather than taking the # states_schema_version value from the latest exploration version. # pylint: disable=protected-access snapshot_id = model._get_snapshot_id(model.id, version_number) new_model = cls(id=model.id) new_model._reconstitute_from_snapshot_id(snapshot_id) new_model.version = current_version new_model._trusted_commit( committer_id, cls._COMMIT_TYPE_REVERT, commit_message, commit_cmds) # pylint: enable=protected-access @classmethod def get_version(cls, entity_id, version_number): """Gets model instance representing the given version. The snapshot content is used to populate this model instance. The snapshot metadata is not used. Args: entity_id: str. version_number: int. Returns: VersionedModel. Model instance representing given version. Raises: Exception: This model instance has been deleted. """ # pylint: disable=protected-access cls.get(entity_id)._require_not_marked_deleted() snapshot_id = cls._get_snapshot_id(entity_id, version_number) return cls( id=entity_id, version=version_number)._reconstitute_from_snapshot_id(snapshot_id) # pylint: enable=protected-access @classmethod def get_multi_versions(cls, entity_id, version_numbers): """Gets model instances for each version specified in version_numbers. Args: entity_id: str. ID of the entity. version_numbers: list(int). List of version numbers. Returns: list(VersionedModel). Model instances representing the given versions. Raises: ValueError. The given entity_id is invalid. ValueError. Requested version number cannot be higher than the current version number. ValueError. At least one version number is invalid. """ instances = [] entity = cls.get(entity_id, strict=False) if not entity: raise ValueError('The given entity_id %s is invalid.' % (entity_id)) current_version = entity.version max_version = max(version_numbers) if max_version > current_version: raise ValueError( 'Requested version number %s cannot be higher than the current ' 'version number %s.' 
% (max_version, current_version)) snapshot_ids = [] # pylint: disable=protected-access for version in version_numbers: snapshot_id = cls._get_snapshot_id(entity_id, version) snapshot_ids.append(snapshot_id) snapshot_models = cls.SNAPSHOT_CONTENT_CLASS.get_multi(snapshot_ids) for snapshot_model in snapshot_models: if snapshot_model is None: raise ValueError( 'At least one version number is invalid.') snapshot_dict = snapshot_model.content reconstituted_model = cls(id=entity_id)._reconstitute( snapshot_dict) reconstituted_model.created_on = snapshot_model.created_on reconstituted_model.last_updated = snapshot_model.last_updated instances.append(reconstituted_model) # pylint: enable=protected-access return instances @classmethod def get(cls, entity_id, strict=True, version=None): """Gets model instance. Args: entity_id: str. strict: bool. Whether to fail noisily if no entity with the given id exists in the datastore. Default is True. version: int. Version we want to get. Default is None. Returns: VersionedModel. If version is None, get the newest version of the model. Otherwise, get the specified version. """ if version is None: return super(VersionedModel, cls).get(entity_id, strict=strict) else: return cls.get_version(entity_id, version) @classmethod def get_snapshots_metadata( cls, model_instance_id, version_numbers, allow_deleted=False): """Gets a list of dicts, each representing a model snapshot. One dict is returned for each version number in the list of version numbers requested. If any of the version numbers does not exist, an error is raised. Args: model_instance_id: str. Id of requested model. version_numbers: list(int). List of version numbers. allow_deleted: bool. If is False, an error is raised if the current model has been deleted. Default is False. Returns: list(dict). Each dict contains metadata for a particular snapshot. It has the following keys: committer_id: str. The user_id of the user who committed the change. commit_message: str. commit_cmds: list(dict). A list of commands, describing changes made in this model, should give sufficient information to reconstruct the commit. Dict always contains: cmd: str. Unique command. And then additional arguments for that command. For example: {'cmd': 'AUTO_revert_version_number' 'version_number': 4} commit_type: str. Unique identifier of commit type. Possible values are in COMMIT_TYPE_CHOICES. version_number: int. created_on_ms: float. Snapshot creation time in milliseconds since the Epoch. Raises: Exception: There is no model instance corresponding to at least one of the given version numbers. 
""" # pylint: disable=protected-access if not allow_deleted: cls.get(model_instance_id)._require_not_marked_deleted() snapshot_ids = [ cls._get_snapshot_id(model_instance_id, version_number) for version_number in version_numbers] # pylint: enable=protected-access metadata_keys = [ ndb.Key(cls.SNAPSHOT_METADATA_CLASS, snapshot_id) for snapshot_id in snapshot_ids] returned_models = ndb.get_multi(metadata_keys) for ind, model in enumerate(returned_models): if model is None: raise Exception( 'Invalid version number %s for model %s with id %s' % (version_numbers[ind], cls.__name__, model_instance_id)) return [{ 'committer_id': model.committer_id, 'commit_message': model.commit_message, 'commit_cmds': model.commit_cmds, 'commit_type': model.commit_type, 'version_number': version_numbers[ind], 'created_on_ms': utils.get_time_in_millisecs(model.created_on), } for (ind, model) in enumerate(returned_models)] class BaseSnapshotMetadataModel(BaseModel): """Base class for snapshot metadata classes. The id of this model is computed using VersionedModel.get_snapshot_id(). """ # The id of the user who committed this revision. committer_id = ndb.StringProperty(required=True) # The type of the commit associated with this snapshot. commit_type = ndb.StringProperty( required=True, choices=VersionedModel.COMMIT_TYPE_CHOICES) # The commit message associated with this snapshot. commit_message = ndb.TextProperty(indexed=False) # A sequence of commands that can be used to describe this commit. # Represented as a list of dicts. commit_cmds = ndb.JsonProperty(indexed=False) def get_unversioned_instance_id(self): """Gets the instance id from the snapshot id. Returns: str. Instance id part of snapshot id. """ return self.id[:self.id.rfind(_VERSION_DELIMITER)] def get_version_string(self): """Gets the version number from the snapshot id. Returns: str. Version number part of snapshot id. """ return self.id[self.id.rfind(_VERSION_DELIMITER) + 1:] class BaseSnapshotContentModel(BaseModel): """Base class for snapshot content classes. The id of this model is computed using VersionedModel.get_snapshot_id(). """ # The snapshot content, as a JSON blob. content = ndb.JsonProperty(indexed=False) def get_unversioned_instance_id(self): """Gets the instance id from the snapshot id. Returns: str. Instance id part of snapshot id. """ return self.id[:self.id.rfind(_VERSION_DELIMITER)] def get_version_string(self): """Gets the version number from the snapshot id. Returns: str. Version number part of snapshot id. """ return self.id[self.id.rfind(_VERSION_DELIMITER) + 1:] class BaseMapReduceBatchResultsModel(BaseModel): """Base model for batch storage for MR jobs. This model turns off caching, because this results in stale data being shown after each MapReduce job run. Classes which are used by a MR job to store its batch results should subclass this class. """ _use_cache = False _use_memcache = False
apache-2.0
8,921,728,561,893,953,000
38.100868
98
0.618596
false
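The VersionedModel docstring in the record above spells out its contract: a subclass must declare SNAPSHOT_METADATA_CLASS and SNAPSHOT_CONTENT_CLASS, and saving must go through commit() rather than put(). A hypothetical sketch of that pattern; the Widget model and its fields are invented for illustration:

from google.appengine.ext import ndb

class WidgetSnapshotMetadataModel(BaseSnapshotMetadataModel):
    """Metadata for widget snapshots."""
    pass

class WidgetSnapshotContentModel(BaseSnapshotContentModel):
    """Content of widget snapshots."""
    pass

class WidgetModel(VersionedModel):
    """A versioned widget."""
    SNAPSHOT_METADATA_CLASS = WidgetSnapshotMetadataModel
    SNAPSHOT_CONTENT_CLASS = WidgetSnapshotContentModel
    ALLOW_REVERT = True

    title = ndb.StringProperty(required=True)

# Saving goes through commit(), which bumps the version and stores a snapshot:
widget = WidgetModel(id=WidgetModel.get_new_id('widget'), title='First title')
widget.commit('committer_user_id', 'Create widget', [{'cmd': 'create_new'}])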
pettarin/penelope
setup.py
1
2657
#!/usr/bin/env python
# coding=utf-8

"""
Set penelope package up
"""

from setuptools import Extension
from setuptools import setup

__author__ = "Alberto Pettarin"
__copyright__ = "Copyright 2012-2016, Alberto Pettarin (www.albertopettarin.it)"
__license__ = "MIT"
__version__ = "3.1.3"
__email__ = "[email protected]"
__status__ = "Production"

setup(
    name="penelope",
    packages=["penelope"],
    package_data={"penelope": ["res/*"]},
    version="3.1.3.0",
    description="Penelope is a multi-tool for creating, editing and converting dictionaries, especially for eReader devices",
    author="Alberto Pettarin",
    author_email="[email protected]",
    url="https://github.com/pettarin/penelope",
    license="MIT License",
    long_description=open("README.rst", "r").read(),
    install_requires=["lxml>=3.0", "marisa-trie>=0.7.2"],
    scripts=["bin/penelope"],
    keywords=[
        "Dictionary",
        "Dictionaries",
        "Index",
        "Merge",
        "Flatten",
        "eReader",
        "eReaders",
        "Bookeen",
        "CSV",
        "EPUB",
        "MOBI",
        "Kindle",
        "Kobo",
        "StarDict",
        "XML",
        "MARISA",
        "kindlegen",
        "dictzip",
    ],
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Console",
        "Intended Audience :: Developers",
        "Intended Audience :: End Users/Desktop",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: Microsoft :: Windows",
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Topic :: Desktop Environment",
        "Topic :: Documentation",
        "Topic :: Office/Business",
        "Topic :: Software Development :: Internationalization",
        "Topic :: Software Development :: Localization",
        "Topic :: Text Editors",
        "Topic :: Text Editors :: Text Processing",
        "Topic :: Text Processing",
        "Topic :: Text Processing :: General",
        "Topic :: Text Processing :: Indexing",
        "Topic :: Text Processing :: Linguistic",
        "Topic :: Text Processing :: Markup",
        "Topic :: Text Processing :: Markup :: HTML",
        "Topic :: Text Processing :: Markup :: XML",
        "Topic :: Utilities"
    ],
)
mit
-1,803,094,169,845,251,800
31.012048
125
0.572074
false
DomainDrivenConsulting/dogen
projects/masd.dogen.dia/python/add_to_package.py
1
1625
# -*- mode: python; tab-width: 4; indent-tabs-mode: nil -*-
#
# Copyright (C) 2012-2015 Marco Craveiro <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
# This script is meant to be run from Dia's Python console, where the
# `dia` module is provided by the application.
#
# First locate the parent
#
parent = None  # initialised so the check below works even if no package matches
for layer in dia.active_display().diagram.data.layers:
    for object in layer.objects:
        if object.type.name == "UML - LargePackage":
            if object.properties["name"].value == "transforms":
                parent = object
                print "found parent"

#
# Then update all objects without a parent. Make sure all classes
# that are orphaned actually belong to this package before running.
#
if parent != None:
    for layer in dia.active_display().diagram.data.layers:
        for object in layer.objects:
            if object.type.name == "UML - Class":
                if object.parent == None:
                    print object.properties["name"].value
                    object.parent = parent

print "done"
gpl-3.0
-5,694,978,738,083,802,000
37.690476
70
0.675077
false
kikocorreoso/mplutils
mplutils/axes.py
1
8516
# -*- coding: utf-8 -*- """ Created on Sun Feb 21 23:43:37 2016 @author: kiko """ from __future__ import division, absolute_import from .settings import RICH_DISPLAY import numpy as np if RICH_DISPLAY: from IPython.display import display def axes_set_better_defaults(ax, axes_color = '#777777', grid = False, show = False): """ Enter an Axes instance and it will change the defaults to an opinionated version of how a simple plot should be. Parameters: ----------- ax : matplotlib.axes.Axes or matplotlib.axes.Subplot instance axes_color : str A string indicating a valid matplotlib color. grid : bool If `True` the grid of the axes will be shown, if `False` (default) the grid, if active, will be supressed. show : bool if `True` the figure will be shown. If you are working in a rich display environment like the IPython qtconsole or the Jupyter notebook it will use `IPython.display.display` to show the figure. If you are working otherwise it will call the `show` of the `Figure` instance. """ ax.set_axis_bgcolor((1, 1, 1)) ax.grid(grid) for key in ax.spines.keys(): if ax.spines[key].get_visible(): ax.spines[key].set_color(axes_color) ax.tick_params(axis = 'x', colors = axes_color) ax.tick_params(axis = 'y', colors = axes_color) ax.figure.set_facecolor('white') ax.figure.canvas.draw() if show: if RICH_DISPLAY: display(ax.figure) else: ax.figure.show() # http://matplotlib.org/examples/pylab_examples/spine_placement_demo.html def axes_set_axis_position(ax, spines = ['bottom', 'left'], pan = 0, show = False): """ Enter an Axes instance and depending the options it will display the axis where you selected. Parameters: ----------- ax : matplotlib.axes.Axes or matplotlib.axes.Subplot instance spines : str or iterable A string or an iterable of strings with the following valid options: 'bottom' : To active the bottom x-axis. 'top' : To active the top x-axis. 'left' : To active the left y-axis. 'right' : To active the right y-axis. pan : int or iterable A integer value or an iterable of integer values indicating the value to pan the axis. It has to have the same lenght and the same order than the spines input. show : bool if `True` the figure will be shown. If you are working in a rich display environment like the IPython qtconsole or the Jupyter notebook it will use `IPython.display.display` to show the figure. If you are working otherwise it will call the `show` of the `Figure` instance. """ if np.isscalar(spines): spines = (spines,) len_spines = 1 else: len_spines = len(spines) if np.isscalar(pan): pan = np.repeat(pan, len_spines) len_pan = 1 else: len_pan = len(pan) if len_pan > 1 and len_pan != len_spines: raise ValueError(('Length of `spines` and `pan` mismatch. 
`pan` ') ('should be a scalar or should have the same length than `spines`.')) i = 0 for loc, spine in ax.spines.items(): if loc in spines: spine.set_position(('outward', pan[i])) # outward by `pan` points spine.set_smart_bounds(True) i += 1 else: #spine.set_color('none') # don't draw spine spine.set_visible(False) # turn off ticks where there is no spine if 'left' in spines: ax.yaxis.set_ticks_position('left') ax.tick_params(labelleft = True) if 'right' in spines: ax.yaxis.set_ticks_position('right') ax.tick_params(labelright = True) if 'left' in spines and 'right' in spines: ax.yaxis.set_ticks_position('both') ax.tick_params(labelleft = True, labelright = True) if 'left' not in spines and 'right' not in spines: ax.yaxis.set_ticks([]) if 'bottom' in spines: ax.xaxis.set_ticks_position('bottom') ax.tick_params(labelbottom = True) if 'top' in spines: ax.xaxis.set_ticks_position('top') ax.tick_params(labeltop = True) if 'bottom' in spines and 'top' in spines: ax.xaxis.set_ticks_position('both') ax.tick_params(labelbottom = True, labeltop = True) if 'bottom' not in spines and 'top' not in spines: ax.xaxis.set_ticks([]) ax.figure.canvas.draw() if show: if RICH_DISPLAY: display(ax.figure) else: ax.figure.show() def axes_set_origin(ax, x = 0, y = 0, xticks_position = 'bottom', yticks_position = 'left', xticks_visible = True, yticks_visible = True, show = False): """ function to locate x-axis and y-axis on the position you want. Parameters: ----------- ax : matplotlib.axes.Axes or matplotlib.axes.Subplot instance x : int or float Value indicating the position on the y-axis where you want the x-axis to be located. y : int or float Value indicating the position on the x-axis where you want the y-axis to be located. xticks_position : str Default value is 'bottom' if you want the ticks to be located below the x-axis. 'top' if you want the ticks to be located above the x-axis. yticks_position : str Default value is 'left' if you want the ticks to be located on the left side of the y-axis. 'right' if you want the ticks to be located on the right side of the y-axis. xticks_visible : bool Default value is True if you want ticks visible on the x-axis. False if you don't want to see the ticks on the x-axis. yticks_visible : bool Default value is True if you want ticks visible on the y-axis. False if you don't want to see the ticks on the y-axis. show : bool if `True` the figure will be shown. If you are working in a rich display environment like the IPython qtconsole or the Jupyter notebook it will use `IPython.display.display` to show the figure. If you are working otherwise it will call the `show` of the `Figure` instance. 
""" ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) ax.xaxis.set_ticks_position(xticks_position) ax.spines['bottom'].set_position(('data', x)) ax.yaxis.set_ticks_position(yticks_position) ax.spines['left'].set_position(('data', y)) if not xticks_visible: ax.set_xticks([]) if not yticks_visible: ax.set_yticks([]) ax.figure.canvas.draw() if show: if RICH_DISPLAY: display(ax.figure) else: ax.figure.show() def axes_set_aspect_ratio(ax, ratio = 'equal', show = True): """ function that accepts an Axes instance and update the information setting the aspect ratio of the axis to the defined quantity Parameters: ----------- ax : matplotlib.axes.Axes or matplotlib.axes.Subplot instance ratio : str or int/float The value can be a string with the following values: 'equal' : (default) same scaling from data to plot units for x and y 'auto' : automatic; fill position rectangle with data Or a: number (int or float) : a circle will be stretched such that the height is num times the width. aspec t =1 is the same as aspect='equal'. show : bool if `True` the figure will be shown. If you are working in a rich display environment like the IPython qtconsole or the Jupyter notebook it will use `IPython.display.display` to show the figure. If you are working otherwise it will call the `show` of the `Figure` instance. """ ax.set_aspect(ratio, adjustable = None) if show: if RICH_DISPLAY: display(ax.figure) else: ax.figure.show()
mit
-877,875,270,445,470,800
36.685841
80
0.588539
false
ancafarcas/superdesk-core
superdesk/media/media_operations.py
1
5561
# -*- coding: utf-8; -*- # # This file is part of Superdesk. # # Copyright 2013, 2014 Sourcefabric z.u. and contributors. # # For the full copyright and license information, please see the # AUTHORS and LICENSE files distributed with this source code, or # at https://www.sourcefabric.org/superdesk/license import arrow import magic import hashlib import logging import requests from bson import ObjectId from io import BytesIO from PIL import Image from flask import json from .image import get_meta, fix_orientation from .video import get_meta as video_meta import base64 from superdesk.errors import SuperdeskApiError logger = logging.getLogger(__name__) def hash_file(afile, hasher, blocksize=65536): buf = afile.read(blocksize) while len(buf) > 0: hasher.update(buf) buf = afile.read(blocksize) return hasher.hexdigest() def get_file_name(file): return hash_file(file, hashlib.sha256()) def download_file_from_url(url): rv = requests.get(url, timeout=15) if rv.status_code not in (200, 201): raise SuperdeskApiError.internalError('Failed to retrieve file from URL: %s' % url) mime = magic.from_buffer(rv.content, mime=True) ext = str(mime).split('/')[1] name = str(ObjectId()) + ext return BytesIO(rv.content), name, str(mime) def download_file_from_encoded_str(encoded_str): content = encoded_str.split(';base64,') mime = content[0].split(':')[1] ext = content[0].split('/')[1] name = str(ObjectId()) + ext content = base64.b64decode(content[1]) return BytesIO(content), name, mime def process_file_from_stream(content, content_type=None): content_type = content_type or content.content_type content = BytesIO(content.read()) if 'application/' in content_type: content_type = magic.from_buffer(content.getvalue(), mime=True) content.seek(0) file_type, ext = content_type.split('/') try: metadata = process_file(content, file_type) except OSError: # error from PIL when image is supposed to be an image but is not. raise SuperdeskApiError.internalError('Failed to process file') file_name = get_file_name(content) content.seek(0) metadata = encode_metadata(metadata) metadata.update({'length': json.dumps(len(content.getvalue()))}) return file_name, content_type, metadata def encode_metadata(metadata): return dict((k.lower(), json.dumps(v)) for k, v in metadata.items()) def decode_metadata(metadata): return dict((k.lower(), decode_val(v)) for k, v in metadata.items()) def decode_val(string_val): """Format dates that elastic will try to convert automatically.""" val = json.loads(string_val) try: arrow.get(val, 'YYYY-MM-DD') # test if it will get matched by elastic return str(arrow.get(val)) except (Exception): return val def process_file(content, type): """Retrieves the media file metadata :param BytesIO content: content stream :param str type: type of media file :return: dict metadata related to media file. """ if type == 'image': return process_image(content) if type in ('audio', 'video'): return process_video(content) return {} def process_video(content): """Retrieves the video/audio metadata :param BytesIO content: content stream :return: dict video/audio metadata """ content.seek(0) meta = video_meta(content) content.seek(0) return meta def process_image(content): """Retrieves the image metadata :param BytesIO content: content stream :return: dict image metadata """ content.seek(0) meta = get_meta(content) fix_orientation(content) content.seek(0) return meta def _get_cropping_data(doc): """Get PIL Image crop data from doc with superdesk crops specs. 
:param doc: crop dict """ if all([doc.get('CropTop', None) is not None, doc.get('CropLeft', None) is not None, doc.get('CropRight', None) is not None, doc.get('CropBottom', None) is not None]): return (int(doc['CropLeft']), int(doc['CropTop']), int(doc['CropRight']), int(doc['CropBottom'])) def crop_image(content, file_name, cropping_data, exact_size=None, image_format=None): """Crop image stream to given crop. :param content: image file stream :param file_name :param cropping_data: superdesk crop dict ({'CropLeft': 0, 'CropTop': 0, ...}) :param exact_size: dict with `width` and `height` values """ if not isinstance(cropping_data, tuple): cropping_data = _get_cropping_data(cropping_data) if cropping_data: logger.debug('Opened image {} from stream, going to crop it'.format(file_name)) content.seek(0) img = Image.open(content) cropped = img.crop(cropping_data) if exact_size and 'width' in exact_size and 'height' in exact_size: cropped = cropped.resize((int(exact_size['width']), int(exact_size['height'])), Image.ANTIALIAS) logger.debug('Cropped image {} from stream, going to save it'.format(file_name)) try: out = BytesIO() cropped.save(out, image_format or img.format) out.seek(0) setattr(out, 'width', cropped.size[0]) setattr(out, 'height', cropped.size[1]) return True, out except Exception as io: logger.exception('Failed to generate crop for filename: {}. Crop: {}'.format(file_name, cropping_data)) return False, io return False, content
agpl-3.0
2,342,765,068,469,414,000
30.95977
115
0.660133
false
FirmlyReality/docklet
src/master/testTaskMgr.py
2
5417
import master.taskmgr from concurrent import futures import grpc from protos.rpc_pb2 import * from protos.rpc_pb2_grpc import * import threading, json, time, random from utils import env class SimulatedNodeMgr(): def get_batch_nodeips(self): return ['0.0.0.0'] class SimulatedMonitorFetcher(): def __init__(self, ip): self.info = {} self.info['cpuconfig'] = [1,1,1,1,1,1,1,1] self.info['meminfo'] = {} self.info['meminfo']['free'] = 8 * 1024 * 1024 # (kb) simulate 8 GB memory self.info['meminfo']['buffers'] = 8 * 1024 * 1024 self.info['meminfo']['cached'] = 8 * 1024 * 1024 self.info['diskinfo'] = [] self.info['diskinfo'].append({}) self.info['diskinfo'][0]['free'] = 16 * 1024 * 1024 * 1024 # (b) simulate 16 GB disk self.info['gpuinfo'] = [1,1] class SimulatedTaskController(WorkerServicer): def __init__(self, worker): self.worker = worker def start_vnode(self, vnodeinfo, context): print('[SimulatedTaskController] start vnode, taskid [%s] vnodeid [%d]' % (vnodeinfo.taskid, vnodeinfo.vnodeid)) return Reply(status=Reply.ACCEPTED,message="") def stop_vnode(self, vnodeinfo, context): print('[SimulatedTaskController] stop vnode, taskid [%s] vnodeid [%d]' % (vnodeinfo.taskid, vnodeinfo.vnodeid)) return Reply(status=Reply.ACCEPTED,message="") def start_task(self, taskinfo, context): print('[SimulatedTaskController] start task, taskid [%s] vnodeid [%d] token [%s]' % (taskinfo.taskid, taskinfo.vnodeid, taskinfo.token)) worker.process(taskinfo) return Reply(status=Reply.ACCEPTED,message="") def stop_task(self, taskinfo, context): print('[SimulatedTaskController] stop task, taskid [%s] vnodeid [%d] token [%s]' % (taskinfo.taskid, taskinfo.vnodeid, taskinfo.token)) return Reply(status=Reply.ACCEPTED,message="") class SimulatedWorker(threading.Thread): def __init__(self): threading.Thread.__init__(self) self.thread_stop = False self.tasks = [] def run(self): worker_port = env.getenv('BATCH_WORKER_PORT') server = grpc.server(futures.ThreadPoolExecutor(max_workers=5)) add_WorkerServicer_to_server(SimulatedTaskController(self), server) server.add_insecure_port('[::]:' + worker_port) server.start() while not self.thread_stop: for task in self.tasks: seed = random.random() if seed < 0.25: report(task.taskid, task.vnodeid, RUNNING, task.token) elif seed < 0.5: report(task.taskid, task.vnodeid, COMPLETED, task.token) self.tasks.remove(task) break elif seed < 0.75: report(task.taskid, task.vnodeid, FAILED, task.token) self.tasks.remove(task) break else: pass time.sleep(5) server.stop(0) def stop(self): self.thread_stop = True def process(self, task): self.tasks.append(task) class SimulatedJobMgr(threading.Thread): def __init__(self): threading.Thread.__init__(self) self.thread_stop = False def run(self): while not self.thread_stop: time.sleep(5) server.stop(0) def stop(self): self.thread_stop = True def report(self, task): print('[SimulatedJobMgr] task[%s] status %d' % (task.info.id, task.status)) def assignTask(self, taskmgr, taskid, instance_count, retry_count, timeout, cpu, memory, disk, gpu): task = {} task['instCount'] = instance_count task['retryCount'] = retry_count task['expTime'] = timeout task['at_same_time'] = True task['multicommand'] = True task['command'] = 'ls' task['srcAddr'] = '' task['envVars'] = {'a':'1'} task['stdErrRedPth'] = '' task['stdOutRedPth'] = '' task['image'] = 'root_root_base' task['cpuSetting'] = cpu task['memorySetting'] = memory task['diskSetting'] = disk task['gpuSetting'] = 0 task['mapping'] = [] taskmgr.add_task('root', taskid, task) class SimulatedLogger(): def info(self, msg): 
print('[INFO] ' + msg) def warning(self, msg): print('[WARNING] ' + msg) def error(self, msg): print('[ERROR] ' + msg) def test(): global worker global jobmgr global taskmgr worker = SimulatedWorker() worker.start() jobmgr = SimulatedJobMgr() jobmgr.start() taskmgr = master.taskmgr.TaskMgr(SimulatedNodeMgr(), SimulatedMonitorFetcher, master_ip='', scheduler_interval=2, external_logger=SimulatedLogger()) # taskmgr.set_jobmgr(jobmgr) taskmgr.start() add('task_0', instance_count=2, retry_count=2, timeout=60, cpu=2, memory=2048, disk=2048, gpu=0) def test2(): global jobmgr global taskmgr jobmgr = SimulatedJobMgr() jobmgr.start() taskmgr = master.taskmgr.TaskMgr(SimulatedNodeMgr(), SimulatedMonitorFetcher, master_ip='', scheduler_interval=2, external_logger=SimulatedLogger()) taskmgr.set_jobmgr(jobmgr) taskmgr.start() add('task_0', instance_count=2, retry_count=2, timeout=60, cpu=2, memory=2048, disk=2048, gpu=0) def add(taskid, instance_count, retry_count, timeout, cpu, memory, disk, gpu): global jobmgr global taskmgr jobmgr.assignTask(taskmgr, taskid, instance_count, retry_count, timeout, cpu, memory, disk, gpu) def report(taskid, instanceid, status, token): global taskmgr master_port = env.getenv('BATCH_MASTER_PORT') channel = grpc.insecure_channel('%s:%s' % ('0.0.0.0', master_port)) stub = MasterStub(channel) response = stub.report(ReportMsg(taskmsgs=[TaskMsg(taskid=taskid, username='root', vnodeid=instanceid, subTaskStatus=status, token=token)])) def stop(): global worker global jobmgr global taskmgr worker.stop() jobmgr.stop() taskmgr.stop()
bsd-3-clause
-676,241,921,716,462,500
27.067358
149
0.690419
false
fastinetserver/portage-idfetch
pym/portage/cache/sql_template.py
1
9336
# Copyright: 2005 Gentoo Foundation # Author(s): Brian Harring ([email protected]) # License: GPL2 import sys from portage.cache import template, cache_errors from portage.cache.template import reconstruct_eclasses class SQLDatabase(template.database): """template class for RDBM based caches This class is designed such that derivatives don't have to change much code, mostly constant strings. _BaseError must be an exception class that all Exceptions thrown from the derived RDBMS are derived from. SCHEMA_INSERT_CPV_INTO_PACKAGE should be modified dependant on the RDBMS, as should SCHEMA_PACKAGE_CREATE- basically you need to deal with creation of a unique pkgid. If the dbapi2 rdbms class has a method of recovering that id, then modify _insert_cpv to remove the extra select. Creation of a derived class involves supplying _initdb_con, and table_exists. Additionally, the default schemas may have to be modified. """ SCHEMA_PACKAGE_NAME = "package_cache" SCHEMA_PACKAGE_CREATE = "CREATE TABLE %s (\ pkgid INTEGER PRIMARY KEY, label VARCHAR(255), cpv VARCHAR(255), UNIQUE(label, cpv))" % SCHEMA_PACKAGE_NAME SCHEMA_PACKAGE_DROP = "DROP TABLE %s" % SCHEMA_PACKAGE_NAME SCHEMA_VALUES_NAME = "values_cache" SCHEMA_VALUES_CREATE = "CREATE TABLE %s ( pkgid integer references %s (pkgid) on delete cascade, \ key varchar(255), value text, UNIQUE(pkgid, key))" % (SCHEMA_VALUES_NAME, SCHEMA_PACKAGE_NAME) SCHEMA_VALUES_DROP = "DROP TABLE %s" % SCHEMA_VALUES_NAME SCHEMA_INSERT_CPV_INTO_PACKAGE = "INSERT INTO %s (label, cpv) VALUES(%%s, %%s)" % SCHEMA_PACKAGE_NAME _BaseError = () _dbClass = None autocommits = False # cleanse_keys = True # boolean indicating if the derived RDBMS class supports replace syntax _supports_replace = False def __init__(self, location, label, auxdbkeys, *args, **config): """initialize the instance. derived classes shouldn't need to override this""" super(SQLDatabase, self).__init__(location, label, auxdbkeys, *args, **config) config.setdefault("host","127.0.0.1") config.setdefault("autocommit", self.autocommits) self._initdb_con(config) self.label = self._sfilter(self.label) def _dbconnect(self, config): """should be overridden if the derived class needs special parameters for initializing the db connection, or cursor""" self.db = self._dbClass(**config) self.con = self.db.cursor() def _initdb_con(self,config): """ensure needed tables are in place. If the derived class needs a different set of table creation commands, overload the approriate SCHEMA_ attributes. 
If it needs additional execution beyond, override""" self._dbconnect(config) if not self._table_exists(self.SCHEMA_PACKAGE_NAME): if self.readonly: raise cache_errors.ReadOnlyRestriction("table %s doesn't exist" % \ self.SCHEMA_PACKAGE_NAME) try: self.con.execute(self.SCHEMA_PACKAGE_CREATE) except self._BaseError as e: raise cache_errors.InitializationError(self.__class__, e) if not self._table_exists(self.SCHEMA_VALUES_NAME): if self.readonly: raise cache_errors.ReadOnlyRestriction("table %s doesn't exist" % \ self.SCHEMA_VALUES_NAME) try: self.con.execute(self.SCHEMA_VALUES_CREATE) except self._BaseError as e: raise cache_errors.InitializationError(self.__class__, e) def _table_exists(self, tbl): """return true if a table exists derived classes must override this""" raise NotImplementedError def _sfilter(self, s): """meta escaping, returns quoted string for use in sql statements""" return "\"%s\"" % s.replace("\\","\\\\").replace("\"","\\\"") def _getitem(self, cpv): try: self.con.execute("SELECT key, value FROM %s NATURAL JOIN %s " "WHERE label=%s AND cpv=%s" % (self.SCHEMA_PACKAGE_NAME, self.SCHEMA_VALUES_NAME, self.label, self._sfilter(cpv))) except self._BaseError as e: raise cache_errors.CacheCorruption(self, cpv, e) rows = self.con.fetchall() if len(rows) == 0: raise KeyError(cpv) vals = dict([(k,"") for k in self._known_keys]) vals.update(dict(rows)) return vals def _delitem(self, cpv): """delete a cpv cache entry derived RDBM classes for this *must* either support cascaded deletes, or override this method""" try: try: self.con.execute("DELETE FROM %s WHERE label=%s AND cpv=%s" % \ (self.SCHEMA_PACKAGE_NAME, self.label, self._sfilter(cpv))) if self.autocommits: self.commit() except self._BaseError as e: raise cache_errors.CacheCorruption(self, cpv, e) if self.con.rowcount <= 0: raise KeyError(cpv) except Exception: if not self.autocommits: self.db.rollback() # yes, this can roll back a lot more then just the delete. deal. raise def __del__(self): # just to be safe. if "db" in self.__dict__ and self.db != None: self.commit() self.db.close() def _setitem(self, cpv, values): try: # insert. try: pkgid = self._insert_cpv(cpv) except self._BaseError as e: raise cache_errors.CacheCorruption(cpv, e) # __getitem__ fills out missing values, # so we store only what's handed to us and is a known key db_values = [] for key in self._known_keys: if key in values and values[key]: db_values.append({"key":key, "value":values[key]}) if len(db_values) > 0: try: self.con.executemany("INSERT INTO %s (pkgid, key, value) VALUES(\"%s\", %%(key)s, %%(value)s)" % \ (self.SCHEMA_VALUES_NAME, str(pkgid)), db_values) except self._BaseError as e: raise cache_errors.CacheCorruption(cpv, e) if self.autocommits: self.commit() except Exception: if not self.autocommits: try: self.db.rollback() except self._BaseError: pass raise def _insert_cpv(self, cpv): """uses SCHEMA_INSERT_CPV_INTO_PACKAGE, which must be overloaded if the table definition doesn't support auto-increment columns for pkgid. returns the cpvs new pkgid note this doesn't commit the transaction. The caller is expected to.""" cpv = self._sfilter(cpv) if self._supports_replace: query_str = self.SCHEMA_INSERT_CPV_INTO_PACKAGE.replace("INSERT","REPLACE",1) else: # just delete it. 
try: del self[cpv] except (cache_errors.CacheCorruption, KeyError): pass query_str = self.SCHEMA_INSERT_CPV_INTO_PACKAGE try: self.con.execute(query_str % (self.label, cpv)) except self._BaseError: self.db.rollback() raise self.con.execute("SELECT pkgid FROM %s WHERE label=%s AND cpv=%s" % \ (self.SCHEMA_PACKAGE_NAME, self.label, cpv)) if self.con.rowcount != 1: raise cache_error.CacheCorruption(cpv, "Tried to insert the cpv, but found " " %i matches upon the following select!" % len(rows)) return self.con.fetchone()[0] def __contains__(self, cpv): if not self.autocommits: try: self.commit() except self._BaseError as e: raise cache_errors.GeneralCacheCorruption(e) try: self.con.execute("SELECT cpv FROM %s WHERE label=%s AND cpv=%s" % \ (self.SCHEMA_PACKAGE_NAME, self.label, self._sfilter(cpv))) except self._BaseError as e: raise cache_errors.GeneralCacheCorruption(e) return self.con.rowcount > 0 def __iter__(self): if not self.autocommits: try: self.commit() except self._BaseError as e: raise cache_errors.GeneralCacheCorruption(e) try: self.con.execute("SELECT cpv FROM %s WHERE label=%s" % (self.SCHEMA_PACKAGE_NAME, self.label)) except self._BaseError as e: raise cache_errors.GeneralCacheCorruption(e) # return [ row[0] for row in self.con.fetchall() ] for x in self.con.fetchall(): yield x[0] def iteritems(self): try: self.con.execute("SELECT cpv, key, value FROM %s NATURAL JOIN %s " "WHERE label=%s" % (self.SCHEMA_PACKAGE_NAME, self.SCHEMA_VALUES_NAME, self.label)) except self._BaseError as e: raise cache_errors.CacheCorruption(self, cpv, e) oldcpv = None l = [] for x, y, v in self.con.fetchall(): if oldcpv != x: if oldcpv != None: d = dict(l) if "_eclasses_" in d: d["_eclasses_"] = reconstruct_eclasses(oldcpv, d["_eclasses_"]) else: d["_eclasses_"] = {} yield cpv, d l.clear() oldcpv = x l.append((y,v)) if oldcpv != None: d = dict(l) if "_eclasses_" in d: d["_eclasses_"] = reconstruct_eclasses(oldcpv, d["_eclasses_"]) else: d["_eclasses_"] = {} yield cpv, d def commit(self): self.db.commit() def get_matches(self,match_dict): query_list = [] for k,v in match_dict.items(): if k not in self._known_keys: raise cache_errors.InvalidRestriction(k, v, "key isn't known to this cache instance") v = v.replace("%","\\%") v = v.replace(".*","%") query_list.append("(key=%s AND value LIKE %s)" % (self._sfilter(k), self._sfilter(v))) if len(query_list): query = " AND "+" AND ".join(query_list) else: query = '' print("query = SELECT cpv from package_cache natural join values_cache WHERE label=%s %s" % (self.label, query)) try: self.con.execute("SELECT cpv from package_cache natural join values_cache WHERE label=%s %s" % \ (self.label, query)) except self._BaseError as e: raise cache_errors.GeneralCacheCorruption(e) return [ row[0] for row in self.con.fetchall() ] if sys.hexversion >= 0x3000000: items = iteritems keys = __iter__
gpl-2.0
8,476,169,925,880,248,000
30.434343
114
0.678877
false
Ryex/Rabbyt
rabbyt/sprites.py
1
6567
from rabbyt._sprites import cBaseSprite, cSprite from rabbyt._rabbyt import pick_texture_target from rabbyt.anims import anim_slot, swizzle, Animable from rabbyt.primitives import Quad class BaseSprite(cBaseSprite, Animable): """ ``BaseSprite(...)`` This class provides some basic functionality for sprites: * transformations (x, y, rot, scale) * color (red, green, blue, alpha) * bounding_radius (for collision detection) ``BaseSprite`` doesn't render anything itself You'll want to subclass it and override either ``render()`` or ``render_after_transform()``. You can pass any of the ``BaseSprite`` properties as keyword arguments. (``x``, ``y``, ``xy``, etc.) """ x = anim_slot(default=0, index=0, doc="x coordinate of the sprite") y = anim_slot(default=0, index=1, doc="y coordinate of the sprite") rot = anim_slot(default=0, index=2, doc="rotation angle in degrees.") red = anim_slot(default=1, index=3, doc="red color component") green = anim_slot(default=1, index=4, doc="green color component") blue = anim_slot(default=1, index=5, doc="blue color component") alpha = anim_slot(default=1, index=6, doc="alpha color component") scale_x = anim_slot(default=1, index=7, doc="x component of ``scale``") scale_y = anim_slot(default=1, index=8, doc="y component of ``scale``") xy = swizzle("x", "y") rgb = swizzle("red", "green", "blue") rgba = swizzle("red", "green", "blue", "alpha") def _get_scale(self): if self.scale_x == self.scale_y: return self.scale_x else: return (self.scale_x, self.scale_y) def _set_scale(self, s): if hasattr(s, "__len__"): self.scale_x, self.scale_y = s else: self.scale_x = self.scale_y = s scale = property(_get_scale, _set_scale, doc= """ scale ``1.0`` is normal size; ``0.5`` is half size, ``2.0`` is double size... you get the point. You can scale the x and y axes independently by assigning a tuple with a length of two. """) class Sprite(cSprite, BaseSprite): """ ``Sprite(texture=None, shape=None, tex_shape=(0,1,1,0), ...)`` This class provides a basic, four point, textured sprite. All arguments are optional. ``texture`` should be an image filename, a pyglet texture object, or an OpenGL texture id. (See ``Sprite.texture`` for more information.) If ``shape`` is not given it will default to the dimensions of the texture if they are available. For more information on ``shape`` and ``tex_shape`` read the docstrings for ``Sprite.shape`` and ``Sprite.tex_shape`` Additionally, you can pass values for most of the properties as keyword arguments. (``x``, ``y``, ``xy``, ``u``, ``v``, ``uv``, etc...) """ u = anim_slot(default=0, index=9, doc="texture offset") v = anim_slot(default=0, index=10, doc="texture offset") uv = swizzle("u", "v") def __init__(self, texture=None, shape=None, tex_shape=None, **kwargs): BaseSprite.__init__(self) cSprite.__init__(self) self.red = self.green = self.blue = self.alpha = 1 self.x = self.y = 0 self.scale = 1 self.rot = 0 self.texture_id = -1 # If no shape or tex_shape was given, we want to have useful defaults # in case the texture doesn't set them. if shape is None: s = 10. self.shape = [s, s, -s, -s] if tex_shape is None: self.tex_shape = (0,1,1,0) self.texture = texture # If shape or tex_shape were given, we want them to override the # values set when we set the texture. 
if shape is not None: self.shape = shape if tex_shape is not None: self.tex_shape = tex_shape for name, value in list(kwargs.items()): if hasattr(self.__class__, name) and isinstance( getattr(self.__class__, name), (swizzle, anim_slot, property)): setattr(self, name, value) else: raise ValueError("unexpected keyword argument %r" % name) def ensure_target(self): if not self.texture_target: target = pick_texture_target() self.texture_target = target def _get_texture(self): return self._tex_obj def _set_texture(self, texture): self._tex_obj = texture tex_size = None if isinstance(texture, str): from rabbyt._rabbyt import load_texture_file_hook res = load_texture_file_hook(texture) if isinstance(res, tuple) and len(res) == 2: self.texture_id, tex_size = res else: self.texture = res # Recursive elif isinstance(texture, int): self.texture_id = texture elif hasattr(texture, "id"): if hasattr(texture, "target"): self.texture_target = texture.target self.texture_id = texture.id if hasattr(texture, "tex_coords"): self.tex_shape = texture.tex_coords self.uv = 0,0 elif hasattr(texture, "tex_shape"): self.tex_shape = texture.tex_shape if hasattr(texture, "width") and hasattr(texture, "height"): tex_size = (texture.width, texture.height) elif texture is None: self.texture_id = 0 else: raise ValueError("texture should be either an int or str.") if tex_size: w, h = tex_size self.shape = [-w/2, h/2, w/2, -h/2] texture = property(_get_texture, _set_texture, doc= """ ``Sprite.texture`` The texture used for this sprite. The value can be in a variety of formats: If it's a string, it will be used as a filename to load the texture. If it's an integer, it will be used as an OpenGL texture id. If it's an object with an ``id`` attribute, it will be treated as a pyglet texture object. (The ``width``, ``height``, and ``tex_coords`` attributes will set the sprite's ``shape`` and ``tex_shape`` properties.) """) __docs_all__ = ["BaseSprite", "Sprite"]
mit
-8,949,477,883,439,346,000
35.525714
78
0.559769
false
aronasorman/kolibri
kolibri/logger/serializers.py
1
2680
from kolibri.logger.models import AttemptLog, ContentRatingLog, ContentSessionLog, ContentSummaryLog, MasteryLog, UserSessionLog from rest_framework import serializers class ContentSessionLogSerializer(serializers.ModelSerializer): class Meta: model = ContentSessionLog fields = ('pk', 'user', 'content_id', 'channel_id', 'start_timestamp', 'end_timestamp', 'time_spent', 'kind', 'extra_fields', 'progress') class MasteryLogSerializer(serializers.ModelSerializer): pastattempts = serializers.SerializerMethodField() totalattempts = serializers.SerializerMethodField() class Meta: model = MasteryLog fields = ('id', 'summarylog', 'start_timestamp', 'pastattempts', 'totalattempts', 'end_timestamp', 'completion_timestamp', 'mastery_criterion', 'mastery_level', 'complete') def get_pastattempts(self, obj): # will return a list of the latest 10 correct and hint_taken fields for each attempt. return AttemptLog.objects.filter(masterylog__summarylog=obj.summarylog).values('correct', 'hinted').order_by('-start_timestamp')[:10] def get_totalattempts(self, obj): return AttemptLog.objects.filter(masterylog__summarylog=obj.summarylog).count() class AttemptLogSerializer(serializers.ModelSerializer): class Meta: model = AttemptLog fields = ('id', 'masterylog', 'start_timestamp', 'sessionlog', 'end_timestamp', 'completion_timestamp', 'item', 'time_spent', 'complete', 'correct', 'hinted', 'answer', 'simple_answer', 'interaction_history') class ContentSummaryLogSerializer(serializers.ModelSerializer): currentmasterylog = serializers.SerializerMethodField() class Meta: model = ContentSummaryLog fields = ('pk', 'user', 'content_id', 'channel_id', 'start_timestamp', 'currentmasterylog', 'end_timestamp', 'completion_timestamp', 'time_spent', 'progress', 'kind', 'extra_fields') def get_currentmasterylog(self, obj): try: current_log = obj.masterylogs.latest('end_timestamp') return MasteryLogSerializer(current_log).data except MasteryLog.DoesNotExist: return None class ContentRatingLogSerializer(serializers.ModelSerializer): class Meta: model = ContentRatingLog fields = ('pk', 'user', 'content_id', 'channel_id', 'quality', 'ease', 'learning', 'feedback') class UserSessionLogSerializer(serializers.ModelSerializer): class Meta: model = UserSessionLog fields = ('pk', 'user', 'channels', 'start_timestamp', 'last_interaction_timestamp', 'pages')
mit
401,927,789,942,079,940
39.606061
141
0.680597
false
SAAVY/magpie
client/blacklist.py
1
1546
from flask import current_app from netaddr import IPNetwork, IPAddress from netaddr.core import AddrFormatError bl_website_ip = [] # array of tuples (network mask, port) def build_website_blacklist(logger): with open("config/blacklist_website_ip.txt") as f: for line in f: network_address = line.strip() ip, separator, port = network_address.rpartition(':') if not separator: address = (network_address, '') else: address = (ip, port) if not port: logger.error("check blacklist_website_ip.txt: must specify port number after ':' in ip") continue try: IPNetwork(address[0]) bl_website_ip.append(address) except AddrFormatError as e: logger.error("Format error. check blacklist_website_ip.txt: %s" % str(e)) def is_website_blacklisted(website_ip, website_port): logger = current_app.logger logger.debug("FUNC: is_website_blacklisted ip_address: %s port: %s" % (website_ip, website_port)) for network_mask, port in bl_website_ip: try: if IPAddress(website_ip) in IPNetwork(network_mask): if port and website_port == port: return True elif port: return False return True except Exception as e: logger.exception("FUNC: is_website_blacklisted Exception: %s" % str(e)) return False
mit
-8,088,540,729,857,940,000
36.707317
108
0.574386
false
pycrystem/pycrystem
pyxem/tests/test_signals/test_power2d.py
1
3662
# -*- coding: utf-8 -*- # Copyright 2017-2019 The pyXem developers # # This file is part of pyXem. # # pyXem is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # pyXem is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with pyXem. If not, see <http://www.gnu.org/licenses/>. import pytest import numpy as np import dask.array as da from hyperspy.signals import Signal2D from pyxem.signals.power2d import Power2D, LazyPower2D class TestComputeAndAsLazy2D: def test_2d_data_compute(self): dask_array = da.random.random((100, 150), chunks=(50, 50)) s = LazyPower2D(dask_array) scale0, scale1, metadata_string = 0.5, 1.5, "test" s.axes_manager[0].scale = scale0 s.axes_manager[1].scale = scale1 s.metadata.Test = metadata_string s.compute() assert s.__class__ == Power2D assert not hasattr(s.data, "compute") assert s.axes_manager[0].scale == scale0 assert s.axes_manager[1].scale == scale1 assert s.metadata.Test == metadata_string assert dask_array.shape == s.data.shape def test_4d_data_compute(self): dask_array = da.random.random((4, 4, 10, 15), chunks=(1, 1, 10, 15)) s = LazyPower2D(dask_array) s.compute() assert s.__class__ == Power2D assert dask_array.shape == s.data.shape def test_2d_data_as_lazy(self): data = np.random.random((100, 150)) s = Power2D(data) scale0, scale1, metadata_string = 0.5, 1.5, "test" s.axes_manager[0].scale = scale0 s.axes_manager[1].scale = scale1 s.metadata.Test = metadata_string s_lazy = s.as_lazy() assert s_lazy.__class__ == LazyPower2D assert hasattr(s_lazy.data, "compute") assert s_lazy.axes_manager[0].scale == scale0 assert s_lazy.axes_manager[1].scale == scale1 assert s_lazy.metadata.Test == metadata_string assert data.shape == s_lazy.data.shape def test_4d_data_as_lazy(self): data = np.random.random((4, 10, 15)) s = Power2D(data) s_lazy = s.as_lazy() assert s_lazy.__class__ == LazyPower2D assert data.shape == s_lazy.data.shape class TestPower: @pytest.fixture def flat_pattern(self): pd = Power2D(data=np.ones(shape=(2, 2, 5, 5))) return pd @pytest.mark.parametrize("k_region", [None, [2.0, 4.0]]) @pytest.mark.parametrize("sym", [None, 4, [2, 4]]) def test_power_signal_get_map(self, flat_pattern, k_region, sym): flat_pattern.get_map(k_region=k_region, symmetry=sym) @pytest.mark.parametrize("k_region", [None, [2.0, 4.0]]) @pytest.mark.parametrize("sym", [[2, 4]]) def test_power_signal_plot_symmetries(self, flat_pattern, k_region, sym): flat_pattern.plot_symmetries(k_region=k_region, symmetry=sym) class TestDecomposition: def test_decomposition_is_performed(self, diffraction_pattern): s = Power2D(diffraction_pattern) s.decomposition() assert s.learning_results is not None def test_decomposition_class_assignment(self, diffraction_pattern): s = Power2D(diffraction_pattern) s.decomposition() assert isinstance(s, Power2D)
gpl-3.0
3,197,397,849,401,754,000
35.62
77
0.647733
false
McGillX/edx_data_research
edx_data_research/parsing/parse_course_structure.py
1
4815
import json from edx_data_research.parsing.parse import Parse class CourseStructure(Parse): def __init__(self, args): super(CourseStructure, self).__init__(args) self.collections = ['course_structure'] self.course_structure_file = args.course_structure_file self.drop = args.drop def migrate(self): if self.drop: self.collections['course_structure'].drop() json_data = self._load_json_data(self.course_structure_file) json_data = self._parse_key_names(json_data) json_data = self._delete_category(json_data, 'conditional') json_data = self._delete_category(json_data, 'wrapper') json_data = self._build_parent_data(json_data) json_data = self._update_parent_data(json_data) for key in json_data: self.collections['course_structure'].insert(json_data[key]) def _load_json_data(self, file_name): '''Retrieve data from the json file''' with open(file_name) as file_handler: json_data = json.load(file_handler) return json_data def _parse_key_names(self, json_data): '''Parse key names''' new_json_data = {} for key in json_data: new_key = key.split('/')[-1] json_data[key]['_id'] = new_key if json_data[key]['children']: for index, child in enumerate(json_data[key]['children']): json_data[key]['children'][index] = child.split('/')[-1] new_json_data[new_key] = json_data[key] return new_json_data def _delete_category(self, json_data, category): '''Delete data with given category from json_data ''' for key in json_data.keys(): if json_data[key]['category'] == category: for item in json_data.keys(): if json_data[item]['children'] and key in json_data[item]['children']: parent_id = item index_child = json_data[parent_id]['children'].index(key) left_list = json_data[parent_id]['children'][:index_child] right_list = json_data[parent_id]['children'][index_child + 1:] json_data[parent_id]['children'] = left_list + json_data[key]['children'] + right_list del json_data[key] return json_data def _build_parent_data(self, json_data): '''Build parent data''' error_count = 0 for key in json_data: if json_data[key]['children']: for index, child_key in enumerate(json_data[key]['children']): try: json_data[child_key]['parent_data'] = {} except: error_count += 1 continue parent_category = json_data[key]['category'] parent_order_key = parent_category + '_order' parent_id_key = parent_category + '_id' parent_display_name_key = parent_category + '_display_name' json_data[child_key]['parent_data'][parent_order_key] = index json_data[child_key]['parent_data'][parent_id_key] = json_data[key]['_id'] json_data[child_key]['parent_data'][parent_display_name_key] = json_data[key]['metadata']['display_name'] print "Number of errors when building parent data: {0}".format(error_count) return json_data def _update_parent_data(self, json_data): for key in json_data: if json_data[key]['category'] == 'sequential': chapter_id = json_data[key]['parent_data']['chapter_id'] chapter_parent_data = json_data[chapter_id]['parent_data'] json_data[key]['parent_data'].update(chapter_parent_data) for key in json_data: if json_data[key]['category'] == 'vertical': sequential_id = json_data[key]['parent_data']['sequential_id'] sequential_parent_data = json_data[sequential_id]['parent_data'] json_data[key]['parent_data'].update(sequential_parent_data) for key in json_data: if json_data[key]['category'] not in set(['vertical', 'sequential', 'chapter', 'course']): try: vertical_id = json_data[key]['parent_data']['vertical_id'] vertical_parent_data = json_data[vertical_id]['parent_data'] json_data[key]['parent_data'].update(vertical_parent_data) except: print 
"ERROR: {0}".format(json_data[key]) return json_data
mit
5,415,329,089,375,148,000
46.636364
125
0.538525
false
lizardsystem/threedilib
threedilib/modeling/convert.py
1
8275
# (c) Nelen & Schuurmans. GPL licensed, see LICENSE.rst. # -*- coding: utf-8 -*- """ Convert shapefiles with z coordinates. Choose from the following formats: 'inp' to create an inp file, 'img' to create an image with a plot of the feature, or 'shp' to output a shapefile with the average height of a feature stored in an extra attribute. """ from __future__ import print_function from __future__ import unicode_literals from __future__ import absolute_import from __future__ import division import argparse import math import os import shutil import tempfile from matplotlib.backends import backend_agg from matplotlib import figure from osgeo import gdal from osgeo import ogr from PIL import Image ogr.UseExceptions() def get_parser(): """ Return argument parser. """ parser = argparse.ArgumentParser( description=__doc__, ) parser.add_argument('source_path', metavar='SOURCE', help=('Path to source shapefile.')) parser.add_argument('target_path', metavar='TARGET', help=('Path to target file.')) parser.add_argument('-of', '--output-format', metavar='FORMAT', choices=['inp', 'img', 'shp'], default='shp', help=("Path to output.")) return parser class InputFileWriter(object): """ Writer for input files. """ def __init__(self, path): """ Init the counters and tmpdirs """ self.path = path self.node_count = 0 self.link_count = 0 def __enter__(self): """ Setup tempfiles. """ self.temp_directory = tempfile.mkdtemp() self.node_file = open( os.path.join(self.temp_directory, 'nodes'), 'a+', ) self.link_file = open( os.path.join(self.temp_directory, 'links'), 'a+', ) return self def __exit__(self, type, value, traceback): """ Write 'inputfile' at path. """ with open(self.path, 'w') as input_file: self.node_file.seek(0) input_file.write(self.node_file.read()) input_file.write('-1\n') self.link_file.seek(0) input_file.write(self.link_file.read()) self.node_file.close() self.link_file.close() shutil.rmtree(self.temp_directory) def _write_node(self, node): """ Write a node. """ self.node_count += 1 self.node_file.write('{} {} {} {}\n'.format( self.node_count, node[0], node[1], -node[2] # Depth, not height! )) def _write_link(self): """ Write a link between previous node and next node.""" self.link_count += 1 self.link_file.write('{} {} {}\n'.format( self.link_count, self.node_count, self.node_count + 1, )) def _add_wkb_line_string(self, wkb_line_string): """ Add linestring as nodes and links. """ nodes = [wkb_line_string.GetPoint(i) for i in range(wkb_line_string.GetPointCount())] # Add nodes and links up to the last node for i in range(len(nodes) - 1): self._write_node(nodes[i]) self._write_link() # Add last node, link already covered. self._write_node(nodes[-1]) def add_feature(self, feature): """ Add feature as nodes and links. """ geometry = feature.geometry() geometry_type = geometry.GetGeometryType() if geometry_type == ogr.wkbLineString25D: self._add_wkb_line_string(geometry) elif geometry_type == ogr.wkbMultiLineString25D: for wkb_line_string in geometry: self._add_wkb_line_string(wkb_line_string) class ImageWriter(object): """ Writer for images. """ def __init__(self, path): self.count = 0 self.path = path def __enter__(self): return self def _add_wkb_line_string(self, wkb_line_string, label): """ Plot linestring as separate image. 
""" # Get data x, y, z = zip(*[wkb_line_string.GetPoint(i) for i in range(wkb_line_string.GetPointCount())]) # Determine distance along line l = [0] for i in range(len(z) - 1): l.append(l[-1] + math.sqrt( (x[i + 1] - x[i]) ** 2 + (y[i + 1] - y[i]) ** 2, )) # Plot in matplotlib fig = figure.Figure() axes = fig.add_axes([0.1, 0.1, 0.8, 0.8]) axes.plot(l, z, label=label) axes.legend(loc='best', frameon=False) # Write to image backend_agg.FigureCanvasAgg(fig) buf, size = fig.canvas.print_to_buffer() image = Image.fromstring('RGBA', size, buf) root, ext = os.path.splitext(self.path) image.save(root + '{:00.0f}'.format(self.count) + ext) self.count += 1 def add_feature(self, feature): """ Currently saves every feature in a separate image. """ # Plotlabel label = '\n'.join([': '.join(str(v) for v in item) for item in feature.items().items()]) # Plot according to geometry type geometry = feature.geometry() geometry_type = geometry.GetGeometryType() if geometry_type == ogr.wkbLineString25D: self._add_wkb_line_string(geometry, label=label) elif geometry_type == ogr.wkbMultiLineString25D: for wkb_line_string in geometry: self._add_wkb_line_string(wkb_line_string, label=label) def __exit__(self, type, value, traceback): pass class ShapefileWriter(object): """ Writer for shapefiles. """ ATTRIBUTE = b'kruinhoogt' def __init__(self, path): self.count = 0 self.path = path self.datasource = None self.layer = None def __enter__(self): return self def create_datasource(self, feature): """ Create a datasource based on feature. """ root, ext = os.path.splitext(os.path.basename(self.path)) driver = ogr.GetDriverByName(b'ESRI Shapefile') datasource = driver.CreateDataSource(self.path) layer = datasource.CreateLayer(root) for i in range(feature.GetFieldCount()): layer.CreateField(feature.GetFieldDefnRef(i)) field_defn = ogr.FieldDefn(self.ATTRIBUTE, ogr.OFTReal) layer.CreateField(field_defn) self.datasource = datasource self.layer = layer def add_feature(self, feature): """ Currently saves every feature in a separate image. """ if self.layer is None: self.create_datasource(feature) layer_defn = self.layer.GetLayerDefn() # elevation geometry = feature.geometry().Clone() geometry_type = geometry.GetGeometryType() if geometry_type == ogr.wkbLineString25D: elevation = min([p[2] for p in geometry.GetPoints()]) else: # multilinestring elevation = min([p[2] for g in geometry for p in g.GetPoints()]) geometry.FlattenTo2D() new_feature = ogr.Feature(layer_defn) new_feature.SetGeometry(geometry) for k, v in feature.items().items(): new_feature[k] = v new_feature[self.ATTRIBUTE] = elevation self.layer.CreateFeature(new_feature) def __exit__(self, type, value, traceback): pass def convert(source_path, target_path, output_format): """ Convert shapefile to inp file.""" source_dataset = ogr.Open(str(source_path)) writers = dict( inp=InputFileWriter, img=ImageWriter, shp=ShapefileWriter, ) with writers[output_format](target_path) as writer: for source_layer in source_dataset: total = source_layer.GetFeatureCount() for count, source_feature in enumerate(source_layer, 1): writer.add_feature(source_feature) gdal.TermProgress_nocb(count / total) def main(): """ Call convert() with commandline args. """ convert(**vars(get_parser().parse_args())) if __name__ == '__main__': exit(main())
gpl-3.0
6,584,589,084,668,582,000
32.1
77
0.570997
false
holzenburg/feedshare
feedshare/feedlists/migrations/0002_auto__add_feedlistfeed__del_field_feed_feedlist__del_field_feed_tags__.py
1
8074
# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'FeedListFeed' db.create_table(u'feedlists_feedlistfeed', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('feedlist', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['feedlists.FeedList'])), ('feed', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['feedlists.Feed'])), ('title', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)), )) db.send_create_signal(u'feedlists', ['FeedListFeed']) # Deleting field 'Feed.feedlist' db.delete_column(u'feedlists_feed', 'feedlist_id') # Deleting field 'Feed.tags' db.delete_column(u'feedlists_feed', 'tags') # Adding field 'Feed.site_url' db.add_column(u'feedlists_feed', 'site_url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True), keep_default=False) # Changing field 'Feed.description' db.alter_column(u'feedlists_feed', 'description', self.gf('django.db.models.fields.TextField')(null=True)) # Changing field 'Feed.title' db.alter_column(u'feedlists_feed', 'title', self.gf('django.db.models.fields.CharField')(max_length=255, null=True)) # Changing field 'FeedList.description' db.alter_column(u'feedlists_feedlist', 'description', self.gf('django.db.models.fields.TextField')(null=True)) # Changing field 'FeedList.title' db.alter_column(u'feedlists_feedlist', 'title', self.gf('django.db.models.fields.CharField')(max_length=255, null=True)) # Changing field 'FeedList.author_email' db.alter_column(u'feedlists_feedlist', 'author_email', self.gf('django.db.models.fields.EmailField')(max_length=255, null=True)) # Changing field 'FeedList.url' db.alter_column(u'feedlists_feedlist', 'url', self.gf('django.db.models.fields.URLField')(max_length=255, null=True)) # Changing field 'FeedList.author' db.alter_column(u'feedlists_feedlist', 'author', self.gf('django.db.models.fields.CharField')(max_length=255, null=True)) # Changing field 'FeedList.file' db.alter_column(u'feedlists_feedlist', 'file', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True)) def backwards(self, orm): # Deleting model 'FeedListFeed' db.delete_table(u'feedlists_feedlistfeed') # User chose to not deal with backwards NULL issues for 'Feed.feedlist' raise RuntimeError("Cannot reverse this migration. 
'Feed.feedlist' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'Feed.feedlist' db.add_column(u'feedlists_feed', 'feedlist', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['feedlists.FeedList']), keep_default=False) # Adding field 'Feed.tags' db.add_column(u'feedlists_feed', 'tags', self.gf('django.db.models.fields.TextField')(default='', blank=True), keep_default=False) # Deleting field 'Feed.site_url' db.delete_column(u'feedlists_feed', 'site_url') # Changing field 'Feed.description' db.alter_column(u'feedlists_feed', 'description', self.gf('django.db.models.fields.TextField')(default='')) # Changing field 'Feed.title' db.alter_column(u'feedlists_feed', 'title', self.gf('django.db.models.fields.CharField')(default='', max_length=255)) # Changing field 'FeedList.description' db.alter_column(u'feedlists_feedlist', 'description', self.gf('django.db.models.fields.TextField')(default='')) # Changing field 'FeedList.title' db.alter_column(u'feedlists_feedlist', 'title', self.gf('django.db.models.fields.CharField')(default='', max_length=255)) # Changing field 'FeedList.author_email' db.alter_column(u'feedlists_feedlist', 'author_email', self.gf('django.db.models.fields.EmailField')(default='', max_length=255)) # Changing field 'FeedList.url' db.alter_column(u'feedlists_feedlist', 'url', self.gf('django.db.models.fields.URLField')(default='', max_length=255)) # Changing field 'FeedList.author' db.alter_column(u'feedlists_feedlist', 'author', self.gf('django.db.models.fields.CharField')(default='', max_length=255)) # User chose to not deal with backwards NULL issues for 'FeedList.file' raise RuntimeError("Cannot reverse this migration. 'FeedList.file' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Changing field 'FeedList.file' db.alter_column(u'feedlists_feedlist', 'file', self.gf('django.db.models.fields.files.FileField')(max_length=100)) models = { u'feedlists.feed': { 'Meta': {'object_name': 'Feed'}, 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'site_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'url': ('django.db.models.fields.TextField', [], {}) }, u'feedlists.feedlist': { 'Meta': {'object_name': 'FeedList'}, 'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'author_email': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'datetime_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'datetime_updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'feeds': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['feedlists.Feed']", 'through': u"orm['feedlists.FeedListFeed']", 'symmetrical': 'False'}), 'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'processing_error': ('django.db.models.fields.BooleanField', [], 
{'default': 'False'}), 'secret': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'views': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}) }, u'feedlists.feedlistfeed': { 'Meta': {'object_name': 'FeedListFeed'}, 'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feedlists.Feed']"}), 'feedlist': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feedlists.FeedList']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}) } } complete_apps = ['feedlists']
mit
8,905,969,177,572,606,000
56.269504
180
0.614194
false
RealP/Everpy
examples.py
1
1776
"""Some examples of how to use modules.""" # from everpy_extras import EverPyExtras from everpy_pro import EverPyPro import everpy_utilities PATH_TO_ENSCRIPT = r"C:\Program Files (x86)\Evernote\Evernote\ENScript.exe" def createnote(epy): """Example of how to make a note from python.""" content = open("README.md", "r").read() notebook = "_INBOX" title = "Everpy Generated Note" tags = ["everpy"] attachments = ["README.md"] epy.create_note_from_content(content, notebook_name=notebook, title=title, tags=tags, file_attachments=attachments) def main(): """Example usages.""" dev_token = everpy_utilities.get_token() try: my_evernote = EverPyPro(dev_token, PATH_TO_ENSCRIPT) except: everpy_utilities.refresh_token() my_evernote = EverPyPro(dev_token, PATH_TO_ENSCRIPT) # Find and replace # my_evernote.find_and_replace("evernote", "Evernote", "any:") # Creating a note. # createnote(my_evernote) # Opening client with specific search attributes # my_evernote.get_notes_to_manage() # or # my_evernote.search_notes("stack:Work intitle:\"new employee\"") # Creating a note from an hmtl template # my_evernote.create_note(open("Templates/testnote.html", "r").read(), title="testnote", notebook="_INBOX", tags=["everpy"], attachments=["Templates/testnote.html"]) ############################## # VVVV Tests may not work VVVV. # my_evernote.create_template("Templates/simple_sections.txt") my_evernote.create_template("Templates/card_template.txt") # my_evernote.create_textnote_from_file("template.html", notebook_name="_INBOX") # my_evernote.learn_notebooks() # print(my_evernote.note_book_dict) if __name__ == '__main__': main()
gpl-3.0
-8,499,846,340,086,754,000
33.823529
169
0.662162
false
Stemer114/Reprap_KTY-84-130
repetier/KTY84-130_repetier.py
1
1879
# based on python script from
# http://diyhpl.us/reprap/trunk/users/wizard23/python/lookupTables/KTY84-130.py
#
# adapted by Stemer114 for usage with 4.7k pull-up resistor
# table format for repetier firmware
# https://github.com/Stemer114/Reprap_KTY-84-130
#
# generates a Lookuptable for the following thermistor
# KTY 84-130
# http://www.datasheetcatalog.org/datasheet/philips/KTY84_SERIES_5.pdf

# usage:
# python KTY84-130.py >ThermistorTable.h
# copy ThermistorTable.h into your firmware dir
# enable the lookup table in firmware config.h (depends on firmware)

# resistor values are taken from data sheet page 4, table 1
# temperature range is 0C to 300C in steps of 10K
# the negative temperature entries and the entry for 25C are omitted
resistorValues = [
    498, 538, 581, 626, 672, 722, 773, 826, 882, 940,
    1000, 1062, 1127, 1194, 1262, 1334, 1407, 1482, 1560, 1640,
    1722, 1807, 1893, 1982, 2073, 2166, 2261, 2357, 2452, 2542,
    2624]

tempValues = range(0, 301, 10)

if len(tempValues) != len(resistorValues):
    print "Length of tempValues %d and resistorValues %d does not match" % (len(tempValues), len(resistorValues))
else:
    print "// reprap thermistor table for KTY 84-130 temperature sensor"
    print "// adapted for repetier firmware user thermistortable 1 format"
    print "// for further details see https://github.com/Stemer114/Reprap_KTY-84-130"
    print ""
    print "// consult the readme for how to insert the table into"
    print "// repetier Configuration.h"
    print "#define NUM_TEMPS_USERTHERMISTOR1 %d" % (len(tempValues))
    print "#define USER_THERMISTORTABLE1 {\ "
    suffix = ","
    for i in range(0, len(tempValues)):
        current = 5.0/(4700.0+resistorValues[i])
        voltage = current*resistorValues[i]
        adValue = round(voltage*1023.0/5.0)
        if i == len(tempValues)-1:
            suffix = ""
        print " {%d*4, %d*8}%s \ " % (adValue, tempValues[i], suffix)
    print "};"
mit
9,178,785,433,663,180,000
24.391892
109
0.722193
false
khushboo9293/Hyperkitty
hyperkitty/jobs/update_and_clean_index.py
1
1238
# -*- coding: utf-8 -*- # Copyright (C) 2014-2015 by the Free Software Foundation, Inc. # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, # USA. # # Author: Aurelien Bompard <[email protected]> """ Update the full-text index """ from __future__ import absolute_import, print_function, unicode_literals from django_extensions.management.jobs import BaseJob from hyperkitty.jobs.update_index import run_with_lock class Job(BaseJob): help = "Update the full-text index and clean old entries" when = "daily" def execute(self): run_with_lock(remove=True)
gpl-3.0
-4,215,776,349,076,174,000
32.459459
75
0.74475
false
cs411-entree-app/entree
entree_project/entree_project/urls.py
1
1445
"""entree_project URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.9/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf import settings from django.conf.urls.static import static from django.conf.urls import url, include from django.contrib import admin from django.views.generic import RedirectView from entree import views handler400 = 'entree.views.bad_request' handler403 = 'entree.views.permission_denied' handler404 = 'entree.views.page_not_found' handler500 = 'entree.views.server_error' urlpatterns = [ url(r'^$', RedirectView.as_view(url='entree/')), url(r'^entree/', include('entree.urls', namespace='entree')), url(r'^admin/', admin.site.urls), ] if settings.DEBUG: urlpatterns += [ url(r'^400/$', views.bad_request), url(r'^403/$', views.permission_denied), url(r'^404/$', views.page_not_found), url(r'^500/$', views.server_error), ]
apache-2.0
-1,928,786,586,874,997,500
35.125
79
0.692734
false
Ruide/angr-dev
angr/angr/blade.py
1
12198
import networkx import pyvex from .slicer import SimSlicer class Blade(object): """ Blade is a light-weight program slicer that works with networkx DiGraph containing CFGNodes. It is meant to be used in angr for small or on-the-fly analyses. """ def __init__(self, graph, dst_run, dst_stmt_idx, direction='backward', project=None, cfg=None, ignore_sp=False, ignore_bp=False, ignored_regs=None, max_level=3): """ :param networkx.DiGraph graph: A graph representing the control flow graph. Note that it does not take angr.analyses.CFGAccurate or angr.analyses.CFGFast. :param int dst_run: An address specifying the target SimRun. :param int dst_stmt_idx: The target statement index. -1 means executing until the last statement. :param str direction: 'backward' or 'forward' slicing. Forward slicing is not yet supported. :param angr.Project project: The project instance. :param angr.analyses.CFGBase cfg: the CFG instance. It will be made mandatory later. :param bool ignore_sp: Whether the stack pointer should be ignored in dependency tracking. Any dependency from/to stack pointers will be ignored if this options is True. :param bool ignore_bp: Whether the base pointer should be ignored or not. :param int max_level: The maximum number of blocks that we trace back for. :return: None """ self._graph = graph self._dst_run = dst_run self._dst_stmt_idx = dst_stmt_idx self._ignore_sp = ignore_sp self._ignore_bp = ignore_bp self._max_level = max_level self._slice = networkx.DiGraph() self.project = project self._cfg = cfg if self._cfg is None: # `cfg` is made optional only for compatibility concern. It will be made a positional parameter later. raise AngrBladeError('"cfg" must be specified.') if not self._in_graph(self._dst_run): raise AngrBladeError("The specified SimRun %s doesn't exist in graph." % self._dst_run) self._ignored_regs = set() if ignored_regs: for r in ignored_regs: if isinstance(r, (int, long)): self._ignored_regs.add(r) else: self._ignored_regs.add(self.project.arch.registers[r][0]) self._run_cache = { } self._traced_runs = set() if direction == 'backward': self._backward_slice() elif direction == 'forward': raise AngrBladeError('Forward slicing is not implemented yet') else: raise AngrBladeError("Unknown slicing direction %s", direction) # # Properties # @property def slice(self): return self._slice # # Public methods # def dbg_repr(self, arch=None): if arch is None and self.project is not None: arch = self.project.arch s = "" block_addrs = list(set([ a for a, _ in self.slice.nodes_iter() ])) for block_addr in block_addrs: block_str = "IRSB %#x\n" % block_addr block = self.project.factory.block(block_addr).vex included_stmts = set([ stmt for _, stmt in self.slice.nodes_iter() if _ == block_addr ]) for i, stmt in enumerate(block.statements): if arch is not None: if isinstance(stmt, pyvex.IRStmt.Put): reg_name = arch.translate_register_name(stmt.offset) stmt_str = stmt.__str__(reg_name=reg_name) elif isinstance(stmt, pyvex.IRStmt.WrTmp) and isinstance(stmt.data, pyvex.IRExpr.Get): reg_name = arch.translate_register_name(stmt.data.offset) stmt_str = stmt.__str__(reg_name=reg_name) else: stmt_str = str(stmt) else: stmt_str = str(stmt) block_str += "%02s: %s\n" % ("+" if i in included_stmts else "-", stmt_str ) s += block_str s += "\n" return s # # Private methods # def _get_irsb(self, v): """ Get the IRSB object from an address, a SimRun, or a CFGNode. :param v: Can be one of the following: an address, or a CFGNode. :return: The IRSB instance. 
:rtype: pyvex.IRSB """ if isinstance(v, CFGNode): v = v.addr if type(v) in (int, long): # Generate an IRSB from self._project if v in self._run_cache: return self._run_cache[v] if self.project: irsb = self.project.factory.block(v).vex self._run_cache[v] = irsb return irsb else: raise AngrBladeError("Project must be specified if you give me all addresses for SimRuns") else: raise AngrBladeError('Unsupported SimRun argument type %s', type(v)) def _get_cfgnode(self, thing): """ Get the CFGNode corresponding to the specific address. :param thing: Can be anything that self._normalize() accepts. Usually it's the address of the node :return: the CFGNode instance :rtype: CFGNode """ return self._cfg.get_any_node(self._get_addr(thing)) def _get_addr(self, v): """ Get address of the basic block or CFG node specified by v. :param v: Can be one of the following: a CFGNode, or an address. :return: The address. :rtype: int """ if isinstance(v, CFGNode): return v.addr elif type(v) in (int, long): return v else: raise AngrBladeError('Unsupported SimRun argument type %s' % type(v)) def _in_graph(self, v): return self._get_cfgnode(v) in self._graph def _inslice_callback(self, stmt_idx, stmt, infodict): # pylint:disable=unused-argument tpl = (infodict['irsb_addr'], stmt_idx) if 'prev' in infodict and infodict['prev']: prev = infodict['prev'] self._slice.add_edge(tpl, prev) else: self._slice.add_node(tpl) infodict['prev'] = tpl infodict['has_statement'] = True def _backward_slice(self): """ Backward slicing. We support the following IRStmts: # WrTmp # Put We support the following IRExprs: # Get # RdTmp # Const :return: """ temps = set() regs = set() # Retrieve the target: are we slicing from a register(IRStmt.Put), or a temp(IRStmt.WrTmp)? stmts = self._get_irsb(self._dst_run).statements if self._dst_stmt_idx != -1: dst_stmt = stmts[self._dst_stmt_idx] if type(dst_stmt) is pyvex.IRStmt.Put: regs.add(dst_stmt.offset) elif type(dst_stmt) is pyvex.IRStmt.WrTmp: temps.add(dst_stmt.tmp) else: raise AngrBladeError('Incorrect type of the specified target statement. We only support Put and WrTmp.') prev = (self._get_addr(self._dst_run), self._dst_stmt_idx) else: next_expr = self._get_irsb(self._dst_run).next if type(next_expr) is pyvex.IRExpr.RdTmp: temps.add(next_expr.tmp) elif type(next_expr) is pyvex.IRExpr.Const: # A const doesn't rely on anything else! pass else: raise AngrBladeError('Unsupported type for irsb.next: %s' % type(next_expr)) # Then we gotta start from the very last statement! 
self._dst_stmt_idx = len(stmts) - 1 prev = (self._get_addr(self._dst_run), 'default') slicer = SimSlicer(self.project.arch, stmts, target_tmps=temps, target_regs=regs, target_stack_offsets=None, inslice_callback=self._inslice_callback, inslice_callback_infodict={ 'irsb_addr': self._get_irsb(self._dst_run)._addr, 'prev': prev, }) regs = slicer.final_regs if self._ignore_sp and self.project.arch.sp_offset in regs: regs.remove(self.project.arch.sp_offset) if self._ignore_bp and self.project.arch.bp_offset in regs: regs.remove(self.project.arch.bp_offset) for offset in self._ignored_regs: if offset in regs: regs.remove(offset) stack_offsets = slicer.final_stack_offsets prev = slicer.inslice_callback_infodict['prev'] if regs or stack_offsets: cfgnode = self._get_cfgnode(self._dst_run) in_edges = self._graph.in_edges(cfgnode, data=True) for pred, _, data in in_edges: if 'jumpkind' in data and data['jumpkind'] == 'Ijk_FakeRet': continue self._backward_slice_recursive(self._max_level - 1, pred, regs, stack_offsets, prev, data.get('stmt_idx', None) ) def _backward_slice_recursive(self, level, run, regs, stack_offsets, prev, exit_stmt_idx): if level <= 0: return temps = set() regs = regs.copy() stmts = self._get_irsb(run).statements if exit_stmt_idx is None or exit_stmt_idx == 'default': # Initialize the temps set with whatever in the `next` attribute of this irsb next_expr = self._get_irsb(run).next if type(next_expr) is pyvex.IRExpr.RdTmp: temps.add(next_expr.tmp) else: exit_stmt = self._get_irsb(run).statements[exit_stmt_idx] if type(exit_stmt.guard) is pyvex.IRExpr.RdTmp: temps.add(exit_stmt.guard.tmp) # Put it in our slice irsb_addr = self._get_addr(run) self._inslice_callback(exit_stmt_idx, exit_stmt, {'irsb_addr': irsb_addr, 'prev': prev}) prev = (irsb_addr, exit_stmt_idx) infodict = {'irsb_addr' : self._get_addr(run), 'prev' : prev, 'has_statement': False } slicer = SimSlicer(self.project.arch, stmts, target_tmps=temps, target_regs=regs, target_stack_offsets=stack_offsets, inslice_callback=self._inslice_callback, inslice_callback_infodict=infodict ) if not infodict['has_statement']: # put this block into the slice self._inslice_callback(0, None, infodict) if run in self._traced_runs: return self._traced_runs.add(run) regs = slicer.final_regs if self._ignore_sp and self.project.arch.sp_offset in regs: regs.remove(self.project.arch.sp_offset) if self._ignore_bp and self.project.arch.bp_offset in regs: regs.remove(self.project.arch.bp_offset) stack_offsets = slicer.final_stack_offsets prev = slicer.inslice_callback_infodict['prev'] if regs or stack_offsets: in_edges = self._graph.in_edges(self._get_cfgnode(run), data=True) for pred, _, data in in_edges: if 'jumpkind' in data and data['jumpkind'] == 'Ijk_FakeRet': continue self._backward_slice_recursive(level - 1, pred, regs, stack_offsets, prev, data.get('stmt_idx', None)) from .errors import AngrBladeError, AngrBladeSimProcError from .analyses.cfg.cfg_node import CFGNode
bsd-2-clause
6,807,302,043,918,270,000
34.876471
120
0.542712
false
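A brief, hypothetical usage sketch for the Blade slicer in the angr/angr/blade.py record above; it is not taken from the record. The binary path, the use of CFGFast, and the import location are assumptions, and the exact API may differ between angr versions.

from angr import Project
from angr.blade import Blade               # assumed import location

proj = Project("/bin/true", auto_load_libs=False)   # placeholder binary
cfg = proj.analyses.CFGFast()

# Slice backwards from the last statement (-1) of the block at the entry point.
b = Blade(cfg.graph, proj.entry, -1, direction='backward', project=proj, cfg=cfg)

# b.slice is a networkx DiGraph whose nodes are (block_addr, stmt_idx) pairs.
for block_addr, stmt_idx in b.slice.nodes():
    print("%#x %s" % (block_addr, stmt_idx))
print(b.dbg_repr(arch=proj.arch))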
blomquisg/heat
heat/common/client.py
1
21833
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010-2011 OpenStack, LLC # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # HTTPSClientAuthConnection code comes courtesy of ActiveState website: # http://code.activestate.com/recipes/ # 577548-https-httplib-client-connection-with-certificate-v/ import collections import errno import functools import httplib import logging import os import urllib import urlparse try: from eventlet.green import socket, ssl except ImportError: import socket import ssl try: import sendfile SENDFILE_SUPPORTED = True except ImportError: SENDFILE_SUPPORTED = False from heat.common import auth from heat.common import exception, utils # common chunk size for get and put CHUNKSIZE = 65536 def handle_unauthorized(func): """ Wrap a function to re-authenticate and retry. """ @functools.wraps(func) def wrapped(self, *args, **kwargs): try: return func(self, *args, **kwargs) except exception.NotAuthorized: self._authenticate(force_reauth=True) return func(self, *args, **kwargs) return wrapped def handle_redirects(func): """ Wrap the _do_request function to handle HTTP redirects. """ MAX_REDIRECTS = 5 @functools.wraps(func) def wrapped(self, method, url, body, headers): for _ in xrange(MAX_REDIRECTS): try: return func(self, method, url, body, headers) except exception.RedirectException as redirect: if redirect.url is None: raise exception.InvalidRedirect() url = redirect.url raise exception.MaxRedirectsExceeded(redirects=MAX_REDIRECTS) return wrapped class ImageBodyIterator(object): """ A class that acts as an iterator over an image file's chunks of data. This is returned as part of the result tuple from `heat.client.Client.get_image` """ def __init__(self, source): """ Constructs the object from a readable image source (such as an HTTPResponse or file-like object) """ self.source = source def __iter__(self): """ Exposes an iterator over the chunks of data in the image file. """ while True: chunk = self.source.read(CHUNKSIZE) if chunk: yield chunk else: break class SendFileIterator: """ Emulate iterator pattern over sendfile, in order to allow send progress be followed by wrapping the iteration. 
""" def __init__(self, connection, body): self.connection = connection self.body = body self.offset = 0 self.sending = True def __iter__(self): class OfLength: def __init__(self, len): self.len = len def __len__(self): return self.len while self.sending: sent = sendfile.sendfile(self.connection.sock.fileno(), self.body.fileno(), self.offset, CHUNKSIZE) self.sending = (sent != 0) self.offset += sent yield OfLength(sent) class HTTPSClientAuthConnection(httplib.HTTPSConnection): """ Class to make a HTTPS connection, with support for full client-based SSL Authentication :see http://code.activestate.com/recipes/ 577548-https-httplib-client-connection-with-certificate-v/ """ def __init__(self, host, port, key_file, cert_file, ca_file, timeout=None, insecure=False): httplib.HTTPSConnection.__init__(self, host, port, key_file=key_file, cert_file=cert_file) self.key_file = key_file self.cert_file = cert_file self.ca_file = ca_file self.timeout = timeout self.insecure = insecure def connect(self): """ Connect to a host on a given (SSL) port. If ca_file is pointing somewhere, use it to check Server Certificate. Redefined/copied and extended from httplib.py:1105 (Python 2.6.x). This is needed to pass cert_reqs=ssl.CERT_REQUIRED as parameter to ssl.wrap_socket(), which forces SSL to check server certificate against our client certificate. """ sock = socket.create_connection((self.host, self.port), self.timeout) if self._tunnel_host: self.sock = sock self._tunnel() # Check CA file unless 'insecure' is specificed if self.insecure is True: self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, cert_reqs=ssl.CERT_NONE) else: self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ca_certs=self.ca_file, cert_reqs=ssl.CERT_REQUIRED) class BaseClient(object): """A base client class""" DEFAULT_PORT = 80 DEFAULT_DOC_ROOT = None # Standard CA file locations for Debian/Ubuntu, RedHat/Fedora, # Suse, FreeBSD/OpenBSD DEFAULT_CA_FILE_PATH = '/etc/ssl/certs/ca-certificates.crt:'\ '/etc/pki/tls/certs/ca-bundle.crt:'\ '/etc/ssl/ca-bundle.pem:'\ '/etc/ssl/cert.pem' OK_RESPONSE_CODES = ( httplib.OK, httplib.CREATED, httplib.ACCEPTED, httplib.NO_CONTENT, ) REDIRECT_RESPONSE_CODES = ( httplib.MOVED_PERMANENTLY, httplib.FOUND, httplib.SEE_OTHER, httplib.USE_PROXY, httplib.TEMPORARY_REDIRECT, ) def __init__(self, host, port=None, use_ssl=False, auth_tok=None, creds=None, doc_root=None, key_file=None, cert_file=None, ca_file=None, insecure=False, configure_via_auth=True): """ Creates a new client to some service. :param host: The host where service resides :param port: The port where service resides :param use_ssl: Should we use HTTPS? :param auth_tok: The auth token to pass to the server :param creds: The credentials to pass to the auth plugin :param doc_root: Prefix for all URLs we request from host :param key_file: Optional PEM-formatted file that contains the private key. If use_ssl is True, and this param is None (the default), then an environ variable heat_CLIENT_KEY_FILE is looked for. If no such environ variable is found, ClientConnectionError will be raised. :param cert_file: Optional PEM-formatted certificate chain file. If use_ssl is True, and this param is None (the default), then an environ variable heat_CLIENT_CERT_FILE is looked for. If no such environ variable is found, ClientConnectionError will be raised. :param ca_file: Optional CA cert file to use in SSL connections If use_ssl is True, and this param is None (the default), then an environ variable heat_CLIENT_CA_FILE is looked for. 
:param insecure: Optional. If set then the server's certificate will not be verified. """ self.host = host self.port = port or self.DEFAULT_PORT self.use_ssl = use_ssl self.auth_tok = auth_tok self.creds = creds or {} self.connection = None self.configure_via_auth = configure_via_auth # doc_root can be a nullstring, which is valid, and why we # cannot simply do doc_root or self.DEFAULT_DOC_ROOT below. self.doc_root = (doc_root if doc_root is not None else self.DEFAULT_DOC_ROOT) self.auth_plugin = self.make_auth_plugin(self.creds) self.key_file = key_file self.cert_file = cert_file self.ca_file = ca_file self.insecure = insecure self.connect_kwargs = self.get_connect_kwargs() def get_connect_kwargs(self): connect_kwargs = {} if self.use_ssl: if self.key_file is None: self.key_file = os.environ.get('heat_CLIENT_KEY_FILE') if self.cert_file is None: self.cert_file = os.environ.get('heat_CLIENT_CERT_FILE') if self.ca_file is None: self.ca_file = os.environ.get('heat_CLIENT_CA_FILE') # Check that key_file/cert_file are either both set or both unset if self.cert_file is not None and self.key_file is None: msg = _("You have selected to use SSL in connecting, " "and you have supplied a cert, " "however you have failed to supply either a " "key_file parameter or set the " "heat_CLIENT_KEY_FILE environ variable") raise exception.ClientConnectionError(msg) if self.key_file is not None and self.cert_file is None: msg = _("You have selected to use SSL in connecting, " "and you have supplied a key, " "however you have failed to supply either a " "cert_file parameter or set the " "heat_CLIENT_CERT_FILE environ variable") raise exception.ClientConnectionError(msg) if (self.key_file is not None and not os.path.exists(self.key_file)): msg = _("The key file you specified %s does not " "exist") % self.key_file raise exception.ClientConnectionError(msg) connect_kwargs['key_file'] = self.key_file if (self.cert_file is not None and not os.path.exists(self.cert_file)): msg = _("The cert file you specified %s does not " "exist") % self.cert_file raise exception.ClientConnectionError(msg) connect_kwargs['cert_file'] = self.cert_file if (self.ca_file is not None and not os.path.exists(self.ca_file)): msg = _("The CA file you specified %s does not " "exist") % self.ca_file raise exception.ClientConnectionError(msg) if self.ca_file is None: for ca in self.DEFAULT_CA_FILE_PATH.split(":"): if os.path.exists(ca): self.ca_file = ca break connect_kwargs['ca_file'] = self.ca_file connect_kwargs['insecure'] = self.insecure return connect_kwargs def set_auth_token(self, auth_tok): """ Updates the authentication token for this client connection. """ # FIXME(sirp): Nova image/heat.py currently calls this. Since this # method isn't really doing anything useful[1], we should go ahead and # rip it out, first in Nova, then here. Steps: # # 1. Change auth_tok in heat to auth_token # 2. Change image/heat.py in Nova to use client.auth_token # 3. Remove this method # # [1] http://mail.python.org/pipermail/tutor/2003-October/025932.html self.auth_tok = auth_tok def configure_from_url(self, url): """ Setups the connection based on the given url. 
The form is: <http|https>://<host>:port/doc_root """ parsed = urlparse.urlparse(url) self.use_ssl = parsed.scheme == 'https' self.host = parsed.hostname self.port = parsed.port or 80 self.doc_root = parsed.path # ensure connection kwargs are re-evaluated after the service catalog # publicURL is parsed for potential SSL usage self.connect_kwargs = self.get_connect_kwargs() def make_auth_plugin(self, creds): """ Returns an instantiated authentication plugin. """ strategy = creds.get('strategy', 'noauth') plugin = auth.get_plugin_from_strategy(strategy, creds) return plugin def get_connection_type(self): """ Returns the proper connection type """ if self.use_ssl: return HTTPSClientAuthConnection else: return httplib.HTTPConnection def _authenticate(self, force_reauth=False): """ Use the authentication plugin to authenticate and set the auth token. :param force_reauth: For re-authentication to bypass cache. """ auth_plugin = self.auth_plugin if not auth_plugin.is_authenticated or force_reauth: auth_plugin.authenticate() self.auth_tok = auth_plugin.auth_token management_url = auth_plugin.management_url if management_url and self.configure_via_auth: self.configure_from_url(management_url) @handle_unauthorized def do_request(self, method, action, body=None, headers=None, params=None): """ Make a request, returning an HTTP response object. :param method: HTTP verb (GET, POST, PUT, etc.) :param action: Requested path to append to self.doc_root :param body: Data to send in the body of the request :param headers: Headers to send with the request :param params: Key/value pairs to use in query string :returns: HTTP response object """ if not self.auth_tok: self._authenticate() url = self._construct_url(action, params) return self._do_request(method=method, url=url, body=body, headers=headers) def _construct_url(self, action, params=None): """ Create a URL object we can use to pass to _do_request(). """ path = '/'.join([self.doc_root or '', action.lstrip('/')]) scheme = "https" if self.use_ssl else "http" netloc = "%s:%d" % (self.host, self.port) if isinstance(params, dict): for (key, value) in params.items(): if value is None: del params[key] query = urllib.urlencode(params) else: query = None return urlparse.ParseResult(scheme, netloc, path, '', query, '') @handle_redirects def _do_request(self, method, url, body, headers): """ Connects to the server and issues a request. Handles converting any returned HTTP error status codes to OpenStack/heat exceptions and closing the server connection. Returns the result data, or raises an appropriate exception. :param method: HTTP method ("GET", "POST", "PUT", etc...) :param url: urlparse.ParsedResult object with URL information :param body: data to send (as string, filelike or iterable), or None (default) :param headers: mapping of key/value pairs to add as headers :note If the body param has a read attribute, and method is either POST or PUT, this method will automatically conduct a chunked-transfer encoding and use the body as a file object or iterable, transferring chunks of data using the connection's send() method. This allows large objects to be transferred efficiently without buffering the entire body in memory. """ if url.query: path = url.path + "?" 
+ url.query else: path = url.path try: connection_type = self.get_connection_type() headers = headers or {} if 'x-auth-token' not in headers and self.auth_tok: headers['x-auth-token'] = self.auth_tok c = connection_type(url.hostname, url.port, **self.connect_kwargs) def _pushing(method): return method.lower() in ('post', 'put') def _simple(body): return body is None or isinstance(body, basestring) def _filelike(body): return hasattr(body, 'read') def _sendbody(connection, iter): connection.endheaders() for sent in iter: # iterator has done the heavy lifting pass def _chunkbody(connection, iter): connection.putheader('Transfer-Encoding', 'chunked') connection.endheaders() for chunk in iter: connection.send('%x\r\n%s\r\n' % (len(chunk), chunk)) connection.send('0\r\n\r\n') # Do a simple request or a chunked request, depending # on whether the body param is file-like or iterable and # the method is PUT or POST # if not _pushing(method) or _simple(body): # Simple request... c.request(method, path, body, headers) elif _filelike(body) or self._iterable(body): c.putrequest(method, path) for header, value in headers.items(): c.putheader(header, value) iter = self.image_iterator(c, headers, body) if self._sendable(body): # send actual file without copying into userspace _sendbody(c, iter) else: # otherwise iterate and chunk _chunkbody(c, iter) else: raise TypeError('Unsupported image type: %s' % body.__class__) res = c.getresponse() status_code = self.get_status_code(res) if status_code in self.OK_RESPONSE_CODES: return res elif status_code in self.REDIRECT_RESPONSE_CODES: raise exception.RedirectException(res.getheader('Location')) elif status_code == httplib.UNAUTHORIZED: raise exception.NotAuthorized(res.read()) elif status_code == httplib.FORBIDDEN: raise exception.NotAuthorized(res.read()) elif status_code == httplib.NOT_FOUND: raise exception.NotFound(res.read()) elif status_code == httplib.CONFLICT: raise exception.Duplicate(res.read()) elif status_code == httplib.BAD_REQUEST: raise exception.Invalid(res.read()) elif status_code == httplib.MULTIPLE_CHOICES: raise exception.MultipleChoices(body=res.read()) elif status_code == httplib.INTERNAL_SERVER_ERROR: raise Exception("Internal Server error: %s" % res.read()) else: raise Exception("Unknown error occurred! %s" % res.read()) except (socket.error, IOError), e: raise exception.ClientConnectionError(e) def _seekable(self, body): # pipes are not seekable, avoids sendfile() failure on e.g. # cat /path/to/image | heat add ... # or where add command is launched via popen try: os.lseek(body.fileno(), 0, os.SEEK_SET) return True except OSError as e: return (e.errno != errno.ESPIPE) def _sendable(self, body): return (SENDFILE_SUPPORTED and hasattr(body, 'fileno') and self._seekable(body) and not self.use_ssl) def _iterable(self, body): return isinstance(body, collections.Iterable) def image_iterator(self, connection, headers, body): if self._sendable(body): return SendFileIterator(connection, body) elif self._iterable(body): return utils.chunkreadable(body) else: return ImageBodyIterator(body) def get_status_code(self, response): """ Returns the integer status code from the response, which can be either a Webob.Response (used in testing) or httplib.Response """ if hasattr(response, 'status_int'): return response.status_int else: return response.status def _extract_params(self, actual_params, allowed_params): """ Extract a subset of keys from a dictionary. 
The filters key will also be extracted, and each of its values will be returned as an individual param. :param actual_params: dict of keys to filter :param allowed_params: list of keys that 'actual_params' will be reduced to :retval subset of 'params' dict """ result = {} for param in actual_params: if param in allowed_params: result[param] = actual_params[param] elif 'Parameters.member.' in param: result[param] = actual_params[param] return result
apache-2.0
-8,148,688,654,965,902,000
35.880068
79
0.573032
false
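A minimal sketch of how the BaseClient in the heat/common/client.py record above is meant to be specialised and used. The service name, port, and paths are invented; only the constructor arguments, the 'noauth' default strategy, and the do_request() signature come from the record.

class ExampleServiceClient(BaseClient):
    """Hypothetical client for some HTTP service."""
    DEFAULT_PORT = 8000
    DEFAULT_DOC_ROOT = "/v1"

# 'noauth' is the default strategy used by make_auth_plugin() above.
client = ExampleServiceClient("localhost", creds={"strategy": "noauth"})

# do_request() authenticates if needed, builds the URL from doc_root plus the
# action, and returns an httplib response; error codes raise heat exceptions.
resp = client.do_request("GET", "/stacks", params={"limit": 10})
print("%s %s" % (resp.status, resp.read()))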
Kriechi/mitmproxy
mitmproxy/addons/tlsconfig.py
1
12516
import os from pathlib import Path from typing import List, Optional, TypedDict, Any from OpenSSL import SSL from mitmproxy import certs, ctx, exceptions, connection from mitmproxy.net import tls as net_tls from mitmproxy.options import CONF_BASENAME from mitmproxy.proxy import context from mitmproxy.proxy.layers import tls # We manually need to specify this, otherwise OpenSSL may select a non-HTTP2 cipher by default. # https://ssl-config.mozilla.org/#config=old DEFAULT_CIPHERS = ( 'ECDHE-ECDSA-AES128-GCM-SHA256', 'ECDHE-RSA-AES128-GCM-SHA256', 'ECDHE-ECDSA-AES256-GCM-SHA384', 'ECDHE-RSA-AES256-GCM-SHA384', 'ECDHE-ECDSA-CHACHA20-POLY1305', 'ECDHE-RSA-CHACHA20-POLY1305', 'DHE-RSA-AES128-GCM-SHA256', 'DHE-RSA-AES256-GCM-SHA384', 'DHE-RSA-CHACHA20-POLY1305', 'ECDHE-ECDSA-AES128-SHA256', 'ECDHE-RSA-AES128-SHA256', 'ECDHE-ECDSA-AES128-SHA', 'ECDHE-RSA-AES128-SHA', 'ECDHE-ECDSA-AES256-SHA384', 'ECDHE-RSA-AES256-SHA384', 'ECDHE-ECDSA-AES256-SHA', 'ECDHE-RSA-AES256-SHA', 'DHE-RSA-AES128-SHA256', 'DHE-RSA-AES256-SHA256', 'AES128-GCM-SHA256', 'AES256-GCM-SHA384', 'AES128-SHA256', 'AES256-SHA256', 'AES128-SHA', 'AES256-SHA', 'DES-CBC3-SHA' ) class AppData(TypedDict): server_alpn: Optional[bytes] http2: bool def alpn_select_callback(conn: SSL.Connection, options: List[bytes]) -> Any: app_data: AppData = conn.get_app_data() server_alpn = app_data["server_alpn"] http2 = app_data["http2"] if server_alpn and server_alpn in options: return server_alpn http_alpns = tls.HTTP_ALPNS if http2 else tls.HTTP1_ALPNS for alpn in options: # client sends in order of preference, so we are nice and respect that. if alpn in http_alpns: return alpn else: return SSL.NO_OVERLAPPING_PROTOCOLS class TlsConfig: """ This addon supplies the proxy core with the desired OpenSSL connection objects to negotiate TLS. """ certstore: certs.CertStore = None # type: ignore # TODO: We should support configuring TLS 1.3 cipher suites (https://github.com/mitmproxy/mitmproxy/issues/4260) # TODO: We should re-use SSL.Context options here, if only for TLS session resumption. # This may require patches to pyOpenSSL, as some functionality is only exposed on contexts. 
# TODO: This addon should manage the following options itself, which are current defined in mitmproxy/options.py: # - upstream_cert # - add_upstream_certs_to_client_chain # - ciphers_client # - ciphers_server # - key_size # - certs # - cert_passphrase # - ssl_verify_upstream_trusted_ca # - ssl_verify_upstream_trusted_confdir def load(self, loader): loader.add_option( name="tls_version_client_min", typespec=str, default=net_tls.DEFAULT_MIN_VERSION.name, choices=[x.name for x in net_tls.Version], help=f"Set the minimum TLS version for client connections.", ) loader.add_option( name="tls_version_client_max", typespec=str, default=net_tls.DEFAULT_MAX_VERSION.name, choices=[x.name for x in net_tls.Version], help=f"Set the maximum TLS version for client connections.", ) loader.add_option( name="tls_version_server_min", typespec=str, default=net_tls.DEFAULT_MIN_VERSION.name, choices=[x.name for x in net_tls.Version], help=f"Set the minimum TLS version for server connections.", ) loader.add_option( name="tls_version_server_max", typespec=str, default=net_tls.DEFAULT_MAX_VERSION.name, choices=[x.name for x in net_tls.Version], help=f"Set the maximum TLS version for server connections.", ) def tls_clienthello(self, tls_clienthello: tls.ClientHelloData): conn_context = tls_clienthello.context only_non_http_alpns = ( conn_context.client.alpn_offers and all(x not in tls.HTTP_ALPNS for x in conn_context.client.alpn_offers) ) tls_clienthello.establish_server_tls_first = conn_context.server.tls and ( ctx.options.connection_strategy == "eager" or ctx.options.add_upstream_certs_to_client_chain or ctx.options.upstream_cert and ( only_non_http_alpns or not conn_context.client.sni ) ) def tls_start(self, tls_start: tls.TlsStartData): if tls_start.conn == tls_start.context.client: self.create_client_proxy_ssl_conn(tls_start) else: self.create_proxy_server_ssl_conn(tls_start) def create_client_proxy_ssl_conn(self, tls_start: tls.TlsStartData) -> None: client: connection.Client = tls_start.context.client server: connection.Server = tls_start.context.server entry = self.get_cert(tls_start.context) if not client.cipher_list and ctx.options.ciphers_client: client.cipher_list = ctx.options.ciphers_client.split(":") # don't assign to client.cipher_list, doesn't need to be stored. cipher_list = client.cipher_list or DEFAULT_CIPHERS if ctx.options.add_upstream_certs_to_client_chain: # pragma: no cover # exempted from coverage until https://bugs.python.org/issue18233 is fixed. 
extra_chain_certs = server.certificate_list else: extra_chain_certs = [] ssl_ctx = net_tls.create_client_proxy_context( min_version=net_tls.Version[ctx.options.tls_version_client_min], max_version=net_tls.Version[ctx.options.tls_version_client_max], cipher_list=cipher_list, cert=entry.cert, key=entry.privatekey, chain_file=entry.chain_file, request_client_cert=False, alpn_select_callback=alpn_select_callback, extra_chain_certs=extra_chain_certs, dhparams=self.certstore.dhparams, ) tls_start.ssl_conn = SSL.Connection(ssl_ctx) tls_start.ssl_conn.set_app_data(AppData( server_alpn=server.alpn, http2=ctx.options.http2, )) tls_start.ssl_conn.set_accept_state() def create_proxy_server_ssl_conn(self, tls_start: tls.TlsStartData) -> None: client: connection.Client = tls_start.context.client server: connection.Server = tls_start.context.server assert server.address if ctx.options.ssl_insecure: verify = net_tls.Verify.VERIFY_NONE else: verify = net_tls.Verify.VERIFY_PEER if server.sni is True: server.sni = client.sni or server.address[0] if not server.alpn_offers: if client.alpn_offers: if ctx.options.http2: server.alpn_offers = tuple(client.alpn_offers) else: server.alpn_offers = tuple(x for x in client.alpn_offers if x != b"h2") elif client.tls_established: # We would perfectly support HTTP/1 -> HTTP/2, but we want to keep things on the same protocol version. # There are some edge cases where we want to mirror the regular server's behavior accurately, # for example header capitalization. server.alpn_offers = [] elif ctx.options.http2: server.alpn_offers = tls.HTTP_ALPNS else: server.alpn_offers = tls.HTTP1_ALPNS if not server.cipher_list and ctx.options.ciphers_server: server.cipher_list = ctx.options.ciphers_server.split(":") # don't assign to client.cipher_list, doesn't need to be stored. cipher_list = server.cipher_list or DEFAULT_CIPHERS client_cert: Optional[str] = None if ctx.options.client_certs: client_certs = os.path.expanduser(ctx.options.client_certs) if os.path.isfile(client_certs): client_cert = client_certs else: server_name: str = server.sni or server.address[0] p = os.path.join(client_certs, f"{server_name}.pem") if os.path.isfile(p): client_cert = p ssl_ctx = net_tls.create_proxy_server_context( min_version=net_tls.Version[ctx.options.tls_version_client_min], max_version=net_tls.Version[ctx.options.tls_version_client_max], cipher_list=cipher_list, verify=verify, sni=server.sni, ca_path=ctx.options.ssl_verify_upstream_trusted_confdir, ca_pemfile=ctx.options.ssl_verify_upstream_trusted_ca, client_cert=client_cert, alpn_protos=server.alpn_offers, ) tls_start.ssl_conn = SSL.Connection(ssl_ctx) if server.sni: tls_start.ssl_conn.set_tlsext_host_name(server.sni.encode()) tls_start.ssl_conn.set_connect_state() def running(self): # FIXME: We have a weird bug where the contract for configure is not followed and it is never called with # confdir or command_history as updated. 
self.configure("confdir") # pragma: no cover def configure(self, updated): if "confdir" not in updated and "certs" not in updated: return certstore_path = os.path.expanduser(ctx.options.confdir) self.certstore = certs.CertStore.from_store( path=certstore_path, basename=CONF_BASENAME, key_size=ctx.options.key_size, passphrase=ctx.options.cert_passphrase.encode("utf8") if ctx.options.cert_passphrase else None, ) if self.certstore.default_ca.has_expired(): ctx.log.warn( "The mitmproxy certificate authority has expired!\n" "Please delete all CA-related files in your ~/.mitmproxy folder.\n" "The CA will be regenerated automatically after restarting mitmproxy.\n" "See https://docs.mitmproxy.org/stable/concepts-certificates/ for additional help.", ) for certspec in ctx.options.certs: parts = certspec.split("=", 1) if len(parts) == 1: parts = ["*", parts[0]] cert = Path(parts[1]).expanduser() if not cert.exists(): raise exceptions.OptionsError(f"Certificate file does not exist: {cert}") try: self.certstore.add_cert_file( parts[0], cert, passphrase=ctx.options.cert_passphrase.encode("utf8") if ctx.options.cert_passphrase else None, ) except ValueError as e: raise exceptions.OptionsError(f"Invalid certificate format for {cert}: {e}") from e def get_cert(self, conn_context: context.Context) -> certs.CertStoreEntry: """ This function determines the Common Name (CN), Subject Alternative Names (SANs) and Organization Name our certificate should have and then fetches a matching cert from the certstore. """ altnames: List[str] = [] organization: Optional[str] = None # Use upstream certificate if available. if conn_context.server.certificate_list: upstream_cert = conn_context.server.certificate_list[0] if upstream_cert.cn: altnames.append(upstream_cert.cn) altnames.extend(upstream_cert.altnames) if upstream_cert.organization: organization = upstream_cert.organization # Add SNI. If not available, try the server address as well. if conn_context.client.sni: altnames.append(conn_context.client.sni) elif conn_context.server.address: altnames.append(conn_context.server.address[0]) # As a last resort, add *something* so that we have a certificate to serve. if not altnames: altnames.append("mitmproxy") # only keep first occurrence of each hostname altnames = list(dict.fromkeys(altnames)) # RFC 2818: If a subjectAltName extension of type dNSName is present, that MUST be used as the identity. # In other words, the Common Name is irrelevant then. return self.certstore.get_cert(altnames[0], altnames, organization)
mit
-6,556,963,244,387,718,000
42.762238
119
0.622803
false
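Two small illustrations for the mitmproxy tlsconfig.py record above; both are assumptions layered on top of it rather than content from the record. The options registered in load() are set like any other mitmproxy option, for example: mitmproxy --set tls_version_client_min=TLS1_2 (the value must be one of the net_tls.Version names). The selection rule in alpn_select_callback() can be paraphrased as a standalone function:

HTTP1_ALPNS = (b"http/1.1", b"http/1.0", b"http/0.9")   # assumed to mirror tls.HTTP1_ALPNS
HTTP_ALPNS = (b"h2",) + HTTP1_ALPNS                     # assumed to mirror tls.HTTP_ALPNS

def pick_alpn(server_alpn, client_offers, http2=True):
    # Prefer whatever was already negotiated with the upstream server ...
    if server_alpn and server_alpn in client_offers:
        return server_alpn
    # ... otherwise take the first HTTP protocol the client offered, in the
    # client's own preference order; None stands in for NO_OVERLAPPING_PROTOCOLS.
    allowed = HTTP_ALPNS if http2 else HTTP1_ALPNS
    for alpn in client_offers:
        if alpn in allowed:
            return alpn
    return None

assert pick_alpn(None, [b"h2", b"http/1.1"]) == b"h2"
assert pick_alpn(b"http/1.1", [b"h2", b"http/1.1"]) == b"http/1.1"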
Carreau/difflib2.py
examples/lcs_cutmodule.py
1
5199
from __future__ import print_function from array import array from itertools import islice def lcs_cut2(s1, s2, lcs_low_bound=0, bg=None, debug=False): """Compule the length of the LCS 2 sequences s1 and s2. lcs_low_bound : (int), hint of lower bound for the lenght of the lcs to search for. Default to 0. Algorithmic description: This is a derivation of Hirschberg's algorithm which include some optimisation for specific case. This shoudl use an O(n) memory (n = len(s1)) and should have a worse case scenario time complexity of O(n**2). In the best case scenario, (l ~ n) the time complexity is closer to O(n*l) where l is the lenght of the longest common subsequence. Though, detail of implementaiton of s1 and s2 object slicing will affect the optimal performace. bg is four debug purpose, to see how the algorithme behave visually using iptyhonblocks. uncomment bg lines below to use. """ m = len(s1) n = len(s2) if n==0 or m==0: return 0 # rng is for row "rang" in french, "c" is for current and "p" for previous. # array are n+1 so that last elemnt is 0. This allow # to avoid special casing j=0 as j-1 will wrap arround. # alternative is to offset all indexes by 1, wichi becames hard to # track rngc = array('i',[0 for x in range(n+1)]) ## current row rngp = array('i',[0 for x in range(n+1)]) ## previous row # current max value of the LCS durrgin the search. currentmax = lcs_low_bound # correspond to rngc[j-1], used to avoid lookup in the array # through the loop to shave off soem execution time. rngcjm = None # lower and upper bound for current loop on s2/j limm,limpp = 0,0 # lower bound for iteration on s1/i and # another lower bound s2/j mini,minj = 0,0 if debug: import pdb; pdb.set_trace() for i,c1 in enumerate(s1): # current row become previous, and we reuse previous to avoid # creating a new empty list. rngc, rngp = rngp, rngc limm,limp= max(i-m+currentmax,0,minj-1),min(i+n-currentmax+1,n) rngcjm = rngc[limm-1] if i < mini: print('continue') continue isl = islice(s2,limm,limp) rsl = range(limm,limp) zsl = zip(rsl,isl) for j,c2 in zsl: # if bg: # bg[i,j].green=255 if c1 == c2 : if i == 0 or j == 0: newval = 1 else: newval = rngp[j-1]+1 # here we will peak ahead as far as possible # while the two string are matching, # for strings with high similarity # this with give us hints on which part of the # lcs matrix we do not need to explore. # # we do this only once, if we are at # the beginning of the matching streem. if s1[i-1] != s2[j-1] or i==0 or j==0: lookahead = -1 k = min(m-i,n-j) for cx,cy in zip(s1[i:i+k],s2[j:j+k]): if cx==cy: lookahead +=1 else: break # if bg: # for xx in range(0,lookahead): # bg[i+xx,j+xx].blue=255 tmp = rngc[j]+lookahead # if we are on i,j and have a value M # then it is useless to process columns that have : # - a j value lower than M-j # - a i value lower than M-i lminj=tmp-j lmini=tmp-i if lmini > mini: mini=lmini if lminj > minj: minj=lminj for xx in range(0,minj): rngp[xx]=tmp-1 rngc[xx]=tmp-1 # if bg: # for xx in range(0,lminj): # for lh in range(i,m): # bg[lh,xx].red =255 # for xx in range(0,lmini): # for lh in range(j,n): # bg[xx,lh].red =255 # bg[i+lookahead,j+lookahead].red =255 if j >= limp+1: break if tmp > currentmax: currentmax = tmp assert(currentmax <=m) assert(currentmax <=n) limp= min(i+n-currentmax+1,n) if newval > currentmax: currentmax = newval else : b = rngp[j] newval = rngcjm if rngcjm > b else b # assert(newval <= i+1) # assert(newval <= j+1) rngc[j] = rngcjm = newval print(rngc) print('==',rngc) return rngc[-2]
bsd-3-clause
-6,169,857,475,565,440,000
36.956204
79
0.476053
false
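The docstring of lcs_cut2 in the examples/lcs_cutmodule.py record above describes a Hirschberg-style LCS length computation with extra pruning. A textbook O(m*n) dynamic-programming LCS length, written here independently of the record, is a handy reference to cross-check it against on small inputs; the function name and sample strings are my own.

from __future__ import print_function

def lcs_len_reference(s1, s2):
    """Plain dynamic-programming LCS length, using O(len(s2)) memory."""
    m, n = len(s1), len(s2)
    prev = [0] * (n + 1)
    for i in range(m):
        cur = [0] * (n + 1)
        for j in range(n):
            if s1[i] == s2[j]:
                cur[j + 1] = prev[j] + 1
            else:
                cur[j + 1] = max(prev[j + 1], cur[j])
        prev = cur
    return prev[n]

# Classic example: the LCS of these strings ("BCBA" or "BDAB") has length 4.
assert lcs_len_reference("ABCBDAB", "BDCABA") == 4
print(lcs_len_reference("ABCBDAB", "BDCABA"))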
kartvep/Combaine
combaine/common/configloader/config.py
1
2022
import yaml
import json
import os
from functools import partial

from combaine.common.loggers import CommonLogger
from combaine.common import constants


__all__ = ["FormatError", "MissingConfigError", "parse_agg_cfg", "parse_parsing_cfg", "parse_common_cfg"]


class ConfigError(Exception):
    pass


class FormatError(ConfigError):

    def __init__(self, msg):
        self.msg = msg

    def __repr__(self):
        return "Invalid file format %s. Only JSON and YAML are allowed." % self.msg

    def __str__(self):
        return self.__repr__()


class MissingConfigError(ConfigError):

    def __init__(self, msg):
        self.msg = msg

    def __repr__(self):
        return "Missing config file: %s.(%s)" % (self.msg, '|'.join(constants.VALID_CONFIG_EXTENSIONS))

    def __str__(self):
        return self.__repr__()


def _handle_json(data):
    try:
        return json.loads(data)
    except ValueError as err:
        return None


def _handle_yaml(data):
    try:
        return yaml.load(data)
    except yaml.YAMLError as err:
        return None


def _combaine_config(path, name):
    path = path.rstrip('/')
    L = CommonLogger()
    cfg = [_cfg for _cfg in ("%s/%s.%s" % (path, name, ext) for ext in constants.VALID_CONFIG_EXTENSIONS) if os.path.isfile(_cfg)]
    if len(cfg) == 0:
        raise MissingConfigError("%s/%s" % (path, name))
    elif len(cfg) > 1:
        L.warning("More than one config with name %s. Use %s" % (name, cfg[0]))
    with open(cfg[0]) as f:
        _data = f.read()
    data = _handle_yaml(_data) or _handle_json(_data)
    if data is None:
        raise FormatError("%s/%s" % (path, name))
    else:
        return data


parse_common_cfg = partial(_combaine_config, constants.COMMON_PATH)
parse_agg_cfg = partial(_combaine_config, constants.AGG_PATH)
parse_parsing_cfg = partial(_combaine_config, constants.PARS_PATH)


if __name__ == "__main__":
    print parse_agg_cfg("http_ok")
    print parse_parsing_cfg("photo_proxy")
    print parse_common_cfg("combaine")
lgpl-3.0
-3,035,238,920,399,240,700
25.25974
130
0.624135
false
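A tiny illustration of the YAML-then-JSON fallback inside _combaine_config in the Combaine config.py record above. The sample string is invented; note that valid JSON is also valid YAML, so _handle_yaml normally succeeds first.

raw = '{"groups": ["photo_proxy"], "period": 60}'
data = _handle_yaml(raw) or _handle_json(raw)
print data   # {'groups': ['photo_proxy'], 'period': 60}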
edwards-lab/MVtest
tests/test_cmdline.py
1
17297
#!/usr/bin/env python import sys # For debug, preference local install over all else if "DEBUG" in sys.argv: sys.path.insert(0, "../") sys.path.insert(0, "../../") # For mvtest sys.path.insert(0, ".") sys.argv.remove("DEBUG") import numpy import os import mvtest import unittest import test_analyze_tped import test_analyze_ped import test_pedigree_parser as test_pedigree_parser import test_transped_parser as test_transped_parser from meanvar import mv_esteq from libgwas.boundary import BoundaryCheck from libgwas.data_parser import DataParser class TestCmdlineTPed(test_analyze_tped.TestBase): def testTPedCmdLineFilenames(self): cmds = "--tfile %s" % (self.tfam_filename.split(".")[0]) app = mvtest.MVTestApplication() dataset,vars = app.LoadCmdLine(cmds.split(" ")) self.assertEqual("PhenoCovar", vars.__class__.__name__) self.assertEqual(self.tfam_filename, dataset.tfam_file) self.assertEqual(self.tped_filename, dataset.tped_file) self.assertEqual(2000, len(dataset.families)) self.assertEqual([0]*2000, list(dataset.ind_mask[:, 0])) def testTPedCmdLineFilenamesExcplicit(self): cmds = "--tped %s --tfam %s" % (self.tped_filename, self.tfam_filename) app = mvtest.MVTestApplication() dataset,vars = app.LoadCmdLine(cmds.split(" ")) self.assertEqual("PhenoCovar", vars.__class__.__name__) self.assertEqual(self.tfam_filename, dataset.tfam_file) self.assertEqual(self.tped_filename, dataset.tped_file) self.assertEqual(2000, len(dataset.families)) self.assertEqual([0]*2000, list(dataset.ind_mask[:, 0])) def testTPedCmdLineWithRemove(self): missing = "rs3000,rs4000,rs5000" cmds = "--tfile %s --exclude %s" % (self.tfam_filename.split(".")[0], missing) app = mvtest.MVTestApplication() dataset,vars = app.LoadCmdLine(cmds.split(" ")) self.assertEqual("Parser", dataset.__class__.__name__) self.assertEqual("PhenoCovar", vars.__class__.__name__) self.assertEqual(self.tfam_filename, dataset.tfam_file) self.assertEqual(self.tped_filename, dataset.tped_file) self.assertEqual(2000, len(dataset.families)) self.assertEqual(missing.split(","), list(DataParser.boundary.ignored_rs)) results = [x for x in mv_esteq.RunAnalysis(dataset, vars)] self.assertEqual(6, len(results)) def testTPedCmdLineWithExcludeFile(self): file = open("__exclusions", "w") missing = ["%s:%s" % (i, i) for i in xrange(0, 500)] file.write("\n".join(["%s %s" % (i, i) for i in xrange(0, 500)])) file.close() cmds = "--tfile %s --remove __exclusions" % (self.tfam_filename.split(".")[0]) app = mvtest.MVTestApplication() dataset,vars = app.LoadCmdLine(cmds.split(" ")) self.assertEqual("PhenoCovar", vars.__class__.__name__) self.assertEqual(self.tfam_filename, dataset.tfam_file) self.assertEqual(self.tped_filename, dataset.tped_file) self.assertEqual(1500, len(dataset.families)) self.assertEqual([1]*500 + [0]*1500, list(dataset.ind_mask[:, 0])) self.assertEqual(missing, DataParser.ind_exclusions) os.remove("__exclusions") def testTPedCmdLineWithExclude(self): missing = ["%s:%s" % (i, i) for i in xrange(0, 500)] cmds = "--tfile %s --remove %s" % (self.tfam_filename.split(".")[0], ",".join(missing)) app = mvtest.MVTestApplication() dataset,vars = app.LoadCmdLine(cmds.split(" ")) self.assertEqual("PhenoCovar", vars.__class__.__name__) self.assertEqual(self.tfam_filename, dataset.tfam_file) self.assertEqual(self.tped_filename, dataset.tped_file) self.assertEqual(1500, len(dataset.families)) self.assertEqual([1]*500 + [0]*1500, list(dataset.ind_mask[:, 0])) self.assertEqual(",".join(missing), ",".join(DataParser.ind_exclusions)) def testTPedCmdLineWithBP(self): 
cmds = "--tfile %s --chr=1 --from-bp=1000 --to-bp=5000" % (self.tfam_filename.split(".")[0]) app = mvtest.MVTestApplication() dataset,vars = app.LoadCmdLine(cmds.split(" ")) self.assertEqual(BoundaryCheck.chrom, 1) results = [x for x in mv_esteq.RunAnalysis(dataset, vars)] self.assertEqual(5, len(results)) self.assertEqual(1000, results[0].pos) self.assertEqual(2000, results[1].pos) self.assertEqual(3000, results[2].pos) self.assertEqual(4000, results[3].pos) self.assertEqual(5000, results[4].pos) def testTPedCmdLineWithKB(self): cmds = "--tfile %s --chr=1 --from-kb=1 --to-kb=6" % (self.tfam_filename.split(".")[0]) app = mvtest.MVTestApplication() dataset,vars = app.LoadCmdLine(cmds.split(" ")) self.assertEqual(BoundaryCheck.chrom, 1) results = [x for x in mv_esteq.RunAnalysis(dataset, vars)] self.assertEqual(6, len(results)) self.assertEqual(1000, results[0].pos) self.assertEqual(2000, results[1].pos) self.assertEqual(3000, results[2].pos) self.assertEqual(4000, results[3].pos) self.assertEqual(5000, results[4].pos) self.assertEqual(6000, results[5].pos) def testTPedCmdLineWithMB(self): cmds = "--tfile %s --chr=1 --from-mb=1 --to-mb=2" % (self.tfam_filename.split(".")[0]) app = mvtest.MVTestApplication() dataset,vars = app.LoadCmdLine(cmds.split(" ")) self.assertEqual(1000000, DataParser.boundary.bounds[0]) self.assertEqual(2000000, DataParser.boundary.bounds[1]) def testTPedCmdLineWithSNPs(self): cmds = "--tfile %s --chr=1 --snps=rs2000-rs4000" % (self.tfam_filename.split(".")[0]) app = mvtest.MVTestApplication() dataset,vars = app.LoadCmdLine(cmds.split(" ")) self.assertEqual(BoundaryCheck.chrom, 1) results = [x for x in mv_esteq.RunAnalysis(dataset, vars)] self.assertEqual(3, len(results)) self.assertEqual(2000, results[0].pos) self.assertEqual(3000, results[1].pos) self.assertEqual(4000, results[2].pos) def testTPedCmdLineMAF(self): cmds = "--tfile %s --maf=0.3" % (self.tfam_filename.split(".")[0]) app = mvtest.MVTestApplication() dataset,vars = app.LoadCmdLine(cmds.split(" ")) maf = [0.30225, 0.3075, 0.31, 0.3025, 0.30625] i=0 for snp in dataset: self.assertAlmostEqual(maf[i], snp.maf) i += 1 def testTPedCmdLineMaxMAF(self): cmds = "--tfile %s --max-maf=0.3" % (self.tfam_filename.split(".")[0]) app = mvtest.MVTestApplication() dataset,vars = app.LoadCmdLine(cmds.split(" ")) maf = [0.29925, 0.28775, 0.295, 0.2975] i=0 for snp in dataset: self.assertAlmostEqual(maf[i], snp.maf) i += 1 class TestCmdlinePed(test_analyze_ped.TestBase): def testPedCmdLineFilenames(self): cmds = "--file %s" % (self.ped_filename.split(".")[0]) app = mvtest.MVTestApplication() dataset,vars = app.LoadCmdLine(cmds.split(" ")) self.assertEqual("PhenoCovar", vars.__class__.__name__) self.assertEqual(self.map_filename, dataset.mapfile) self.assertEqual(self.ped_filename, dataset.datasource) self.assertEqual(9, len(dataset.markers)) def testPedCmdLineMAF(self): cmds = "--file %s --maf=0.3" % (self.ped_filename.split(".")[0]) app = mvtest.MVTestApplication() dataset,vars = app.LoadCmdLine(cmds.split(" ")) maf = [0.30225, 0.3075, 0.31, 0.3025, 0.30625] i=0 for snp in dataset: self.assertAlmostEqual(maf[i], snp.maf, places=4) i += 1 def testPedCmdLineMaxMAF(self): cmds = "--file %s --max-maf=0.3" % (self.ped_filename.split(".")[0]) app = mvtest.MVTestApplication() dataset,vars = app.LoadCmdLine(cmds.split(" ")) maf = [0.29925, 0.28775, 0.295, 0.2975] i=0 for snp in dataset: self.assertAlmostEqual(maf[i], snp.maf) i += 1 class TestCmdLineSimplePed(test_pedigree_parser.TestBase): def testPedCmdLineMIND(self): cmds = 
"--ped %s --map %s --mind=0.5" % (self.ped_filename_missing, self.map_filename) app = mvtest.MVTestApplication() dataset,vars = app.LoadCmdLine(cmds.split(" ")) genotypes = [ [0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0], [-1, -1, -1, -1, -1, 1, -1, -1, -1, 0, 0, 1], [-1, 2, 1, 1, 0, 0, 0, 2, 1, 1, 0, 0], [-1, 1, 2, 1, 1, 0, 0, 1, 2, 1, 1, 0], [-1, 2, 0, 1, 0, 0, 1, 2, 0, 1, 0, 0], [-1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0] ] mapdata = [x.strip().split() for x in open(self.map_filename).readlines()] index = 0 for snp in dataset: self.assertEqual(genotypes[index][1:], list(snp.genotype_data)) self.assertEqual(int(mapdata[index][0]), snp.chr) self.assertEqual(int(mapdata[index][3]), snp.pos) self.assertEqual(mapdata[index][1], snp.rsid) index += 1 self.assertEqual(7, index) def testPedCmdLineMIND2(self): cmds = "--ped %s --map %s --mind=0.10" % (self.ped_filename_missing, self.map_filename) app = mvtest.MVTestApplication() dataset,vars = app.LoadCmdLine(cmds.split(" ")) genotypes = [ [ 0, 0, 1, 0], [ 1, 0, 0, 1], [ 0, 1, 0, 0], [ 0, 1, 1, 0], [ 0, 1, 0, 0], [ 0, 0, 0, 0], [0, 0, 0, 0] ] mapdata = [x.strip().split() for x in open(self.map_filename).readlines()] index = 0 for snp in dataset: self.assertEqual(genotypes[index], list(snp.genotype_data)) self.assertEqual(int(mapdata[index][0]), snp.chr) self.assertEqual(int(mapdata[index][3]), snp.pos) self.assertEqual(mapdata[index][1], snp.rsid) index += 1 self.assertEqual(5, index) # Last two are fixed def testPedCmdLineGENO(self): cmds = "--ped %s --map %s --geno=0.50" % (self.ped_filename_missing, self.map_filename) app = mvtest.MVTestApplication() dataset,vars = app.LoadCmdLine(cmds.split(" ")) genotypes = [ [0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0], [-1, 2, 1, 1, 0, 0, 0, 2, 1, 1, 0, 0], [-1, 1, 2, 1, 1, 0, 0, 1, 2, 1, 1, 0], [-1, 2, 0, 1, 0, 0, 1, 2, 0, 1, 0, 0], [-1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0] ] mapdata = [['1', 'rs0001', '0', '500'], ['1', 'rs0003', '0', '25000'], ['1', 'rs0004', '0', '45000'], ['2', 'rs0005', '0', '750'], ['2', 'rs0006', '0', '10000'], ['2', 'rs0007', '0', '25000']] index = 0 for snp in dataset: self.assertEqual(genotypes[index], list(snp.genotype_data)) self.assertEqual(int(mapdata[index][0]), snp.chr) self.assertEqual(int(mapdata[index][3]), snp.pos) self.assertEqual(mapdata[index][1], snp.rsid) index += 1 self.assertEqual(6, index) def testPedCmdLineGENO2(self): cmds = "--ped %s --map %s --geno=0.05" % (self.ped_filename_missing, self.map_filename) app = mvtest.MVTestApplication() dataset,vars = app.LoadCmdLine(cmds.split(" ")) genotypes = [ [0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0], [0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0] ] mapdata = [['1', 'rs0001', '0', '500'], ['2', 'rs0007', '0', '25000']] index = 0 for snp in dataset: self.assertEqual(genotypes[index], list(snp.genotype_data)) self.assertEqual(int(mapdata[index][0]), snp.chr) self.assertEqual(int(mapdata[index][3]), snp.pos) self.assertEqual(mapdata[index][1], snp.rsid) index += 1 self.assertEqual(2, index) def testCmdlineMap3File(self): cmds = "--ped %s --map %s --map3 --geno=0.05" % (self.ped_filename_missing, self.map3_filename) app = mvtest.MVTestApplication() dataset,vars = app.LoadCmdLine(cmds.split(" ")) genotypes = [ [0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0], [0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0] ] mapdata = [['1', 'rs0001', '0', '500'], ['2', 'rs0007', '0', '25000']] index = 0 for snp in dataset: self.assertEqual(genotypes[index], list(snp.genotype_data)) self.assertEqual(int(mapdata[index][0]), snp.chr) 
self.assertEqual(int(mapdata[index][3]), snp.pos) self.assertEqual(mapdata[index][1], snp.rsid) index += 1 self.assertEqual(2, index) class TestCmdLineSimpleTPed(test_transped_parser.TestBase): def testTPedCmdLineMIND(self): cmds = "--tped %s --tfam %s --mind=0.5" % (self.miss_tped_filename, self.tfam_filename) app = mvtest.MVTestApplication() dataset,vars = app.LoadCmdLine(cmds.split(" ")) genotypes = [ [0, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1], [1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1], [0, 1, 1, 0, 0, 0, 2, 1, 1, 0, 0], [0, 2, 1, 1, 0, 0, 1, 2, 1, 1, 0], [1, 0, 1, 0, 0, 1, 2, 0, 1, 0, 0], [1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0] ] mapdata = [['1', 'rs0001', '0', '500'], ['1', 'rs0002', '0', '10000'], ['1', 'rs0003', '0', '25000'], ['1', 'rs0004', '0', '45000'], ['2', 'rs0005', '0', '750'], ['2', 'rs0006', '0', '10000'], ['2', 'rs0007', '0', '25000']] index = 0 for snp in dataset: self.assertEqual(genotypes[index], list(snp.genotype_data)) self.assertEqual(int(mapdata[index][0]), snp.chr) self.assertEqual(int(mapdata[index][3]), snp.pos) self.assertEqual(mapdata[index][1], snp.rsid) index += 1 self.assertEqual(7, index) def testTPedCmdLineMIND2(self): cmds = "--tped %s --tfam %s --mind=0.1" % (self.miss_tped_filename, self.tfam_filename) app = mvtest.MVTestApplication() dataset,vars = app.LoadCmdLine(cmds.split(" ")) genotypes = [ [0, 1], [1, 1], [0, 0], [0, 0], [1, 0], [1, 0], [0, 0] ] mapdata = [['1', 'rs0001', '0', '500'], ['1', 'rs0002', '0', '10000'], ['1', 'rs0003', '0', '25000'], ['1', 'rs0004', '0', '45000'], ['2', 'rs0005', '0', '750'], ['2', 'rs0006', '0', '10000'], ['2', 'rs0007', '0', '25000']] index = 0 for snp in dataset: self.assertEqual(genotypes[index], list(snp.genotype_data)) self.assertEqual(int(mapdata[index][0]), snp.chr) self.assertEqual(int(mapdata[index][3]), snp.pos) self.assertEqual(mapdata[index][1], snp.rsid) index += 1 self.assertEqual(7, index) # Last two are fixed def testTPedCmdLineGENO(self): cmds = "--tped %s --tfam %s --geno=0.5" % (self.miss_tped_filename, self.tfam_filename) app = mvtest.MVTestApplication() dataset,vars = app.LoadCmdLine(cmds.split(" ")) genotypes = [ [1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1], [0, -1, 1, 1, 0, 0, 0, 2, 1, 1, 0, 0], [0, -1, 2, 1, 1, 0, 0, 1, 2, 1, 1, 0], [1, -1, 0, 1, 0, 0, 1, 2, 0, 1, 0, 0], [1, -1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, -1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0] ] mapdata = [['1', 'rs0002', '0', '10000'], ['1', 'rs0003', '0', '25000'], ['1', 'rs0004', '0', '45000'], ['2', 'rs0005', '0', '750'], ['2', 'rs0006', '0', '10000'], ['2', 'rs0007', '0', '25000']] index = 0 for snp in dataset: self.assertEqual(genotypes[index], list(snp.genotype_data)) self.assertEqual(int(mapdata[index][0]), snp.chr) self.assertEqual(int(mapdata[index][3]), snp.pos) self.assertEqual(mapdata[index][1], snp.rsid) index += 1 self.assertEqual(6, index) def testTPedCmdLineGENO2(self): cmds = "--tped %s --tfam %s --geno=0.05" % (self.miss_tped_filename, self.tfam_filename) app = mvtest.MVTestApplication() dataset,vars = app.LoadCmdLine(cmds.split(" ")) genotypes = [ [1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1] ] mapdata = [['1', 'rs0002', '0', '10000']] index = 0 for snp in dataset: self.assertEqual(genotypes[index], list(snp.genotype_data)) self.assertEqual(int(mapdata[index][0]), snp.chr) self.assertEqual(int(mapdata[index][3]), snp.pos) self.assertEqual(mapdata[index][1], snp.rsid) index += 1 self.assertEqual(1, index) if __name__ == "__main__": unittest.main()
gpl-3.0
7,054,703,168,624,016,000
38.672018
109
0.543389
false
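A short note on running the MVtest command-line test module above; these invocations are the conventional ones implied by its __main__ block and DEBUG handling, not something documented in the record.

# Run all TestCase classes in the module through unittest:
#   python tests/test_cmdline.py
# Passing DEBUG makes the module prefer the local checkout on sys.path:
#   python tests/test_cmdline.py DEBUG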
myriadrf/pyLMS7002M
pyLMS7002M/LimeSDRMini.py
1
11270
#*************************************************************** #* Name: LimeSDRMini.py #* Purpose: Class implementing LimeSDRMini functions #* Author: Lime Microsystems () #* Created: 2018-04-16 #* Copyright: Lime Microsystems (limemicro.com) #* License: #************************************************************** from weakproxy import * from copy import copy from LMS7002 import * from timeit import default_timer as timer import atexit from cyLimeLib import * class LimeSDRMini(object): def __init__(self, fRef = 40.0e6, verbose=0): """ Initialize communication with LimeSDRMini. """ boards = cyLimeLib.getDeviceList() if len(boards)==0: raise ValueError("LimeSDR not found") self.cyDev = None for i in range(0,len(boards)): if "LimeSDR Mini" in boards[i]: self.cyDev = cyLimeLib(boards[i]) break if self.cyDev==None: raise ValueError("LimeSDRMini not found") self.usb = self.cyDev # http://stackoverflow.com/questions/8907905/del-myclass-doesnt-call-object-del # https://docs.python.org/3/reference/datamodel.html#object.__del__ # solution is to avoid __del__, define an explict close() and call it atexit atexit.register(self.close) #self.usb.setConfiguration() self.verbose = verbose self.bulkControl = False self.fRef = fRef # reference frequency FW_VER, DEV_TYPE, LMS_PROTOCOL_VER, HW_VER, EXP_BOARD = self.getInfo() if DEV_TYPE!=17: ret = "FW_VER : "+str(FW_VER)+"\n" ret += "DEV_TYPE : "+str(DEV_TYPE)+"\n" ret += "LMS_PROTOCOL_VER : " + str(LMS_PROTOCOL_VER)+"\n" ret += "HW_VER : " + str(HW_VER)+"\n" ret += "EXP_BOARD : " + str(EXP_BOARD)+"\n" raise ValueError("The board is not LimeSDR.\nBoard info:\n"+ret) if verbose>0: self.printInfo() # # Initialize on-board chips # self.LMS7002 = LMS7002(SPIwriteFn=Proxy(self.LMS7002_Write), SPIreadFn=Proxy(self.LMS7002_Read) , verbose=verbose, MCUProgram=Proxy(self.MCUProgram), fRef = self.fRef) self.LMS7002.MIMO = 'MIMO' def close(self): """ Close communication with LimeSDR """ del self.cyDev @staticmethod def findLMS7002(backend="PyUSB"): return cyLimeLib.getDeviceList() def log(self, logMsg): print logMsg def getCommandNumber(self, cmdName): if cmdName == "CMD_GET_INFO": return 0x00 elif cmdName == "CMD_LMS7002_RST": return 0x20 elif cmdName == "LMS_RST_DEACTIVATE": return 0x00 elif cmdName == "LMS_RST_ACTIVATE": return 0x01 elif cmdName == "LMS_RST_PULSE": return 0x02 elif cmdName == "CMD_LMS7002_WR": return 0x21 elif cmdName == "CMD_LMS7002_RD": return 0x22 elif cmdName == "CMD_PROG_MCU": return 0x2C else: raise ValueError("Unknown command "+cmdName) def getLMS7002(self): return self.LMS7002 # # Low level communication # @staticmethod def bytes2string(bytes): """ Convert the byte array to string. Used for serial communication. """ s = "" for i in range(0,len(bytes)): s += chr(bytes[i]) return s @staticmethod def string2bytes(string): """ Convert the string to byte array. Used for serial communication. """ bytes = [0]*int(len(string)) for i in range(0, len(string)): bytes[i] = ord(string[i]) return bytes def sendCommand(self, command, nDataBlocks=0, periphID=0, data=[]): """ Send the command to LimeSDR. Function returns (status, data) """ nData = len(data) if nData>56: raise ValueError("Length of data must be less than 56, "+str(nData)+" bytes given") return self.cyDev.transferLMS64C(command, data) # # Utility functions # def getInfo(self): """ Get the information about LimeSDR. 
Function returns (FW_VER, DEV_TYPE, LMS_PROTOCOL_VER, HW_VER, EXP_BOARD) """ command = self.getCommandNumber("CMD_GET_INFO") status, rxData = self.sendCommand(command) if status != 1: raise IOError("Command returned with status "+str(status)) FW_VER = rxData[0] DEV_TYPE = rxData[1] LMS_PROTOCOL_VER = rxData[2] HW_VER = rxData[3] EXP_BOARD = rxData[4] return (FW_VER, DEV_TYPE, LMS_PROTOCOL_VER, HW_VER, EXP_BOARD) def printInfo(self): """ Print info about LimeSDR """ FW_VER, DEV_TYPE, LMS_PROTOCOL_VER, HW_VER, EXP_BOARD = self.getInfo() self.log("FW_VER : "+str(FW_VER)) self.log("DEV_TYPE : "+str(DEV_TYPE)) self.log("LMS_PROTOCOL_VER : " + str(LMS_PROTOCOL_VER)) self.log("HW_VER : " + str(HW_VER)) self.log("EXP_BOARD : " + str(EXP_BOARD)) def LMS7002_Reset(self, rstType="pulse"): """ Reset LMS7002. rstType specifies the type of reset: pulse - activate and deactivate reset activate - activate reset deactivate - deactivate reset """ command = self.getCommandNumber("CMD_LMS7002_RST") if rstType=="pulse": data = [self.getCommandNumber("LMS_RST_PULSE")] elif rstType=="activate": data = [self.getCommandNumber("LMS_RST_ACTIVATE")] elif rstType=="deactivate": data = [self.getCommandNumber("LMS_RST_DEACTIVATE")] else: raise ValueError("Invalid reset type "+str(rstType)) rxStatus, rxData = self.sendCommand(command, data=data) if rxStatus != 1: raise IOError("Command returned with status "+str(status)) self.LMS7002.loadResetValues() self.cyDev.LMSInit() def LMS7002_Write(self, regList, packetSize=14): """ Write the data to LMS7002 via SPI interface. regList is a list of registers to write in the format: [ (regAddr, regData), (regAddr, regData), ...] packetSize controls the number of register writes in a single USB transfer """ command = self.getCommandNumber("CMD_LMS7002_WR") nDataBlocks = len(regList) toSend = copy(regList) while len(toSend)>0: nPackets = 0 data = [] while nPackets<packetSize and len(toSend)>0: regAddr, regData = toSend[0] toSend.pop(0) regAddrH = regAddr >> 8 regAddrL = regAddr % 256 regDataH = regData >> 8 regDataL = regData % 256 data += [regAddrH, regAddrL, regDataH, regDataL] nPackets += 1 rxStatus, rxData = self.sendCommand(command, nDataBlocks = nPackets, data=data) if rxStatus != 1: raise IOError("Command returned with status "+str(rxStatus)) def LMS7002_Read(self, regList, packetSize=14): """ Read the data from LMS7002 via SPI interface. regList is a list of registers to read in the format: [ regAddr, regAddr, ...] packetSize controls the number of register writes in a single USB transfer """ command = self.getCommandNumber("CMD_LMS7002_RD") nDataBlocks = len(regList) toRead = copy(regList) regData = [] while len(toRead)>0: nPackets = 0 data = [] while nPackets<packetSize and len(toRead)>0: regAddr = toRead[0] toRead.pop(0) regAddrH = regAddr >> 8 regAddrL = regAddr % 256 data += [regAddrH, regAddrL] nPackets += 1 rxStatus, rxData = self.sendCommand(command, nDataBlocks = nPackets, data=data) if rxStatus != 1: raise IOError("Command returned with status "+str(rxStatus)) for i in range(0, nPackets): regDataH = rxData[i*4+2] regDataL = rxData[i*4+3] regData.append( (regDataH << 8) + regDataL) return regData # # LMS7002 MCU program # def MCUProgram(self, mcuProgram, Mode): ver, rev, mask = self.getLMS7002().chipInfo if mask==1: # MCU has 16k RAM if len(mcuProgram)>16384: raise ValueError("MCU program for mask 1 chips must be less than 16 kB. 
Given program size:"+str(len(mcuProgram))) if len(mcuProgram)==8192: # Check if program is 8k mcuProgram += [0]*8192 # Extend it to 16k self._MCUProgram_Direct(mcuProgram, Mode) else: # MCU has 8k RAM if len(mcuProgram)>8192: raise ValueError("MCU program for mask 0 chips must be less than 8 kB. Given program size:"+str(len(mcuProgram))) self._MCUProgram_Direct(mcuProgram, Mode) def _MCUProgram_Direct(self, mcuProgram, Mode): """ Write the data to LMS7002 MCU via SPI interface. MCU is programmed directly by using bulk interface MCU commands. mcuProgram is 8192 or 16384 bytes long array holding the MCU program. mode selects the MCU programming mode. """ if Mode not in [0, 1,2,3, 'EEPROM_AND_SRAM', 'SRAM', 'SRAM_FROM_EEPROM']: raise ValueError("Mode should be [1,2,3, 'EEPROM_AND_SRAM', 'SRAM', 'SRAM_FROM_EEPROM']") if Mode==0: return elif Mode==1 or Mode=='EEPROM_AND_SRAM': mode = 1 elif Mode==2 or Mode=='SRAM': mode = 2 else: mode = 3 if len(mcuProgram)!=8192 and len(mcuProgram)!=16384: raise ValueError("MCU program should be 8192 or 16384 bytes long") toSend = [ (2, 0), (2, mode)] # Write 0 to address 2, write mode to address 2 (mSPI_CTRL) self.LMS7002_Write(toSend) lms7002 = self.getLMS7002() pos = 0 while pos<len(mcuProgram): startTime = timer() while lms7002.mSPI.EMPTY_WRITE_BUFF==0: if timer()-startTime>1: raise IOError("MCU programming timeout") for j in range(0, 4): toSend = [] for i in range(0, 8): toSend.append( (4, mcuProgram[pos]) ) pos += 1 self.LMS7002_Write(toSend) if mode==3: break startTime = timer() while lms7002.mSPI.PROGRAMMED==0: if timer()-startTime>1: raise IOError("MCU programming timeout")
apache-2.0
-7,336,002,023,781,494,000
34.329154
130
0.536823
false
rgblabs/rgbTools
rgbTools/utils/filesystem.py
1
2692
import maya.cmds as cmds

def which (program):
    '''
    If application is found, returns path.
    This works with both full application paths,
    and applications available within the OS's defined PATH
    '''
    import os

    def is_exe (fpath):
        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)

    fpath, fname = os.path.split(program)

    if fpath:
        if is_exe(program):
            return program
    else:
        for path in os.environ["PATH"].split(os.pathsep):
            path = path.strip('"')
            exe_file = os.path.join(path, program)
            if is_exe(exe_file):
                return exe_file
    return None

def getOSPaths ():
    import os
    paths = []
    for path in os.environ["PATH"].split(os.pathsep):
        paths.append(path.strip('"'))
    return paths

def getPythonPaths ():
    import sys
    paths = []
    for pythonPath in sys.path:
        paths.append(pythonPath)
    return paths

def getUserPaths ():
    upaths = {}
    upaths['userAppDir'] = cmds.internalVar(userAppDir=1)
    upaths['userScriptDir'] = cmds.internalVar(userScriptDir=1)
    upaths['userPrefDir'] = cmds.internalVar(userPrefDir=1)
    upaths['userPresetsDir'] = cmds.internalVar(userPresetsDir=1)
    upaths['userShelfDir'] = cmds.internalVar(userShelfDir=1)
    upaths['userMarkingMenuDir'] = cmds.internalVar(userMarkingMenuDir=1)
    upaths['userBitmapsDir'] = cmds.internalVar(userBitmapsDir=1)
    upaths['userTmpDir'] = cmds.internalVar(userTmpDir=1)
    upaths['userWorkspaceDir'] = cmds.internalVar(userWorkspaceDir=1)
    return upaths

def getEnvPaths():
    import os
    import sys
    import maya.mel as mel
    scriptPaths = mel.eval("getenv \"MAYA_SCRIPT_PATH\"")
    plugInPaths = mel.eval("getenv \"MAYA_PLUG_IN_PATH\"")
    pythonPaths = mel.eval("getenv \"PYTHONPATH\"")
    iconPaths = mel.eval("getenv \"XBMLANGPATH\"")
    pathPaths = mel.eval("getenv \"PATH\"")
    sysPaths = sys.path

    return {
        'MAYA_SCRIPT_PATH' : scriptPaths.split(os.pathsep),
        'MAYA_PLUG_IN_PATH' : plugInPaths.split(os.pathsep),
        'PYTHONPATH' : pythonPaths.split(os.pathsep),
        'XBMLANGPATH' : iconPaths.split(os.pathsep),
        'PATH' : pathPaths.split(os.pathsep),
        'sys' : sysPaths
    }

def getCurrentFilePath ():
    return cmds.file(query=True, sceneName=True)

def crashRecoverDialog ():
    dirpath = cmds.internalVar(userTmpDir=1)
    mask = dirpath+'*.ma'
    filepath = cmds.fileDialog(title='Recover Crash File...', directoryMask=mask)
    if filepath != '':
        cmds.file(filepath, open=True)
        cmds.file(renameToSave=True)
mit
-6,761,105,692,595,262,000
28.911111
81
0.635587
false
BarrelfishOS/barrelfish
tools/harness/tests/irqtest.py
1
2241
########################################################################## # Copyright (c) 2011, ETH Zurich. # All rights reserved. # # This file is distributed under the terms in the attached LICENSE file. # If you do not find this file, copies can be found by writing to: # ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group. ########################################################################## import datetime import re import tests from common import TestCommon from results import PassFailResult #IRQTEST_TIMEOUT = datetime.timedelta(minutes=5) class IRQTestCommon(TestCommon): '''PCI IRQ test''' def get_modules(self, build, machine): modules = super(IRQTestCommon, self).get_modules(build, machine) # This makes kaluga start the irqtest binary for e1000 cards modules.add_module("e1000n_irqtest", ["auto"]) return modules def get_finish_string(self): return "TEST " def process_data(self, testdir, rawiter): for line in rawiter: if line.startswith("TEST SUCCESS"): return PassFailResult(True) return PassFailResult(False) @tests.add_test class IRQTestLegacy(IRQTestCommon): '''PCI Legacy IRQ test''' name = "irqtestlegacy" def get_modules(self, build, machine): modules = super(IRQTestLegacy, self).get_modules(build, machine) # This makes kaluga start the irqtest binary for e1000 cards modules.add_module_arg("kaluga","add_device_db=device_db_irqtest_legacy") return modules def get_finish_string(self): return "TEST " def process_data(self, testdir, rawiter): for line in rawiter: if line.startswith("TEST SUCCESS"): return PassFailResult(True) return PassFailResult(False) @tests.add_test class IRQTestMSIX(IRQTestCommon): '''PCI MSIX IRQ test''' name = "irqtestmsix" def get_modules(self, build, machine): modules = super(IRQTestMSIX, self).get_modules(build, machine) # This makes kaluga start the irqtest binary for e1000 cards modules.add_module_arg("kaluga","add_device_db=device_db_irqtest_msix") return modules
mit
-1,460,101,033,456,651,800
34.015625
81
0.62606
false
edvisees/sciDP
nn_passage_tagger.py
1
15369
import warnings import sys import codecs import numpy import argparse import theano import json import pickle from rep_reader import RepReader from util import read_passages, evaluate, make_folds from keras.models import Sequential, Graph, model_from_json from keras.layers.core import TimeDistributedDense, Dropout from keras.layers.recurrent import LSTM, GRU from keras.callbacks import EarlyStopping from attention import TensorAttention from keras_extensions import HigherOrderTimeDistributedDense class PassageTagger(object): def __init__(self, word_rep_file=None, pickled_rep_reader=None): if pickled_rep_reader: self.rep_reader = pickled_rep_reader elif word_rep_file: self.rep_reader = RepReader(word_rep_file) self.input_size = self.rep_reader.rep_shape[0] self.tagger = None def make_data(self, trainfilename, use_attention, maxseqlen=None, maxclauselen=None, label_ind=None, train=False): print >>sys.stderr, "Reading data.." str_seqs, label_seqs = read_passages(trainfilename, is_labeled=train) print >>sys.stderr, "Sample data for train:" if train else "Sample data for test:" print >>sys.stderr, zip(str_seqs[0], label_seqs[0]) if not label_ind: self.label_ind = {"none": 0} else: self.label_ind = label_ind seq_lengths = [len(seq) for seq in str_seqs] if not maxseqlen: maxseqlen = max(seq_lengths) if not maxclauselen: if use_attention: clauselens = [] for str_seq in str_seqs: clauselens.extend([len(clause.split()) for clause in str_seq]) maxclauselen = max(clauselens) X = [] Y = [] Y_inds = [] init_word_rep_len = len(self.rep_reader.word_rep) all_word_types = set([]) for str_seq, label_seq in zip(str_seqs, label_seqs): for label in label_seq: if label not in self.label_ind: self.label_ind[label] = len(self.label_ind) if use_attention: x = numpy.zeros((maxseqlen, maxclauselen, self.input_size)) else: x = numpy.zeros((maxseqlen, self.input_size)) y_ind = numpy.zeros(maxseqlen) seq_len = len(str_seq) # The following conditional is true only when we've already trained, and one of the sequences in the test set is longer than the longest sequence in training. if seq_len > maxseqlen: str_seq = str_seq[:maxseqlen] seq_len = maxseqlen if train: for i, (clause, label) in enumerate(zip(str_seq, label_seq)): clause_rep = self.rep_reader.get_clause_rep(clause) for word in clause.split(): all_word_types.add(word) if use_attention: if len(clause_rep) > maxclauselen: clause_rep = clause_rep[:maxclauselen] x[-seq_len+i][-len(clause_rep):] = clause_rep else: x[-seq_len+i] = numpy.mean(clause_rep, axis=0) y_ind[-seq_len+i] = self.label_ind[label] X.append(x) Y_inds.append(y_ind) else: for i, clause in enumerate(str_seq): clause_rep = self.rep_reader.get_clause_rep(clause) for word in clause.split(): all_word_types.add(word) if use_attention: if len(clause_rep) > maxclauselen: clause_rep = clause_rep[:maxclauselen] x[-seq_len+i][-len(clause_rep):] = clause_rep else: x[-seq_len+i] = numpy.mean(clause_rep, axis=0) X.append(x) final_word_rep_len = len(self.rep_reader.word_rep) oov_ratio = float(final_word_rep_len - init_word_rep_len)/len(all_word_types) print >>sys.stderr, "OOV ratio: %f" % oov_ratio for y_ind in Y_inds: y = numpy.zeros((maxseqlen, len(self.label_ind))) for i, y_ind_i in enumerate(y_ind): y[i][y_ind_i] = 1 Y.append(y) self.rev_label_ind = {i: l for (l, i) in self.label_ind.items()} return seq_lengths, numpy.asarray(X), numpy.asarray(Y) def get_attention_weights(self, X_test): if not self.tagger: raise RuntimeError, "Tagger not trained yet!" 
inp = self.tagger.get_input() att_out = None for layer in self.tagger.layers: if layer.get_config()['name'].lower() == "tensorattention": att_out = layer.get_output() break if not att_out: raise RuntimeError, "No attention layer found!" f = theano.function([inp], att_out) return f(X_test) def predict(self, X, bidirectional, test_seq_lengths=None, tagger=None): if not tagger: tagger = self.tagger if not tagger: raise RuntimeError, "Tagger not trained yet!" if test_seq_lengths is None: # Determining actual lengths sans padding x_lens = [] for x in X: x_len = 0 for i, xi in enumerate(x): if xi.sum() != 0: x_len = len(x) - i break x_lens.append(x_len) else: x_lens = test_seq_lengths if bidirectional: pred_probs = tagger.predict({'input':X})['output'] else: pred_probs = tagger.predict(X) pred_inds = numpy.argmax(pred_probs, axis=2) pred_label_seqs = [] for pred_ind, x_len in zip(pred_inds, x_lens): pred_label_seq = [self.rev_label_ind[pred] for pred in pred_ind][-x_len:] # If the following number is positive, it means we ignored some clauses in the test passage to make it the same length as the ones we trained on. num_ignored_clauses = max(0, x_len - len(pred_label_seq)) # Make labels for those if needed. if num_ignored_clauses > 0: warnings.warn("Test sequence too long. Ignoring %d clauses at the beginning and labeling them none." % num_ignored_clauses) ignored_clause_labels = ["none"] * num_ignored_clauses pred_label_seq = ignored_clause_labels + pred_label_seq pred_label_seqs.append(pred_label_seq) return pred_probs, pred_label_seqs, x_lens def fit_model(self, X, Y, use_attention, att_context, bidirectional): print >>sys.stderr, "Input shape:", X.shape, Y.shape early_stopping = EarlyStopping(patience = 2) num_classes = len(self.label_ind) if bidirectional: tagger = Graph() tagger.add_input(name='input', input_shape=X.shape[1:]) if use_attention: tagger.add_node(TensorAttention(X.shape[1:], context=att_context), name='attention', input='input') lstm_input_node = 'attention' else: lstm_input_node = 'input' tagger.add_node(LSTM(X.shape[-1]/2, return_sequences=True), name='forward', input=lstm_input_node) tagger.add_node(LSTM(X.shape[-1]/2, return_sequences=True, go_backwards=True), name='backward', input=lstm_input_node) tagger.add_node(TimeDistributedDense(num_classes, activation='softmax'), name='softmax', inputs=['forward', 'backward'], merge_mode='concat', concat_axis=-1) tagger.add_output(name='output', input='softmax') tagger.summary() tagger.compile('adam', {'output':'categorical_crossentropy'}) tagger.fit({'input':X, 'output':Y}, validation_split=0.1, callbacks=[early_stopping], show_accuracy=True, nb_epoch=100, batch_size=10) else: tagger = Sequential() word_proj_dim = 50 if use_attention: _, input_len, timesteps, input_dim = X.shape tagger.add(HigherOrderTimeDistributedDense(input_dim=input_dim, output_dim=word_proj_dim)) att_input_shape = (input_len, timesteps, word_proj_dim) print >>sys.stderr, "Attention input shape:", att_input_shape tagger.add(Dropout(0.5)) tagger.add(TensorAttention(att_input_shape, context=att_context)) else: _, input_len, input_dim = X.shape tagger.add(TimeDistributedDense(input_dim=input_dim, input_length=input_len, output_dim=word_proj_dim)) tagger.add(LSTM(input_dim=word_proj_dim, output_dim=word_proj_dim, input_length=input_len, return_sequences=True)) tagger.add(TimeDistributedDense(num_classes, activation='softmax')) tagger.summary() tagger.compile(loss='categorical_crossentropy', optimizer='adam') tagger.fit(X, Y, validation_split=0.1, 
callbacks=[early_stopping], show_accuracy=True, batch_size=10) return tagger def train(self, X, Y, use_attention, att_context, bidirectional, cv=True, folds=5): if cv: cv_folds = make_folds(X, Y, folds) accuracies = [] fscores = [] for fold_num, ((train_fold_X, train_fold_Y), (test_fold_X, test_fold_Y)) in enumerate(cv_folds): tagger = self.fit_model(train_fold_X, train_fold_Y, use_attention, att_context, bidirectional) pred_probs, pred_label_seqs, x_lens = self.predict(test_fold_X, bidirectional, tagger=tagger) pred_inds = numpy.argmax(pred_probs, axis=2) flattened_preds = [] flattened_targets = [] for x_len, pred_ind, test_target in zip(x_lens, pred_inds, test_fold_Y): flattened_preds.extend(pred_ind[-x_len:]) flattened_targets.extend([list(tt).index(1) for tt in test_target[-x_len:]]) assert len(flattened_preds) == len(flattened_targets) accuracy, weighted_fscore, all_fscores = evaluate(flattened_targets, flattened_preds) print >>sys.stderr, "Finished fold %d. Accuracy: %f, Weighted F-score: %f"%(fold_num, accuracy, weighted_fscore) print >>sys.stderr, "Individual f-scores:" for cat in all_fscores: print >>sys.stderr, "%s: %f"%(self.rev_label_ind[cat], all_fscores[cat]) accuracies.append(accuracy) fscores.append(weighted_fscore) accuracies = numpy.asarray(accuracies) fscores = numpy.asarray(fscores) print >>sys.stderr, "Accuracies:", accuracies print >>sys.stderr, "Average: %0.4f (+/- %0.4f)"%(accuracies.mean(), accuracies.std() * 2) print >>sys.stderr, "Fscores:", fscores print >>sys.stderr, "Average: %0.4f (+/- %0.4f)"%(fscores.mean(), fscores.std() * 2) self.tagger = self.fit_model(X, Y, use_attention, att_context, bidirectional) model_ext = "att=%s_cont=%s_bi=%s"%(str(use_attention), att_context, str(bidirectional)) model_config_file = open("model_%s_config.json"%model_ext, "w") model_weights_file_name = "model_%s_weights"%model_ext model_label_ind = "model_%s_label_ind.json"%model_ext model_rep_reader = "model_%s_rep_reader.pkl"%model_ext print >>model_config_file, self.tagger.to_json() self.tagger.save_weights(model_weights_file_name, overwrite=True) json.dump(self.label_ind, open(model_label_ind, "w")) pickle.dump(self.rep_reader, open(model_rep_reader, "wb")) if __name__ == "__main__": argparser = argparse.ArgumentParser(description="Train, cross-validate and run LSTM discourse tagger") argparser.add_argument('--repfile', type=str, help="Gzipped word embedding file") argparser.add_argument('--train_file', type=str, help="Training file. One clause<tab>label per line and passages separated by blank lines.") argparser.add_argument('--cv', help="Do cross validation", action='store_true') argparser.add_argument('--test_files', metavar="TESTFILE", type=str, nargs='+', help="Test file name(s), separated by space. One clause per line and passages separated by blank lines.") argparser.add_argument('--use_attention', help="Use attention over words? Or else will average their representations", action='store_true') argparser.add_argument('--att_context', type=str, help="Context to look at for determining attention (word/clause)") argparser.set_defaults(att_context='word') argparser.add_argument('--bidirectional', help="Bidirectional LSTM", action='store_true') argparser.add_argument('--show_attention', help="When testing, if using attention, also print the weights", action='store_true') args = argparser.parse_args() repfile = args.repfile if args.train_file: trainfile = args.train_file train = True assert args.repfile is not None, "Word embedding file required for training." 
else: train = False if args.test_files: testfiles = args.test_files test = True else: test = False if not train and not test: raise RuntimeError, "Please specify a train file or test files." use_attention = args.use_attention att_context = args.att_context bid = args.bidirectional show_att = args.show_attention if train: # First returned value is sequence lengths (without padding) nnt = PassageTagger(word_rep_file=repfile) _, X, Y = nnt.make_data(trainfile, use_attention, train=True) nnt.train(X, Y, use_attention, att_context, bid, cv=args.cv) if test: if train: label_ind = nnt.label_ind else: # Load the model from file model_ext = "att=%s_cont=%s_bi=%s"%(str(use_attention), att_context, str(bid)) model_config_file = open("model_%s_config.json"%model_ext, "r") model_weights_file_name = "model_%s_weights"%model_ext model_label_ind = "model_%s_label_ind.json"%model_ext model_rep_reader = "model_%s_rep_reader.pkl"%model_ext rep_reader = pickle.load(open(model_rep_reader, "rb")) print >>sys.stderr, "Loaded pickled rep reader" nnt = PassageTagger(pickled_rep_reader=rep_reader) nnt.tagger = model_from_json(model_config_file.read(), custom_objects={"TensorAttention":TensorAttention, "HigherOrderTimeDistributedDense":HigherOrderTimeDistributedDense}) print >>sys.stderr, "Loaded model:" print >>sys.stderr, nnt.tagger.summary() nnt.tagger.load_weights(model_weights_file_name) print >>sys.stderr, "Loaded weights" label_ind_json = json.load(open(model_label_ind)) label_ind = {k: int(label_ind_json[k]) for k in label_ind_json} print >>sys.stderr, "Loaded label index:", label_ind if not use_attention: assert nnt.tagger.layers[0].name == "timedistributeddense" maxseqlen = nnt.tagger.layers[0].input_length maxclauselen = None else: for l in nnt.tagger.layers: if l.name == "tensorattention": maxseqlen, maxclauselen = l.td1, l.td2 break for test_file in testfiles: print >>sys.stderr, "Predicting on file %s"%(test_file) test_out_file_name = test_file.split("/")[-1].replace(".txt", "")+"_att=%s_cont=%s_bid=%s"%(str(use_attention), att_context, str(bid))+".out" outfile = open(test_out_file_name, "w") test_seq_lengths, X_test, _ = nnt.make_data(test_file, use_attention, maxseqlen=maxseqlen, maxclauselen=maxclauselen, label_ind=label_ind, train=False) print >>sys.stderr, "X_test shape:", X_test.shape pred_probs, pred_label_seqs, _ = nnt.predict(X_test, bid, test_seq_lengths) if show_att: att_weights = nnt.get_attention_weights(X_test.astype('float32')) clause_seqs, _ = read_passages(test_file, is_labeled=True) paralens = [[len(clause.split()) for clause in seq] for seq in clause_seqs] for clauselens, sample_att_weights, pred_label_seq in zip(paralens, att_weights, pred_label_seqs): for clauselen, clause_weights, pred_label in zip(clauselens, sample_att_weights[-len(clauselens):], pred_label_seq): print >>outfile, pred_label, " ".join(["%.4f"%val for val in clause_weights[-clauselen:]]) print >>outfile else: for pred_label_seq in pred_label_seqs: for pred_label in pred_label_seq: print >>outfile, pred_label print >>outfile
apache-2.0
1,109,917,934,340,406,700
47.48265
187
0.65515
false
sevagas/macro_pack
src/vbLib/Base64ToText.py
1
1559
VBA = \ r""" Function Base64ToText(ByVal vCode) Dim oXML, oNode Dim tempString As String tempString = "Msxm" tempString = tempString & "l2.DO" tempString = tempString & "MDoc" tempString = tempString & "ument.3.0" Set oXML = CreateObject(tempString) Set oNode = oXML.CreateElement("base64") oNode.DataType = "bin.base64" oNode.Text = vCode Base64ToText = Stream_BinaryToString(oNode.nodeTypedValue) Set oNode = Nothing Set oXML = Nothing End Function 'Stream_BinaryToString Function '2003 Antonin Foller, http://www.motobit.com 'Binary - VT_UI1 | VT_ARRAY data To convert To a string Private Function Stream_BinaryToString(Binary) Const adTypeText = 2 Const adTypeBinary = 1 'Create Stream object Dim BinaryStream 'As New Stream Dim tmpString As String tmpString = "ADO" tmpString = tmpString & "DB.St" tmpString = tmpString & "ream" Set BinaryStream = CreateObject(tmpString) 'Specify stream type - we want To save binary data. BinaryStream.Type = adTypeBinary 'Open the stream And write binary data To the object BinaryStream.Open BinaryStream.Write Binary 'Change stream type To text/string BinaryStream.Position = 0 BinaryStream.Type = adTypeText 'Specify charset For the output text (unicode) data. BinaryStream.Charset = "us-ascii" 'Open the stream And get text/string data from the object Stream_BinaryToString = BinaryStream.ReadText Set BinaryStream = Nothing End Function """
apache-2.0
-7,680,315,020,665,851,000
27.87037
62
0.695959
false
wevoice/wesub
apps/socialauth/views.py
1
12656
from django.shortcuts import render_to_response, redirect from django.contrib import messages from django.template import RequestContext from django.contrib.auth import authenticate, login from django.http import HttpResponseRedirect, HttpResponse from django.core.urlresolvers import reverse from django.conf import settings from django.contrib.auth.decorators import login_required from django.contrib.auth.views import logout from django.utils.http import urlencode from auth.backends import OpenIdBackend from socialauth.models import AuthMeta from socialauth.forms import EditProfileForm from thirdpartyaccounts.models import TwitterAccount """ from socialauth.models import YahooContact, TwitterContact, FacebookContact,\ SocialProfile, GmailContact """ from openid_consumer.views import begin from socialauth.lib import oauthtwitter2 as oauthtwitter from socialauth.lib.facebook import get_facebook_signature from oauth import oauth from datetime import datetime from django.utils.http import urlquote from utils.translation import get_user_languages_from_cookie from auth.models import UserLanguage TWITTER_CONSUMER_KEY = getattr(settings, 'TWITTER_CONSUMER_KEY', '') TWITTER_CONSUMER_SECRET = getattr(settings, 'TWITTER_CONSUMER_SECRET', '') def get_url_host(request): # FIXME: Duplication if request.is_secure(): protocol = 'https' else: protocol = 'http' host = request.get_host() return '%s://%s' % (protocol, host) def login_page(request): payload = {'fb_api_key':settings.FACEBOOK_API_KEY,} return render_to_response('socialauth/login_page.html', payload, RequestContext(request)) def twitter_login(request, next=None): callback_url = None if next is not None: callback_url = '%s%s?next=%s' % \ (get_url_host(request), reverse("socialauth_twitter_login_done"), urlquote(next)) twitter = oauthtwitter.TwitterOAuthClient(settings.TWITTER_CONSUMER_KEY, settings.TWITTER_CONSUMER_SECRET) request_token = twitter.fetch_request_token(callback_url) request.session['request_token'] = request_token.to_string() signin_url = twitter.authorize_token_url(request_token) return HttpResponseRedirect(signin_url) def twitter_login_done(request): request_token = request.session.get('request_token', None) oauth_verifier = request.GET.get("oauth_verifier", None) # If there is no request_token for session, # Means we didn't redirect user to twitter if not request_token: # Redirect the user to the login page, # So the user can click on the sign-in with twitter button return HttpResponse("We didn't redirect you to twitter...") token = oauth.OAuthToken.from_string(request_token) # If the token from session and token from twitter does not match # means something bad happened to tokens if token.key != request.GET.get('oauth_token', 'no-token'): del request.session['request_token'] if request.GET.get('denied', None) is not None: messages.info(request, "Twitter authorization cancelled.") return redirect('profiles:account') messages.error(request, "Something wrong! Tokens do not match...") # Redirect the user to the login page return redirect('auth:login') twitter = oauthtwitter.TwitterOAuthClient(settings.TWITTER_CONSUMER_KEY, settings.TWITTER_CONSUMER_SECRET) access_token = twitter.fetch_access_token(token, oauth_verifier) request.session['access_token'] = access_token.to_string() if request.session.get('no-login', False): # The user is trying to link a Twitter account to their Amara account. 
if not request.user.is_authenticated(): messages.error(request, 'You must be logged in.') return redirect('auth:login') try: from socialauth.lib.oauthtwitter import OAuthApi twitter = OAuthApi(TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET, access_token) userinfo = twitter.GetUserInfo() except Exception, e: # TODO: Raise something more useful here raise e username = userinfo.screen_name try: account = TwitterAccount.objects.get(username=username) if request.user.pk != account.user.pk: messages.error(request, 'Account already linked') return redirect('profiles:account') except TwitterAccount.DoesNotExist: TwitterAccount.objects.create(user=request.user, username=username, access_token=access_token.to_string()) del request.session['no-login'] messages.info(request, 'Successfully linked a Twitter account') return redirect('profiles:account') request.session['access_token'] = access_token.to_string() user = authenticate(access_token=access_token) # if user is authenticated then login user if user: if not user.userlanguage_set.exists(): langs = get_user_languages_from_cookie(request) for l in langs: UserLanguage.objects.get_or_create(user=user, language=l) login(request, user) else: # We were not able to authenticate user # Redirect to login page del request.session['access_token'] del request.session['request_token'] return HttpResponseRedirect(reverse('socialauth_login_page')) # authentication was successful, use is now logged in return HttpResponseRedirect(request.GET.get('next', settings.LOGIN_REDIRECT_URL)) def openid_login(request, confirmed=True): if 'openid_identifier' in request.GET: user_url = request.GET.get('openid_identifier') request.session['openid_provider'] = user_url return begin(request, user_url = user_url, confirmed=confirmed) else: if 'google.com' in request.POST.get('openid_url', ''): request.session['openid_provider'] = 'Google' return begin(request, user_url='https://www.google.com/accounts/o8/id', confirmed=confirmed) elif 'yahoo.com' in request.POST.get('openid_url', ''): request.session['openid_provider'] = 'Yahoo' else: request.session['openid_provider'] = 'Openid' return begin(request, confirmed=confirmed) def gmail_login(request): request.session['openid_provider'] = 'Google' return begin(request, user_url='https://www.google.com/accounts/o8/id') def udacity_login(request, confirmed=True): request.session['openid_provider'] = 'Udacity' return begin(request, user_url='https://www.udacity.com/openid/server', confirmed=confirmed) def gmail_login_complete(request): pass def yahoo_login(request): request.session['openid_provider'] = 'Yahoo' return begin(request, user_url='http://yahoo.com/') def openid_done(request, provider=None, confirmed=True): """ When the request reaches here, the user has completed the Openid authentication flow. He has authorised us to login via Openid, so request.openid is populated. After coming here, we want to check if we are seeing this openid first time. If we are, we will create a new Django user for this Openid, else login the existing openid. 
""" if not provider: provider = request.session.get('openid_provider', '') if request.openid: #check for already existing associations openid_key = str(request.openid) #authenticate and login if not confirmed: (existing, suggested_email) = OpenIdBackend.pre_authenticate(openid_key=openid_key, request=request, provider=provider) if not existing: if provider == 'Udacity': return redirect('auth:confirm_create_user', 'udacity', suggested_email) elif provider == 'Openid': openid_url = request.GET.get('openid_url', '') response = redirect('auth:confirm_create_user', 'openid', suggested_email) if openid_url: response['Location'] += '?' + urlencode({'openid_url': openid_url}) return response else: return redirect(reverse('auth:confirm_create_user', provider, suggested_email)) email = request.GET.get('email', None) user = authenticate(openid_key=openid_key, request=request, provider=provider, email=email) if user: if not user.userlanguage_set.exists(): langs = get_user_languages_from_cookie(request) for l in langs: UserLanguage.objects.get_or_create(user=user, language=l) login(request, user) next = None if 'openid_next' in request.session: next = request.session.get('openid_next') if 'next' in request.GET: next = request.GET['next'] if next is not None and len(next.strip()) > 0 : return HttpResponseRedirect(next) redirect_url = reverse('profiles:profile', args=(user,)) return HttpResponseRedirect(redirect_url) else: return HttpResponseRedirect(settings.LOGIN_URL) else: return HttpResponseRedirect(settings.LOGIN_URL) def facebook_login_done(request): API_KEY = settings.FACEBOOK_API_KEY API_SECRET = settings.FACEBOOK_SECRET_KEY REST_SERVER = 'http://api.facebook.com/restserver.php' # FB Connect will set a cookie with a key == FB App API Key if the user has been authenticated if API_KEY in request.COOKIES: signature_hash = get_facebook_signature(API_KEY, API_SECRET, request.COOKIES, True) # The hash of the values in the cookie to make sure they're not forged # AND If session hasn't expired if(signature_hash == request.COOKIES[API_KEY]) and (datetime.fromtimestamp(float(request.COOKIES[API_KEY+'_expires'])) > datetime.now()): #Log the user in now. user = authenticate(cookies=request.COOKIES) if user: # if user is authenticated then login user login(request, user) return HttpResponseRedirect(reverse('socialauth_signin_complete')) else: #Delete cookies and redirect to main Login page. del request.COOKIES[API_KEY + '_session_key'] del request.COOKIES[API_KEY + '_user'] return HttpResponseRedirect(reverse('socialauth_login_page')) return HttpResponseRedirect(reverse('socialauth_login_page')) def openid_login_page(request): return render_to_response('openid/index.html', {}, RequestContext(request)) def signin_complete(request): payload = {} return render_to_response('socialauth/signin_complete.html', payload, RequestContext(request)) @login_required def editprofile(request): if request.method == 'POST': edit_form = EditProfileForm(user=request.user, data=request.POST) if edit_form.is_valid(): user = edit_form.save() try: user.authmeta.is_profile_modified = True user.authmeta.save() except AuthMeta.DoesNotExist: pass if user.openidprofile_set.all().count(): openid_profile = user.openidprofile_set.all()[0] openid_profile.is_valid_username = True openid_profile.save() try: #If there is a profile. 
notify that we have set the username profile = user.get_profile() profile.is_valid_username = True profile.save() except: pass request.user.message_set.create(message='Your profile has been updated.') return HttpResponseRedirect('.') if request.method == 'GET': edit_form = EditProfileForm(user = request.user) payload = {'edit_form':edit_form} return render_to_response('socialauth/editprofile.html', payload, RequestContext(request)) def social_logout(request): # Todo # still need to handle FB cookies, session etc. # let the openid_consumer app handle openid-related cleanup from openid_consumer.views import signout as oid_signout oid_signout(request) # normal logout logout_response = logout(request) if getattr(settings, 'LOGOUT_REDIRECT_URL', None): return HttpResponseRedirect(settings.LOGOUT_REDIRECT_URL) else: return logout_response
agpl-3.0
7,060,478,658,976,568,000
41.469799
145
0.656606
false
itu-oss-project-team/oss-github-analysis-project
github_analysis_tool/analyzer/tf-idf.py
1
3362
import os.path import sys sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))) from math import log10 import yaml from github_analysis_tool.services.database_service import DatabaseService class Tfidf: def __init__(self, secret_config): # Generate a github_requester with imported GitHub tokens self.__databaseService = DatabaseService(secret_config['mysql']) self.__commits = [] def addCommitToDictionary(self, commit_sha, commit_message): commit_msg = str(commit_message).encode('utf-8') commit_msg = str(commit_msg) #sha, message, tf-idf self.__commits.append([commit_sha, commit_msg, 0]) def printValues(self, commitList): print("size: " + str(len(commitList)) + "\n") for commit in commitList: commit_msg = str(commit[1]) print(commit_msg + " tf-idf: " + str(commit[2])) def generateContainer(self): repos = self.__databaseService.getAllRepos(get_only_ids=True) for repo_id in repos: commits = self.__databaseService.getCommitsOfRepo(repo_id, get_only_shas=False) for commit in commits: self.addCommitToDictionary(commit["sha"], commit["message"]) return def tf_idf(self, keywords, threshold_value=0): scored_commits = [] count_of_all_occurances=0 print("Total number of commits: " + str(len(self.__commits))) #idf calculation for commit in self.__commits: commit_msg = commit[1] for word in commit_msg.split(): for keyword in keywords: if word == keyword: count_of_all_occurances += 1 break idf = log10(len(self.__commits)/count_of_all_occurances) print("idf: " + str(idf)) #tf calculation for each commit message for commit in self.__commits: commit_msg = commit[1] count_of_similarities_in_msg=0 for word in commit_msg.split(): for keyword in keywords: if word == keyword: count_of_similarities_in_msg += 1 score = count_of_similarities_in_msg / len(commit_msg.split()) score = score * idf commit[2] = score if score > threshold_value: #sha, message, score scored_commits.append([commit[0], commit[1], commit[2]]) scored_commits.sort(key=lambda x:x[2]) return scored_commits def main(): with open(os.path.join(os.path.dirname(__file__), os.pardir, 'config_secret.yaml'), 'r') as ymlfile: secret_config = yaml.load(ymlfile) tfidf = Tfidf(secret_config) tfidf.generateContainer() print("\nBUG-FIX COMMITS\n") bugfix_commits = tfidf.tf_idf(["Fix", "fixed", "edit", "edited", "modify", "modified", "correct", "corrected"], 0.0) tfidf.printValues(bugfix_commits) print("\nADD NEW FEATURE COMMITS\n") add_commits = tfidf.tf_idf(["add", "added", "implement", "implemented", "feat", "feature"], 0.0) tfidf.printValues(add_commits) print("\nREMOVE COMMITS\n") remove_commits = tfidf.tf_idf(["delete", "deleted", "remove", "removed"], 0.0) tfidf.printValues(remove_commits) return main()
mit
8,553,894,782,115,481,000
34.020833
120
0.594289
false
hirokihamasaki/irma
frontend/frontend/api/v1_1/controllers/files.py
1
7314
# Copyright (c) 2013-2016 Quarkslab. # This file is part of IRMA project. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License in the top-level directory # of this distribution and at: # # http://www.apache.org/licenses/LICENSE-2.0 # # No part of the project, including this file, may be copied, # modified, propagated, or distributed except according to the # terms contained in the LICENSE file. import logging from bottle import response, request from frontend.api.v1_1.errors import process_error from frontend.helpers.utils import guess_hash_type from frontend.models.sqlobjects import FileWeb, File from frontend.api.v1_1.schemas import FileWebSchema_v1_1, ScanSchema_v1_1, \ FileSchema_v1_1 from lib.common.utils import decode_utf8 from lib.irma.common.exceptions import IrmaDatabaseResultNotFound file_web_schema = FileWebSchema_v1_1() scan_schema = ScanSchema_v1_1() file_web_schema.context = {'formatted': True} log = logging.getLogger(__name__) file_web_schema_lite = FileWebSchema_v1_1(exclude=['probe_results']) file_web_schema_lite.context = {'formatted': True} def list(db): """ Search a file using query filters (tags + hash or name). Support pagination. :param all params are sent using query method :rtype: dict of 'total': int, 'page': int, 'per_page': int, 'items': list of file(s) found :return: on success 'items' contains a list of files found on error 'msg' gives reason message """ try: name = None if 'name' in request.query: name = decode_utf8(request.query['name']) h_value = request.query.get('hash') search_tags = request.query.get('tags') if search_tags is not None: search_tags = search_tags.split(',') log.debug("name %s h_value %s search_tags %s", name, h_value, search_tags) if name is not None and h_value is not None: raise ValueError("Can't find using both name and hash") # Get values from query or default offset = request.query.get("offset", default=0) offset = int(offset) limit = request.query.get("limit", default=25) limit = int(limit) if name is not None: base_query = FileWeb.query_find_by_name(name, search_tags, db) elif h_value is not None: h_type = guess_hash_type(h_value) if h_type is None: raise ValueError("Hash not supported") base_query = FileWeb.query_find_by_hash( h_type, h_value, search_tags, db) else: # FIXME this is just a temporary way to output # all files, need a dedicated # file route and controller base_query = FileWeb.query_find_by_name("", search_tags, db) # TODO: Find a way to move pagination as a BaseQuery like in # flask_sqlalchemy. # https://github.com/mitsuhiko/flask-sqlalchemy/blob/master/flask_sqlalchemy/__init__.py#L422 items = base_query.limit(limit).offset(offset).all() if offset == 0 and len(items) < limit: total = len(items) else: total = base_query.count() log.debug("Found %s results", total) response.content_type = "application/json; charset=UTF-8" return { 'total': total, 'offset': offset, 'limit': limit, 'items': file_web_schema_lite.dump(items, many=True).data, } except Exception as e: log.exception(e) process_error(e) def get(sha256, db): """ Detail about one file and all known scans summary where file was present (identified by sha256). Support pagination. 
:param all params are sent using query method :param if alt parameter is "media", response will contains the binary data :rtype: dict of 'total': int, 'page': int, 'per_page': int, :return: on success fileinfo contains file information on success 'items' contains a list of files found on error 'msg' gives reason message """ try: log.debug("h_value %s", sha256) # Check wether its a download attempt or not if request.query.alt == "media": return _download(sha256, db) # Get values from query or default offset = request.query.get("offset", default=0) offset = int(offset) limit = request.query.get("limit", default=25) limit = int(limit) file = File.load_from_sha256(sha256, db) # query all known results not only those with different names base_query = FileWeb.query_find_by_hash("sha256", sha256, None, db, distinct_name=False) # TODO: Find a way to move pagination as a BaseQuery like in # flask_sqlalchemy. # https://github.com/mitsuhiko/flask-sqlalchemy/blob/master/flask_sqlalchemy/__init__.py#L422 items = base_query.limit(limit).offset(offset).all() if offset == 0 and len(items) < limit: total = len(items) else: total = base_query.count() log.debug("offset %d limit %d total %d", offset, limit, total) file_web_schema = FileWebSchema_v1_1(exclude=('probe_results', 'file_infos')) fileinfo_schema = FileSchema_v1_1() # TODO: allow formatted to be a parameter formatted = True fileinfo_schema.context = {'formatted': formatted} response.content_type = "application/json; charset=UTF-8" return { 'file_infos': fileinfo_schema.dump(file).data, 'total': total, 'offset': offset, 'limit': limit, 'items': file_web_schema.dump(items, many=True).data, } except Exception as e: log.exception(e) process_error(e) def add_tag(sha256, tagid, db): """ Attach a tag to a file. """ try: log.debug("h_value %s tagid %s", sha256, tagid) fobj = File.load_from_sha256(sha256, db) fobj.add_tag(tagid, db) db.commit() except Exception as e: log.exception(e) process_error(e) def remove_tag(sha256, tagid, db): """ Remove a tag attached to a file. """ try: log.debug("h_value %s tagid %s", sha256, tagid) fobj = File.load_from_sha256(sha256, db) fobj.remove_tag(tagid, db) db.commit() except Exception as e: log.exception(e) process_error(e) # called by get def _download(sha256, db): """Retrieve a file based on its sha256""" log.debug("h_value %s", sha256) fobj = File.load_from_sha256(sha256, db) # check if file is still present if fobj.path is None: raise IrmaDatabaseResultNotFound("downloading a removed file") # Force download ctype = 'application/octet-stream; charset=UTF-8' # Suggest Filename to sha256 cdisposition = "attachment; filename={}".format(sha256) response.headers["Content-Type"] = ctype response.headers["Content-Disposition"] = cdisposition return open(fobj.path).read()
apache-2.0
4,231,076,620,459,806,000
34.852941
101
0.613071
false
dmccloskey/SBaaS_quantification
template_scripts/import_MQResultsTable.py
1
3766
import sys sys.path.append('C:/Users/dmccloskey-sbrg/Google Drive/SBaaS_base') from SBaaS_base.postgresql_settings import postgresql_settings from SBaaS_base.postgresql_orm import postgresql_orm # read in the settings file filename = 'C:/Users/dmccloskey-sbrg/Google Drive/SBaaS_base/settings.ini'; pg_settings = postgresql_settings(filename); # connect to the database from the settings file pg_orm = postgresql_orm(); pg_orm.set_sessionFromSettings(pg_settings.database_settings); session = pg_orm.get_session(); engine = pg_orm.get_engine(); # your app... path2Lims = 'C:/Users/dmccloskey-sbrg/Google Drive/SBaaS_LIMS'; sys.path.append(path2Lims) sys.path.append('C:/Users/dmccloskey-sbrg/Google Drive/SBaaS_quantification') sys.path.append('C:/Users/dmccloskey-sbrg/Documents/GitHub/io_utilities') sys.path.append('C:/Users/dmccloskey-sbrg/Documents/GitHub/calculate_utilities') sys.path.append('C:/Users/dmccloskey-sbrg/Documents/GitHub/quantification_analysis') ## initialize the biologicalMaterial_geneReferences #from SBaaS_LIMS.lims_biologicalMaterial_io import lims_biologicalMaterial_io #limsbiomat = lims_biologicalMaterial_io(session,engine,pg_settings.datadir_settings); #limsbiomat.drop_lims_biologicalMaterial(); #limsbiomat.initialize_lims_biologicalMaterial(); #limsbiomat.reset_lims_biologicalMaterial(); #limsbiomat.import_biologicalMaterialMassVolumeConversion_add(path2Lims+'/'+'data/tests/analysis_quantification/140826_biologicalMaterial_massVolumeConversion_MG1655.csv'); ## initialize the sample information #from SBaaS_LIMS.lims_sample_execute import lims_sample_execute #limssample = lims_sample_execute(session,engine,pg_settings.datadir_settings); #limssample.drop_lims_sample(); #limssample.initialize_lims_sample(); #limssample.reset_lims_sample(); ## initialize the experiment #from SBaaS_LIMS.lims_experiment_execute import lims_experiment_execute #limsexperiment = lims_experiment_execute(session,engine,pg_settings.datadir_settings); #limsexperiment.drop_lims_experimentTypes(); #limsexperiment.initialize_lims_experimentTypes(); #limsexperiment.reset_lims_experimentTypes(); #limsexperiment.drop_lims_experiment(); #limsexperiment.initialize_lims_experiment(); #limsexperiment.reset_lims_experiment('chemoCLim01'); #limsexperiment.execute_deleteExperiments(['chemoCLim01']); #limsexperiment.execute_makeExperimentFromSampleFile('data/tests/analysis_quantification/150727_Quantification_chemoCLim01_sampleFile01.csv',1,[10.0]); #limsexperiment.execute_makeExperimentFromCalibrationFile('data/tests/analysis_quantification/150805_Quantification_chemoCLim01_calibrationFile01.csv'); ## export the analyst acquisition batch files #limsexperiment.execute_makeBatchFile('chemoCLim01', '150805','data/tests/analysis_quantification/150727_Quantification_chemoCLim01.txt',experiment_type_I=4); #make theresults table from SBaaS_quantification.stage01_quantification_MQResultsTable_execute import stage01_quantification_MQResultsTable_execute exmqrt01 = stage01_quantification_MQResultsTable_execute(session,engine,pg_settings.datadir_settings); exmqrt01.drop_dataStage01_quantification_MQResultsTable(); exmqrt01.initialize_dataStage01_quantification_MQResultsTable(); exmqrt01.execute_deleteExperimentFromMQResultsTable('chemoCLim01',sample_types_I = ['Quality Control','Unknown','Standard','Blank']) exmqrt01.import_dataStage01MQResultsTable_add('data/tests/analysis_quantification/150805_140521_Quantification_chemoCLim01_calibrators01.csv'); 
exmqrt01.import_dataStage01MQResultsTable_add('data/tests/analysis_quantification/150805_Quantification_chemoCLim01_samples02.csv'); exmqrt01.export_dataStage01MQResultsTable_metricPlot_js('chemoCLim01',component_names_I = ['fdp.fdp_1.Light'],measurement_I='calculated_concentration');
mit
-7,147,104,687,622,559,000
59.758065
172
0.827138
false
qisanstudio/qsapp-suibe
src/suibe/models/channel.py
1
3718
# -*- coding: utf-8 -*- from __future__ import unicode_literals from jinja2 import Markup from flask import url_for from studio.core.engines import db from sqlalchemy.ext.hybrid import hybrid_property from suibe.models.article import ArticleModel __all__ = [ 'NaviChannelModel', 'ChannelModel', 'ChannelSummaryModel', 'NaviModel', ] def articles_order_by(): return [db.desc(ArticleModel.is_sticky), db.desc(ArticleModel.date_published)] class NaviChannelModel(db.Model): __tablename__ = 'navi_channel' navi_id = db.Column(db.Integer(), db.ForeignKey('navi.id'), primary_key=True, index=True) channel_id = db.Column(db.Integer(), db.ForeignKey('channel.id'), primary_key=True, index=True) class ChannelModel(db.Model): __tablename__ = 'channel' id = db.Column(db.Integer(), nullable=False, primary_key=True) parent_id = db.Column(db.Integer(), db.ForeignKey('channel.id'), index=True) name = db.Column(db.Unicode(256), nullable=False, unique=True, index=True) date_created = db.Column(db.DateTime(timezone=True), nullable=False, index=True, server_default=db.func.current_timestamp()) _summary = db.relationship( 'ChannelSummaryModel', backref=db.backref('channel', lazy='joined', innerjoin=True), primaryjoin='ChannelModel.id==ChannelSummaryModel.id', foreign_keys='[ChannelSummaryModel.id]', uselist=False, cascade='all, delete-orphan') @hybrid_property def summary(self): return self._summary.content @summary.setter def summary_setter(self, value): if not self._summary: self._summary = ChannelSummaryModel(id=self.id, content=value) self._summary.content = value @property def html(self): return Markup(self.summary) parent = db.relationship('ChannelModel', remote_side=[id], backref='channels') articles = db.relationship( 'ArticleModel', primaryjoin='and_(ChannelModel.id==ArticleModel.cid,' 'ArticleModel.date_published<=func.now())', order_by=articles_order_by, foreign_keys='[ArticleModel.cid]', passive_deletes='all', lazy='dynamic') all_articles = db.relationship( 'ArticleModel', primaryjoin='ChannelModel.id==ArticleModel.cid', order_by=articles_order_by, foreign_keys='[ArticleModel.cid]', backref=db.backref( 'channel', lazy='joined', innerjoin=True), passive_deletes='all', lazy='dynamic') @property def url(self): return url_for("views.channel", cid=self.id) def __str__(self): return self.name class ChannelSummaryModel(db.Model): __tablename__ = 'channel_summary' id = db.Column(db.Integer(), db.ForeignKey('channel.id'), nullable=False, primary_key=True) content = db.Column(db.UnicodeText(), nullable=False) class NaviModel(db.Model): __tablename__ = 'navi' id = db.Column(db.Integer(), nullable=False, primary_key=True) name = db.Column(db.Unicode(256), nullable=False, unique=True, index=True) date_created = db.Column(db.DateTime(timezone=True), nullable=False, index=True, server_default=db.func.current_timestamp()) channels = db.relationship('ChannelModel', secondary=NaviChannelModel.__table__) def __str__(self): return self.name
mit
-6,052,231,150,220,093,000
31.060345
78
0.600861
false
martinohanlon/pgzero-pong
pong.py
1
3151
from math import sin, cos, radians
from time import sleep

#setup the constants
WIDTH = 500
HEIGHT = 300
BALLSPEED = 10
PADDLESPEED = 5
MAXBOUNCEANGLE = 75

def reset_game(angle):
    #setup ball properties
    ball.pos = WIDTH / 2, HEIGHT / 2
    ball.x_float = float(ball.x)
    ball.y_float = float(ball.y)
    ball.angle = angle
    ball.x_vel = BALLSPEED * cos(radians(ball.angle))
    ball.y_vel = BALLSPEED * sin(radians(ball.angle))

    #position the paddles
    pad1.pos = 10, HEIGHT / 2
    pad2.pos = WIDTH - 10, HEIGHT / 2

#create a rectangle of the playing area
screenRect = Rect(10,0,WIDTH - 10,HEIGHT)

#create ball
ball = Actor('ball')

#create paddles
pad1 = Actor('paddle')
pad2 = Actor('paddle')

#reset the game
reset_game(180)

#setup the goals
goals = [0, 0]

def draw():
    screen.clear()
    ball.draw()
    pad1.draw()
    pad2.draw()

def update():

    #move the paddles
    if keyboard.q:
        pad1.top -= PADDLESPEED
    if keyboard.a:
        pad1.top += PADDLESPEED
    if keyboard.k:
        pad2.top -= PADDLESPEED
    if keyboard.m:
        pad2.top += PADDLESPEED

    #move the ball
    ball_old_x = ball.x_float
    ball_old_y = ball.y_float

    ball.x_float = ball.x_float + ball.x_vel
    ball.y_float = ball.y_float + ball.y_vel
    ball.x = int(round(ball.x_float))
    ball.y = int(round(ball.y_float))

    #move the ball back to where it was?
    reset_ball = False

    #has the ball left the screen?
    if not screenRect.contains(ball):

        #did it hit the top or bottom?
        if ball.top < 0 or ball.bottom > HEIGHT:
            ball.y_vel *= -1
            reset_ball = True

        #it must have hit the side
        else:
            if ball.left < 10:
                print("Player 2 goal")
                goals[1] += 1
                reset_game(180)
                sleep(2)
                print("Score {} : {}".format(goals[0], goals[1]))

            elif ball.right > WIDTH - 10:
                print("Player 1 goal")
                goals[0] += 1
                reset_game(0)
                sleep(2)
                print("Score {} : {}".format(goals[0], goals[1]))

    #has the ball hit a paddle
    if pad1.colliderect(ball):
        #work out the bounce angle
        bounce_angle = ((ball.y - pad1.y) / (pad1.height / 2)) * MAXBOUNCEANGLE
        ball.angle = max(0 - MAXBOUNCEANGLE, min(MAXBOUNCEANGLE, bounce_angle))

        #work out the ball velocity
        ball.x_vel = BALLSPEED * cos(radians(ball.angle))
        ball.y_vel = BALLSPEED * sin(radians(ball.angle))

        reset_ball = True

    elif pad2.colliderect(ball):
        bounce_angle = 180 - (((ball.y - pad2.y) / (pad2.height / 2)) * MAXBOUNCEANGLE)
        ball.angle = max(180 - MAXBOUNCEANGLE, min(180 + MAXBOUNCEANGLE, bounce_angle))

        ball.x_vel = BALLSPEED * cos(radians(ball.angle))
        ball.y_vel = BALLSPEED * sin(radians(ball.angle))

        reset_ball = True

    if reset_ball:
        ball.x_float = ball_old_x
        ball.y_float = ball_old_y
        ball.x = int(round(ball.x_float))
        ball.y = int(round(ball.y_float))
mit
6,057,346,098,873,700,000
25.478992
87
0.569026
false
tensorflow/gan
tensorflow_gan/examples/mnist_estimator/train_test.py
1
1617
# coding=utf-8 # Copyright 2020 The TensorFlow GAN Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for mnist_estimator.train.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow.compat.v1 as tf from tensorflow_gan.examples.mnist_estimator import train_lib mock = tf.test.mock class TrainTest(tf.test.TestCase): @mock.patch.object(train_lib, 'data_provider', autospec=True) def test_full_flow(self, mock_data_provider): hparams = train_lib.HParams( batch_size=16, max_number_of_steps=2, noise_dims=3, output_dir=self.get_temp_dir()) # Construct mock inputs. mock_imgs = np.zeros([hparams.batch_size, 28, 28, 1], dtype=np.float32) mock_lbls = np.concatenate( (np.ones([hparams.batch_size, 1], dtype=np.int32), np.zeros([hparams.batch_size, 9], dtype=np.int32)), axis=1) mock_data_provider.provide_data.return_value = (mock_imgs, mock_lbls) train_lib.train(hparams) if __name__ == '__main__': tf.test.main()
apache-2.0
-7,609,691,693,671,442,000
30.096154
75
0.701917
false
zqqf16/SYM
SYM/Models/models.py
1
1880
#!/usr/bin/env python # -*- coding: UTF-8 -*- import re import sys import json import requests def get_raw(url): r = requests.get(url) if r.status_code != 200: return None return r.text def parse_models(regex, text): result = [] lastModel = "" model_regex = re.compile(r'.*\d,\d') for item in regex.findall(text): if model_regex.match(item): result.append([item, lastModel]) else: lastModel = item return result def get_all_models(url): text = get_raw(url) if not text: print("Connect to url failed") return results = [ ["i386", "Simulator"], ["x86_64", "Simulator"], ] ipad = re.compile(r'rowspan.*(iPad[\w \(\)-.]*)') results += parse_models(ipad, text) iPhone = re.compile(r'rowspan.*(iPhone[\w \(\)-.]*)') results += parse_models(iPhone, text) iPod = re.compile(r'rowspan.*(iPod[\w \(\)-.]*)') results += parse_models(iPod, text) watch = re.compile(r'rowspan.*(Watch[\w \(\)-.]*)') results += parse_models(watch, text) return results def json_output(results): json_dict = { m[0]: m[1] for m in results } print(json.dumps(json_dict, indent=4)) def nsdict_output(results): print("@{") for m in results: print(' @"{}": @"{}",'.format(m[0], m[1])) print('}') def text_output(results): for m in results: print('{}:{}'.format(*m)) def pretty(results, fmt='json'): if fmt == 'nsdict': nsdict_output(results) elif fmt == 'json': json_output(results) else: text_output(results) if __name__ == '__main__': results = get_all_models('https://www.theiphonewiki.com/w/index.php?title=Models&action=edit') fmt = 'text' if len(sys.argv) > 1: fmt = sys.argv[1] pretty(results, fmt)
mit
1,235,590,171,324,613,600
22.5125
98
0.55
false
iamsteadman/bambu-api
bambu_api/__init__.py
1
1314
""" Quickly expose your models to a JSON or XML API, authenticated via HTTP or OAuth. """ __version__ = '2.0.1' from bambu_api.options import * from bambu_api.sites import APISite from bambu_api.exceptions import APIException from bambu_api.decorators import argument, returns, named from django.conf import settings from datetime import datetime default_app_config = 'bambu_api.apps.APIConfig' site = APISite() def autodiscover(): """ Works like ``django.contrib.admin.autodiscover``, running thorugh each of the packages within a project's ``INSTALLED_APPS`` setting, to find instances of an ``api`` module which might contain calls to ``bambu_api.site.register``. Unlike ``django.contrib.admin.autodiscover``, you do not need to call this function manually. """ from django.utils.importlib import import_module from django.utils.module_loading import module_has_submodule from copy import copy, deepcopy from bambu_api.endpoints import * for app in settings.INSTALLED_APPS: mod = import_module(app) try: before_import_registry = copy(site._registry) import_module('%s.api' % app) except: site._registry = before_import_registry if module_has_submodule(mod, 'api'): raise
apache-2.0
-4,812,282,785,063,941,000
31.04878
100
0.690259
false
gpfei/python-acsmx2
acsmx2/test.py
1
3625
import unittest from acsmx2.search import Matcher, MatchedWord class MatchTestCase(unittest.TestCase): def setUp(self): self.matcher = Matcher(1024) def test_abc(self): for i, pattern in enumerate([ b'hello', b'world', b'ld', ]): self.matcher.add_pattern(pattern, i) self.matcher.compile() text = b''' this is a hello-world-example. Even helloooworlddd will be matched. ''' count, words = self.matcher.search(text) self.assertEqual(len(words), 4) self.assertEqual(len(words), count) self.assertListEqual(words, [ MatchedWord(23, b'hello'), MatchedWord(32, b'ld'), MatchedWord(61, b'hello'), MatchedWord(71, b'ld'), ]) count, words = self.matcher.search_all(text) self.assertEqual(len(words), 6) self.assertEqual(len(words), count) self.assertListEqual(words, [ MatchedWord(23, b'hello'), MatchedWord(32, b'ld'), MatchedWord(29, b'world'), MatchedWord(61, b'hello'), MatchedWord(71, b'ld'), MatchedWord(68, b'world'), ]) def test_chinese(self): for i, pattern in enumerate([ u'北京'.encode(), u'北京大学'.encode(), u'大学'.encode(), ]): self.matcher.add_pattern(pattern, i) self.matcher.compile() text = u'''我来到北京大学校门口'''.encode() count, words = self.matcher.search(text) self.assertEqual(len(words), 2) self.assertEqual(len(words), count) self.assertListEqual(words, [ MatchedWord(9, '北京'.encode()), MatchedWord(15, '大学'.encode()), ]) count, words = self.matcher.search_all(text) self.assertEqual(len(words), 3) self.assertEqual(len(words), count) self.assertListEqual(words, [ MatchedWord(9, '北京'.encode()), MatchedWord(15, '大学'.encode()), MatchedWord(9, '北京大学'.encode()), ]) class MaxSizeTestCase(unittest.TestCase): def test_no_enough_length(self): matcher = Matcher(10) for i, pattern in enumerate([ b'hello', b'world', ]): matcher.add_pattern(pattern, i) matcher.compile() text = b'this is a hello-world-example.' count, words = matcher.search(text) self.assertEqual(count, 2) self.assertEqual(len(words), 1) self.assertListEqual(words, [MatchedWord(10, b'hello')]) def test_edge_case(self): matcher = Matcher(8) for i, pattern in enumerate([ b'hello', ]): matcher.add_pattern(pattern, i) matcher.compile() text = b'this is a hello-world-example.' count, words = matcher.search(text) self.assertEqual(len(words), 0) self.assertEqual(count, 1) self.assertListEqual(words, []) matcher = Matcher(9) for i, pattern in enumerate([ b'hello', ]): matcher.add_pattern(pattern, i) matcher.compile() text = b'this is a hello-world-example.' count, words = matcher.search(text) self.assertEqual(len(words), 1) self.assertEqual(count, 1) self.assertListEqual(words, [MatchedWord(10, b'hello')]) if __name__ == '__main__': unittest.main()
gpl-2.0
-8,894,289,022,924,483,000
29.211864
64
0.53324
false
azumimuo/family-xbmc-addon
plugin.video.bubbles/resources/lib/sources/german/hoster/open/moviesever.py
1
5229
# -*- coding: utf-8 -*- """ Bubbles Addon Copyright (C) 2016 Viper2k4 This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import base64 import re import urllib import urlparse from resources.lib.modules import cleantitle from resources.lib.modules import client from resources.lib.modules import directstream from resources.lib.modules import source_utils from resources.lib.modules import dom_parser class source: def __init__(self): self.priority = 1 self.language = ['de'] self.domains = ['moviesever.com/'] self.base_link = 'http://moviesever.com/' self.search_link = '/?s=%s' self.get_link = 'http://play.seriesever.net/me/moviesever.php' def movie(self, imdb, title, localtitle, aliases, year): try: url = self.__search([localtitle] + source_utils.aliases_to_array(aliases), year) if not url and title != localtitle: url = self.__search([title] + source_utils.aliases_to_array(aliases), year) return url except: return def sources(self, url, hostDict, hostprDict): sources = [] try: if not url: return sources url = urlparse.urljoin(self.base_link, url) r = client.request(url) rels = dom_parser.parse_dom(r, 'nav', attrs={'class': 'player'}) rels = dom_parser.parse_dom(rels, 'ul', attrs={'class': 'idTabs'}) rels = dom_parser.parse_dom(rels, 'li') rels = [(dom_parser.parse_dom(i, 'a', attrs={'class': 'options'}, req='href'), dom_parser.parse_dom(i, 'img', req='src')) for i in rels] rels = [(i[0][0].attrs['href'][1:], re.findall('\/flags\/(\w+)\.png$', i[1][0].attrs['src'])) for i in rels if i[0] and i[1]] rels = [i[0] for i in rels if len(i[1]) > 0 and i[1][0].lower() == 'de'] r = [dom_parser.parse_dom(r, 'div', attrs={'id': i}) for i in rels] r = [(re.findall('link"?\s*:\s*"(.+?)"', ''.join([x.content for x in i])), dom_parser.parse_dom(i, 'iframe', attrs={'class': 'metaframe'}, req='src')) for i in r] r = [i[0][0] if i[0] else i[1][0].attrs['src'] for i in r if i[0] or i[1]] for i in r: try: i = re.sub('\[.+?\]|\[/.+?\]', '', i) i = client.replaceHTMLCodes(i) if not i.startswith('http'): i = self.__decode_hash(i) valid, host = source_utils.is_host_valid(i, hostDict) if not valid: continue if 'google' in i: host = 'gvideo'; direct = True; urls = directstream.google(i) elif 'ok.ru' in i: host = 'vk'; direct = True; urls = directstream.odnoklassniki(i) elif 'vk.com' in i: host = 'vk'; direct = True; urls = directstream.vk(i) else: direct = False; urls = [{'quality': 'SD', 'url': i}] for x in urls: sources.append({'source': host, 'quality': x['quality'], 'language': 'de', 'url': x['url'], 'direct': direct, 'debridonly': False}) except: pass return sources except: return sources def resolve(self, url): if url.startswith('/'): url = 'http:%s' % url return url def __decode_hash(self, hash): hash = hash.replace("!BeF", "R") hash = hash.replace("@jkp", "Ax") hash += '=' * (-len(hash) % 4) try: return base64.b64decode(hash) except: return def __search(self, titles, year): try: query = self.search_link % (urllib.quote_plus(cleantitle.query(titles[0]))) query = 
urlparse.urljoin(self.base_link, query) t = [cleantitle.get(i) for i in set(titles) if i] y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0'] r = client.request(query) r = dom_parser.parse_dom(r, 'div', attrs={'class': 'details'}) r = [(dom_parser.parse_dom(i, 'div', attrs={'class': 'title'}), dom_parser.parse_dom(i, 'span', attrs={'class': 'year'})) for i in r] r = [(dom_parser.parse_dom(i[0][0], 'a', req='href'), i[1][0].content) for i in r if i[0] and i[1]] r = [(i[0][0].attrs['href'], i[0][0].content, i[1]) for i in r if i[0]] r = sorted(r, key=lambda i: int(i[2]), reverse=True) # with year > no year r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0] return source_utils.strip_domain(r) except: return
gpl-2.0
-8,893,008,589,265,289,000
40.84
174
0.552113
false
longmazhanfeng/automationlibrary
MobileLibrary/setup.py
1
2204
#!/usr/bin/env python import sys from os.path import join, dirname sys.path.append(join(dirname(__file__), 'src')) from ez_setup import use_setuptools use_setuptools() from setuptools import setup execfile(join(dirname(__file__), 'src', 'AppiumLibrary', 'version.py')) setup(name = 'robotframework-mobilelibrary', version = VERSION, description = 'app testing library for Robot Framework Extended with AppiumLibrary', long_description = open(join(dirname(__file__), 'README.rst')).read(), author = 'Subscription QA', author_email = '<[email protected]>', url = 'https://g.hz.netease.com/yixinplusQA/RFUI_Framework/tree/master/Third-Party-Module/MobileLibrary', license = 'Apache License 2.0', keywords = 'robotframework testing testautomation mobile appium webdriver app', platforms = 'any', classifiers = [ "Development Status :: 5 - Production/Stable", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python", "Topic :: Software Development :: Testing" ], install_requires = [ 'images2gif-Pillow >= 0.0.2', 'decorator >= 3.3.2', 'robotframework >= 2.9.1, <=2.9.2', 'docutils >= 0.8.1', 'Appium-Python-Client >= 0.5', 'selenium >= 2.47.1', 'mock >= 1.0.1, <=1.3.0', 'sauceclient >= 0.1.0', 'pytest-cov >= 1.8.1', 'pytest-xdist >= 1.11', 'pytest-pythonpath >= 0.4', ], py_modules=['ez_setup'], package_dir = {'' : 'src'}, packages = ['AppiumLibrary','AppiumLibrary.keywords','AppiumLibrary.locators', 'AppiumLibrary.utils','AppiumLibrary.utils.events'], include_package_data = True, )
mit
-6,960,277,275,502,162,000
44.916667
120
0.500907
false
ShashaQin/frappe
frappe/email/bulk.py
1
11335
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # MIT License. See license.txt from __future__ import unicode_literals import frappe import HTMLParser import smtplib from frappe import msgprint, throw, _ from frappe.email.smtp import SMTPServer, get_outgoing_email_account from frappe.email.email_body import get_email, get_formatted_html from frappe.utils.verified_command import get_signed_params, verify_request from html2text import html2text from frappe.utils import get_url, nowdate, encode, now_datetime, add_days, split_emails, cstr, cint class BulkLimitCrossedError(frappe.ValidationError): pass def send(recipients=None, sender=None, subject=None, message=None, reference_doctype=None, reference_name=None, unsubscribe_method=None, unsubscribe_params=None, unsubscribe_message=None, attachments=None, reply_to=None, cc=(), show_as_cc=(), message_id=None, in_reply_to=None, send_after=None, expose_recipients=False, bulk_priority=1, communication=None): """Add email to sending queue (Bulk Email) :param recipients: List of recipients. :param sender: Email sender. :param subject: Email subject. :param message: Email message. :param reference_doctype: Reference DocType of caller document. :param reference_name: Reference name of caller document. :param bulk_priority: Priority for bulk email, default 1. :param unsubscribe_method: URL method for unsubscribe. Default is `/api/method/frappe.email.bulk.unsubscribe`. :param unsubscribe_params: additional params for unsubscribed links. default are name, doctype, email :param attachments: Attachments to be sent. :param reply_to: Reply to be captured here (default inbox) :param message_id: Used for threading. If a reply is received to this email, Message-Id is sent back as In-Reply-To in received email. :param in_reply_to: Used to send the Message-Id of a received email back as In-Reply-To. :param send_after: Send this email after the given datetime. If value is in integer, then `send_after` will be the automatically set to no of days from current date. 
:param communication: Communication link to be set in Bulk Email record """ if not unsubscribe_method: unsubscribe_method = "/api/method/frappe.email.bulk.unsubscribe" if not recipients: return if isinstance(recipients, basestring): recipients = split_emails(recipients) if isinstance(send_after, int): send_after = add_days(nowdate(), send_after) email_account = get_outgoing_email_account(True, append_to=reference_doctype) if not sender or sender == "Administrator": sender = email_account.default_sender check_bulk_limit(recipients) formatted = get_formatted_html(subject, message, email_account=email_account) try: text_content = html2text(formatted) except HTMLParser.HTMLParseError: text_content = "See html attachment" if reference_doctype and reference_name: unsubscribed = [d.email for d in frappe.db.get_all("Email Unsubscribe", "email", {"reference_doctype": reference_doctype, "reference_name": reference_name})] unsubscribed += [d.email for d in frappe.db.get_all("Email Unsubscribe", "email", {"global_unsubscribe": 1})] else: unsubscribed = [] recipients = [r for r in list(set(recipients)) if r and r not in unsubscribed] for email in recipients: email_content = formatted email_text_context = text_content if reference_doctype: unsubscribe_link = get_unsubscribe_link( reference_doctype=reference_doctype, reference_name=reference_name, email=email, recipients=recipients, expose_recipients=expose_recipients, unsubscribe_method=unsubscribe_method, unsubscribe_params=unsubscribe_params, unsubscribe_message=unsubscribe_message, show_as_cc=show_as_cc ) email_content = email_content.replace("<!--unsubscribe link here-->", unsubscribe_link.html) email_text_context += unsubscribe_link.text # show as cc cc_message = "" if email in show_as_cc: cc_message = _("This email was sent to you as CC") email_content = email_content.replace("<!-- cc message -->", cc_message) email_text_context = cc_message + "\n" + email_text_context # add to queue add(email, sender, subject, email_content, email_text_context, reference_doctype, reference_name, attachments, reply_to, cc, message_id, in_reply_to, send_after, bulk_priority, email_account=email_account, communication=communication) def add(email, sender, subject, formatted, text_content=None, reference_doctype=None, reference_name=None, attachments=None, reply_to=None, cc=(), message_id=None, in_reply_to=None, send_after=None, bulk_priority=1, email_account=None, communication=None): """add to bulk mail queue""" e = frappe.new_doc('Bulk Email') e.recipient = email e.priority = bulk_priority try: mail = get_email(email, sender=sender, formatted=formatted, subject=subject, text_content=text_content, attachments=attachments, reply_to=reply_to, cc=cc, email_account=email_account) mail.set_message_id(message_id) if in_reply_to: mail.set_in_reply_to(in_reply_to) e.message = cstr(mail.as_string()) e.sender = mail.sender except frappe.InvalidEmailAddressError: # bad email id - don't add to queue return e.reference_doctype = reference_doctype e.reference_name = reference_name e.communication = communication e.send_after = send_after e.insert(ignore_permissions=True) def check_bulk_limit(recipients): # get count of mails sent this month this_month = frappe.db.sql("""select count(name) from `tabBulk Email` where status='Sent' and MONTH(creation)=MONTH(CURDATE())""")[0][0] # if using settings from site_config.json, check bulk limit # No limit for own email settings smtp_server = SMTPServer() if (smtp_server.email_account and getattr(smtp_server.email_account, 
"from_site_config", False) or frappe.flags.in_test): monthly_bulk_mail_limit = frappe.conf.get('monthly_bulk_mail_limit') or 500 if (this_month + len(recipients)) > monthly_bulk_mail_limit: throw(_("Cannot send this email. You have crossed the sending limit of {0} emails for this month.").format(monthly_bulk_mail_limit), BulkLimitCrossedError) def get_unsubscribe_link(reference_doctype, reference_name, email, recipients, expose_recipients, show_as_cc, unsubscribe_method, unsubscribe_params, unsubscribe_message): email_sent_to = recipients if expose_recipients else [email] email_sent_cc = ", ".join([e for e in email_sent_to if e in show_as_cc]) email_sent_to = ", ".join([e for e in email_sent_to if e not in show_as_cc]) if email_sent_cc: email_sent_message = _("This email was sent to {0} and copied to {1}").format(email_sent_to, email_sent_cc) else: email_sent_message = _("This email was sent to {0}").format(email_sent_to) if not unsubscribe_message: unsubscribe_message = _("Unsubscribe from this list") unsubscribe_url = get_unsubcribed_url(reference_doctype, reference_name, email, unsubscribe_method, unsubscribe_params) html = """<div style="margin: 15px auto; padding: 0px 7px; text-align: center; color: #8d99a6;"> {email} <p style="margin: 15px auto;"> <a href="{unsubscribe_url}" style="color: #8d99a6; text-decoration: underline; target="_blank">{unsubscribe_message} </a> </p> </div>""".format( unsubscribe_url = unsubscribe_url, email=email_sent_message, unsubscribe_message=unsubscribe_message ) text = "\n{email}\n\n{unsubscribe_message}: {unsubscribe_url}".format( email=email_sent_message, unsubscribe_message=unsubscribe_message, unsubscribe_url=unsubscribe_url ) return frappe._dict({ "html": html, "text": text }) def get_unsubcribed_url(reference_doctype, reference_name, email, unsubscribe_method, unsubscribe_params): params = {"email": email.encode("utf-8"), "doctype": reference_doctype.encode("utf-8"), "name": reference_name.encode("utf-8")} if unsubscribe_params: params.update(unsubscribe_params) query_string = get_signed_params(params) # for test frappe.local.flags.signed_query_string = query_string return get_url(unsubscribe_method + "?" 
+ get_signed_params(params)) @frappe.whitelist(allow_guest=True) def unsubscribe(doctype, name, email): # unsubsribe from comments and communications if not verify_request(): return try: frappe.get_doc({ "doctype": "Email Unsubscribe", "email": email, "reference_doctype": doctype, "reference_name": name }).insert(ignore_permissions=True) except frappe.DuplicateEntryError: frappe.db.rollback() else: frappe.db.commit() return_unsubscribed_page(email, doctype, name) def return_unsubscribed_page(email, doctype, name): frappe.respond_as_web_page(_("Unsubscribed"), _("{0} has left the conversation in {1} {2}").format(email, _(doctype), name)) def flush(from_test=False): """flush email queue, every time: called from scheduler""" smtpserver = SMTPServer() auto_commit = not from_test # additional check check_bulk_limit([]) if frappe.are_emails_muted(): msgprint(_("Emails are muted")) from_test = True frappe.db.sql("""update `tabBulk Email` set status='Expired' where datediff(curdate(), creation) > 3 and status='Not Sent'""", auto_commit=auto_commit) for i in xrange(500): if cint(frappe.defaults.get_defaults().get("hold_bulk")): break email = frappe.db.sql("""select * from `tabBulk Email` where status='Not Sent' and ifnull(send_after, "2000-01-01 00:00:00") < %s order by priority desc, creation asc limit 1 for update""", now_datetime(), as_dict=1) if email: email = email[0] else: break frappe.db.sql("""update `tabBulk Email` set status='Sending' where name=%s""", (email["name"],), auto_commit=auto_commit) if email.communication: frappe.get_doc('Communication', email.communication).set_delivery_status(commit=auto_commit) try: if not from_test: smtpserver.setup_email_account(email.reference_doctype) smtpserver.sess.sendmail(email["sender"], email["recipient"], encode(email["message"])) frappe.db.sql("""update `tabBulk Email` set status='Sent' where name=%s""", (email["name"],), auto_commit=auto_commit) if email.communication: frappe.get_doc('Communication', email.communication).set_delivery_status(commit=auto_commit) except (smtplib.SMTPServerDisconnected, smtplib.SMTPConnectError, smtplib.SMTPHeloError, smtplib.SMTPAuthenticationError, frappe.ValidationError): # bad connection, retry later frappe.db.sql("""update `tabBulk Email` set status='Not Sent' where name=%s""", (email["name"],), auto_commit=auto_commit) if email.communication: frappe.get_doc('Communication', email.communication).set_delivery_status(commit=auto_commit) # no need to attempt further return except Exception, e: frappe.db.sql("""update `tabBulk Email` set status='Error', error=%s where name=%s""", (unicode(e), email["name"]), auto_commit=auto_commit) if email.communication: frappe.get_doc('Communication', email.communication).set_delivery_status(commit=auto_commit) # NOTE: removing commit here because we pass auto_commit # finally: # frappe.db.commit() def clear_outbox(): """Remove mails older than 31 days in Outbox. Called daily via scheduler.""" frappe.db.sql("""delete from `tabBulk Email` where datediff(now(), creation) > 31""")
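
# ---------------------------------------------------------------------------
# Hedged usage sketch for send() above (comment only, so importing this module
# is unchanged).  The keyword names come from the send() docstring; the
# addresses, doctype and document name are illustrative values.
#
#     from frappe.email.bulk import send
#
#     send(
#         recipients=["[email protected]"],
#         sender="[email protected]",
#         subject="Weekly digest",
#         message="<p>Hello!</p>",
#         reference_doctype="Newsletter",   # enables unsubscribe links
#         reference_name="NL-0001",         # illustrative document name
#         send_after=2,                     # integer => days from today
#     )
#
# flush() is then expected to pick the queued "Bulk Email" records up from the
# scheduler and hand them to the configured SMTP server.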
mit
8,331,891,317,508,942,000
34.870253
166
0.727305
false
fake-name/ReadableWebProxy
WebMirror/management/rss_parser_funcs/feed_parse_extractMaddertranslatesCom.py
1
1779
def extractMaddertranslatesCom(item): ''' Parser for 'maddertranslates.com' ''' vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title']) if not (chp or vol) or "preview" in item['title'].lower(): return None tagmap = [ ('Form A Slaves Only Harem Guild', 'An S Rank Adventurer Me Along With Those Girls Who Are Slaves, Form A Slaves Only Harem Guild', 'translated'), ('IT IS A DIFFERENT WORLD AND YET I AM CULTIVATING MONSTERS', 'It Is A Different World And Yet I Am Cultivating Monsters', 'translated'), ('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel'), ] for tagname, name, tl_type in tagmap: if tagname in item['tags']: return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type) titlemap = [ ('The Bloodshot One-Eyed Zombie Emperor ', 'The Bloodshot One-Eyed Zombie Emperor', 'translated'), ('An S Rank Adventurer Me Along With Those Girls Who Are Slaves, Form A Slaves Only Harem Guild', 'An S Rank Adventurer Me Along With Those Girls Who Are Slaves, Form A Slaves Only Harem Guild', 'translated'), ('Tensei Shoujo no Rirekisho', 'Tensei Shoujo no Rirekisho', 'translated'), ('Master of Dungeon', 'Master of Dungeon', 'oel'), ] for titlecomponent, name, tl_type in titlemap: if titlecomponent.lower() in item['title'].lower(): return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type) return False
bsd-3-clause
8,159,679,789,587,798,000
52.939394
217
0.589657
false
chincisan/google-python-exercises
basic/string1.py
1
3560
#!/usr/bin/python -tt # Copyright 2010 Google Inc. # Licensed under the Apache License, Version 2.0 # http://www.apache.org/licenses/LICENSE-2.0 # Google's Python Class # http://code.google.com/edu/languages/google-python-class/ # Basic string exercises # Fill in the code for the functions below. main() is already set up # to call the functions with a few different inputs, # printing 'OK' when each function is correct. # The starter code for each function includes a 'return' # which is just a placeholder for your code. # It's ok if you do not complete all the functions, and there # are some additional functions to try in string2.py. # A. donuts # Given an int count of a number of donuts, return a string # of the form 'Number of donuts: <count>', where <count> is the number # passed in. However, if the count is 10 or more, then use the word 'many' # instead of the actual count. # So donuts(5) returns 'Number of donuts: 5' # and donuts(23) returns 'Number of donuts: many' def donuts(count): message = 'Number of donuts: ' if count < 10: return message + str(count) else: return message + 'many' # B. both_ends # Given a string s, return a string made of the first 2 # and the last 2 chars of the original string, # so 'spring' yields 'spng'. However, if the string length # is less than 2, return instead the empty string. def both_ends(s): if len(s)<2: return'' else: return s[:2] + s[-2:] return # C. fix_start # Given a string s, return a string # where all occurences of its first char have # been changed to '*', except do not change # the first char itself. # e.g. 'babble' yields 'ba**le' # Assume that the string is length 1 or more. # Hint: s.replace(stra, strb) returns a version of string s # where all instances of stra have been replaced by strb. def fix_start(s): # inline-cool solution return s[0] + s.replace(s[0],'*')[1:] # D. MixUp # Given strings a and b, return a single string with a and b separated # by a space '<a> <b>', except swap the first 2 chars of each string. # e.g. # 'mix', pod' -> 'pox mid' # 'dog', 'dinner' -> 'dig donner' # Assume a and b are length 2 or more. def mix_up(a, b): return b[:2] + a[2:] + ' ' + a[:2] + b[2:] # Provided simple test() function used in main() to print # what each function returns vs. what it's supposed to return. def test(got, expected): if got == expected: prefix = ' OK ' else: prefix = ' X ' print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected)) # Provided main() calls the above functions with interesting inputs, # using test() to check if each result is correct or not. def main(): print 'donuts' # Each line calls donuts, compares its result to the expected for that call. test(donuts(4), 'Number of donuts: 4') test(donuts(9), 'Number of donuts: 9') test(donuts(10), 'Number of donuts: many') test(donuts(99), 'Number of donuts: many') print print 'both_ends' test(both_ends('spring'), 'spng') test(both_ends('Hello'), 'Helo') test(both_ends('a'), '') test(both_ends('xyz'), 'xyyz') print print 'fix_start' test(fix_start('babble'), 'ba**le') test(fix_start('aardvark'), 'a*rdv*rk') test(fix_start('google'), 'goo*le') test(fix_start('donut'), 'donut') print print 'mix_up' test(mix_up('mix', 'pod'), 'pox mid') test(mix_up('dog', 'dinner'), 'dig donner') test(mix_up('gnash', 'sport'), 'spash gnort') test(mix_up('pezzy', 'firm'), 'fizzy perm') # Standard boilerplate to call the main() function. if __name__ == '__main__': main()
apache-2.0
-8,113,552,083,929,637,000
29.689655
78
0.666011
false
WillianPaiva/1flow
oneflow/base/templatetags/base_utils.py
1
6677
# -*- coding: utf-8 -*- """ Copyright 2012-2014 Olivier Cortès <[email protected]> This file is part of the 1flow project. 1flow is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. 1flow is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with 1flow. If not, see http://www.gnu.org/licenses/ """ import re from django.template import Library, Node, TemplateSyntaxError from django.template.base import Node, TemplateSyntaxError from django.utils.encoding import smart_text from django.utils.translation import ugettext_lazy as _ from django.core.urlresolvers import reverse from sparks.foundations import utils as sfu register = Library() def get_view_name(context): # context['request'].resolver_match.func # context['request'].resolver_match.args # context['request'].resolver_match.kwargs # context['request'].resolver_match.view_name try: return context['request'].resolver_match.view_name except AttributeError: # Happens on / when the request is a # WSGIRequest and not an HttpRequest. return u'home' @register.simple_tag(takes_context=True) def reverse_active(context, views_names, return_value=None): """ In the template: class="{% reverse_active "view_name" %}" class="{% reverse_active "view_name1,view_name2" "my-active" %}" Taken from http://gnuvince.wordpress.com/2007/09/14/a-django-template-tag-for-the-current-active-page/ #NOQA and extended a lot to simplify template calls… """ for view_name in views_names.split(','): if reverse(view_name) == context['request'].path: return return_value or u'active' return u'' @register.simple_tag(takes_context=True) def view_name_active(context, pattern, return_value=None): """ Same as reverse active, but for URLs without any view. :param:`pattern` must be a valid regular expression. class="{% active "/help/" "top-menu-element-active" %}" """ view_name = get_view_name(context) if re.search(pattern, view_name): return return_value or u'active' return u'' class CaptureasNode(Node): def __init__(self, nodelist, varname): self.nodelist = nodelist self.varname = varname def render(self, context): output = self.nodelist.render(context) context[self.varname] = output return '' class FirstOfAsNode(Node): def __init__(self, args, variable_name=None): self.vars = args self.variable_name = variable_name def render(self, context): for var in self.vars: value = var.resolve(context, True) if value: if self.variable_name: context[self.variable_name] = value break else: return smart_text(value) return '' @register.tag(name='captureas') def do_captureas(parser, token): """ Taken from http://djangosnippets.org/snippets/545/ verbatim. Handy! 
Initial source: https://code.djangoproject.com/ticket/7239 """ try: tag_name, args = token.contents.split(None, 1) except ValueError: raise TemplateSyntaxError( "'captureas' node requires a variable name.") nodelist = parser.parse(('endcaptureas',)) parser.delete_first_token() return CaptureasNode(nodelist, args) @register.tag def firstofas(parser, token): """ Original idea: https://code.djangoproject.com/ticket/12199 """ bits = token.split_contents()[1:] variable_name = None expecting_save_as = bits[-2] == 'as' if expecting_save_as: variable_name = bits.pop(-1) bits = bits[:-1] if len(bits) < 1: raise TemplateSyntaxError( "'firstofas' statement requires at least one argument") return FirstOfAsNode([parser.compile_filter(bit) for bit in bits], variable_name) @register.inclusion_tag('snippets/countdown.html') def countdown(value, redirect=None, limit=0, show_seconds=True, format=None, spacer=None): """ From http://www.plus2net.com/javascript_tutorial/countdown.php """ if redirect is None: redirect = '/' if limit > 0: operation = '+' round_value = 0 counter_test = '<=' else: operation = '-' round_value = 0 # WAS: 2 counter_test = '>=' if format is None or format == 'long': separator = ', ' short = False units = { 'day': _('day'), 'days': _('days'), 'hour': _('hour'), 'hours': _('hours'), 'minute': _('minute'), 'minutes': _('minutes'), 'second': _('second'), 'seconds': _('seconds'), } elif format == 'abbr': separator = ' ' short = True units = { 'day': _('day'), 'days': _('days'), 'hour': _('hour'), 'hours': _('hours'), 'minute': _('min'), 'minutes': _('mins'), 'second': _('sec'), 'seconds': _('secs'), } elif format == 'short': separator = ' ' short = True units = { 'day': _('d'), 'days': _('d'), 'hour': _('h'), 'hours': _('h'), 'minute': _('m'), 'minutes': _('m'), 'second': _('s'), 'seconds': _('s'), } else: raise TemplateSyntaxError("'countdown' 'format' keyword argument " "must be either 'short', 'abbr' or 'long'") return { 'name': sfu.unique_hash(only_letters=True), 'units': units, 'short': short, 'value': value, 'limit': limit, 'unit_sep': ' ' if spacer is None else spacer, 'redirect': redirect, 'operation': operation, 'separator': separator, 'round_value': round_value, 'show_seconds': show_seconds, 'counter_test': counter_test, } @register.filter def lookup(d, key): return d[key]
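
# ---------------------------------------------------------------------------
# Hedged template usage sketch for the tags and filters registered above
# (comment only).  View names, variables and values are illustrative.
#
#     {% load base_utils %}
#
#     <li class="{% reverse_active 'home,profile' 'active' %}">Home</li>
#     <li class="{% view_name_active '^help' %}">Help</li>
#
#     {% captureas welcome_text %}Hello {{ user.get_full_name }}{% endcaptureas %}
#     {% firstofas user.nickname user.username 'anonymous' as display_name %}
#
#     {% countdown 120 '/next/' 0 True 'short' %}
#
#     {{ some_dict|lookup:'key' }}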
agpl-3.0
7,640,967,080,721,701,000
27.279661
116
0.574318
false
rplevka/robottelo
tests/foreman/api/test_hostcollection.py
1
14852
"""Unit tests for host collections. :Requirement: Hostcollection :CaseAutomation: Automated :CaseLevel: Acceptance :CaseComponent: HostCollections :Assignee: swadeley :TestType: Functional :CaseImportance: High :Upstream: No """ from random import choice from random import randint import pytest from broker import VMBroker from nailgun import entities from requests.exceptions import HTTPError from robottelo.datafactory import invalid_values_list from robottelo.datafactory import parametrized from robottelo.datafactory import valid_data_list from robottelo.hosts import ContentHost @pytest.fixture(scope='module') def fake_hosts(module_org): """Create content hosts that can be shared by tests.""" hosts = [entities.Host(organization=module_org).create() for _ in range(2)] return hosts @pytest.mark.parametrize('name', **parametrized(valid_data_list())) @pytest.mark.tier1 def test_positive_create_with_name(module_org, name): """Create host collections with different names. :id: 8f2b9223-f5be-4cb1-8316-01ea747cae14 :parametrized: yes :expectedresults: The host collection was successfully created and has appropriate name. :CaseImportance: Critical """ host_collection = entities.HostCollection(name=name, organization=module_org).create() assert host_collection.name == name @pytest.mark.tier1 def test_positive_list(module_org): """Create new host collection and then retrieve list of all existing host collections :id: 6ae32df2-b917-4830-8709-15fb272b76c1 :BZ: 1331875 :expectedresults: Returned list of host collections for the system contains at least one collection :CaseImportance: Critical """ entities.HostCollection(organization=module_org).create() hc_list = entities.HostCollection().search() assert len(hc_list) >= 1 @pytest.mark.tier1 def test_positive_list_for_organization(): """Create host collection for specific organization. Retrieve list of host collections for that organization :id: 5f9de8ab-2c53-401b-add3-57d86c97563a :expectedresults: The host collection was successfully created and present in the list of collections for specific organization :CaseImportance: Critical """ org = entities.Organization().create() hc = entities.HostCollection(organization=org).create() hc_list = entities.HostCollection(organization=org).search() assert len(hc_list) == 1 assert hc_list[0].id == hc.id @pytest.mark.parametrize('desc', **parametrized(valid_data_list())) @pytest.mark.tier1 def test_positive_create_with_description(module_org, desc): """Create host collections with different descriptions. :id: 9d13392f-8d9d-4ff1-8909-4233e4691055 :parametrized: yes :expectedresults: The host collection was successfully created and has appropriate description. :CaseImportance: Critical """ host_collection = entities.HostCollection(description=desc, organization=module_org).create() assert host_collection.description == desc @pytest.mark.tier1 def test_positive_create_with_limit(module_org): """Create host collections with different limits. :id: 86d9387b-7036-4794-96fd-5a3472dd9160 :expectedresults: The host collection was successfully created and has appropriate limit. :CaseImportance: Critical """ for _ in range(5): limit = randint(1, 30) host_collection = entities.HostCollection( max_hosts=limit, organization=module_org ).create() assert host_collection.max_hosts == limit @pytest.mark.parametrize("unlimited", [False, True]) @pytest.mark.tier1 def test_positive_create_with_unlimited_hosts(module_org, unlimited): """Create host collection with different values of 'unlimited hosts' parameter. 
:id: d385574e-5794-4442-b6cd-e5ded001d877 :parametrized: yes :expectedresults: The host collection was successfully created and has appropriate 'unlimited hosts' parameter value. :CaseImportance: Critical """ host_collection = entities.HostCollection( max_hosts=None if unlimited else 1, organization=module_org, unlimited_hosts=unlimited, ).create() assert host_collection.unlimited_hosts == unlimited @pytest.mark.tier1 def test_positive_create_with_host(module_org, fake_hosts): """Create a host collection that contains a host. :id: 9dc0ad72-58c2-4079-b1ca-2c4373472f0f :expectedresults: The host collection can be read back, and it includes one host. :CaseImportance: Critical :BZ: 1325989 """ host_collection = entities.HostCollection( host=[fake_hosts[0]], organization=module_org ).create() assert len(host_collection.host) == 1 @pytest.mark.tier1 def test_positive_create_with_hosts(module_org, fake_hosts): """Create a host collection that contains hosts. :id: bb8d2b42-9a8b-4c4f-ba0c-c56ae5a7eb1d :expectedresults: The host collection can be read back, and it references two hosts. :CaseImportance: Critical :BZ: 1325989 """ host_collection = entities.HostCollection(host=fake_hosts, organization=module_org).create() assert len(host_collection.host) == len(fake_hosts) @pytest.mark.tier2 def test_positive_add_host(module_org, fake_hosts): """Add a host to host collection. :id: da8bc901-7ac8-4029-bb62-af21aa4d3a88 :expectedresults: Host was added to the host collection. :CaseLevel: Integration :BZ:1325989 """ host_collection = entities.HostCollection(organization=module_org).create() host_collection.host_ids = [fake_hosts[0].id] host_collection = host_collection.update(['host_ids']) assert len(host_collection.host) == 1 @pytest.mark.upgrade @pytest.mark.tier2 def test_positive_add_hosts(module_org, fake_hosts): """Add hosts to host collection. :id: f76b4db1-ccd5-47ab-be15-8c7d91d03b22 :expectedresults: Hosts were added to the host collection. :CaseLevel: Integration :BZ: 1325989 """ host_collection = entities.HostCollection(organization=module_org).create() host_ids = [str(host.id) for host in fake_hosts] host_collection.host_ids = host_ids host_collection = host_collection.update(['host_ids']) assert len(host_collection.host) == len(fake_hosts) @pytest.mark.tier1 def test_positive_read_host_ids(module_org, fake_hosts): """Read a host collection and look at the ``host_ids`` field. :id: 444a1528-64c8-41b6-ba2b-6c49799d5980 :expectedresults: The ``host_ids`` field matches the host IDs passed in when creating the host collection. 
:CaseImportance: Critical :BZ:1325989 """ host_collection = entities.HostCollection(host=fake_hosts, organization=module_org).create() assert frozenset(host.id for host in host_collection.host) == frozenset( host.id for host in fake_hosts ) @pytest.mark.parametrize('new_name', **parametrized(valid_data_list())) @pytest.mark.tier1 def test_positive_update_name(module_org, new_name): """Check if host collection name can be updated :id: b2dedb99-6dd7-41be-8aaa-74065c820ac6 :parametrized: yes :expectedresults: Host collection name was successfully updated :CaseImportance: Critical """ host_collection = entities.HostCollection(organization=module_org).create() host_collection.name = new_name assert host_collection.update().name == new_name @pytest.mark.parametrize('new_desc', **parametrized(valid_data_list())) @pytest.mark.tier1 def test_positive_update_description(module_org, new_desc): """Check if host collection description can be updated :id: f8e9bd1c-1525-4b5f-a07c-eb6b6e7aa628 :parametrized: yes :expectedresults: Host collection description was updated :CaseImportance: Critical """ host_collection = entities.HostCollection(organization=module_org).create() host_collection.description = new_desc assert host_collection.update().description == new_desc @pytest.mark.tier1 def test_positive_update_limit(module_org): """Check if host collection limit can be updated :id: 4eda7796-cd81-453b-9b72-4ef84b2c1d8c :expectedresults: Host collection limit was updated :CaseImportance: Critical """ host_collection = entities.HostCollection( max_hosts=1, organization=module_org, unlimited_hosts=False ).create() for limit in (1, 3, 5, 10, 20): host_collection.max_hosts = limit assert host_collection.update().max_hosts == limit @pytest.mark.tier1 def test_positive_update_unlimited_hosts(module_org): """Check if host collection 'unlimited hosts' parameter can be updated :id: 09a3973d-9832-4255-87bf-f9eaeab4aee8 :expectedresults: Host collection 'unlimited hosts' parameter was updated :CaseImportance: Critical """ random_unlimited = choice([True, False]) host_collection = entities.HostCollection( max_hosts=1 if not random_unlimited else None, organization=module_org, unlimited_hosts=random_unlimited, ).create() for unlimited in (not random_unlimited, random_unlimited): host_collection.max_hosts = 1 if not unlimited else None host_collection.unlimited_hosts = unlimited host_collection = host_collection.update(['max_hosts', 'unlimited_hosts']) assert host_collection.unlimited_hosts == unlimited @pytest.mark.tier1 def test_positive_update_host(module_org, fake_hosts): """Update host collection's host. :id: 23082854-abcf-4085-be9c-a5d155446acb :expectedresults: The host collection was updated with a new host. :CaseImportance: Critical """ host_collection = entities.HostCollection( host=[fake_hosts[0]], organization=module_org ).create() host_collection.host_ids = [fake_hosts[1].id] host_collection = host_collection.update(['host_ids']) assert host_collection.host[0].id == fake_hosts[1].id @pytest.mark.upgrade @pytest.mark.tier1 def test_positive_update_hosts(module_org, fake_hosts): """Update host collection's hosts. :id: 0433b37d-ae16-456f-a51d-c7b800334861 :expectedresults: The host collection was updated with new hosts. 
:CaseImportance: Critical """ host_collection = entities.HostCollection(host=fake_hosts, organization=module_org).create() new_hosts = [entities.Host(organization=module_org).create() for _ in range(2)] host_ids = [str(host.id) for host in new_hosts] host_collection.host_ids = host_ids host_collection = host_collection.update(['host_ids']) assert {host.id for host in host_collection.host} == {host.id for host in new_hosts} @pytest.mark.upgrade @pytest.mark.tier1 def test_positive_delete(module_org): """Check if host collection can be deleted :id: 13a16cd2-16ce-4966-8c03-5d821edf963b :expectedresults: Host collection was successfully deleted :CaseImportance: Critical """ host_collection = entities.HostCollection(organization=module_org).create() host_collection.delete() with pytest.raises(HTTPError): host_collection.read() @pytest.mark.parametrize('name', **parametrized(invalid_values_list())) @pytest.mark.tier1 def test_negative_create_with_invalid_name(module_org, name): """Try to create host collections with different invalid names :id: 38f67d04-a19d-4eab-a577-21b8d62c7389 :parametrized: yes :expectedresults: The host collection was not created :CaseImportance: Critical """ with pytest.raises(HTTPError): entities.HostCollection(name=name, organization=module_org).create() @pytest.mark.tier1 def test_positive_add_remove_subscription(module_org, module_ak_cv_lce, module_puppet_classes): """Try to bulk add and remove a subscription to members of a host collection. :id: c4ec5727-eb25-452e-a91f-87cafb16666b :steps: 1. Create HC, add AK to HC 2. Create product so we can use it's subscription 3. Create some VMs and register them with AK so they are in HC 4. Add the subscription to the members of the Host Collection 5. Assert subscription is added 6. Bulk remove subscription 7. 
Assert it is removed :expectedresults: subscription added to, and removed from, members of host collection :CaseImportance: Critical """ # this command creates a host collection and "appends", makes available, to the AK module_ak_cv_lce.host_collection.append( entities.HostCollection(organization=module_org).create() ) # Move HC from Add tab to List tab on AK view module_ak_cv_lce = module_ak_cv_lce.update(['host_collection']) # Create a product so we have a subscription to use product = entities.Product(organization=module_org).create() prod_name = product.name product_subscription = entities.Subscription().search(query={'search': f'name={prod_name}'})[0] # Create and register VMs as members of Host Collection with VMBroker(nick='rhel7', host_classes={'host': ContentHost}, _count=2) as hosts: for client in hosts: client.install_katello_ca() client.register_contenthost(module_org.label, module_ak_cv_lce.name) # Read host_collection back from Satellite to get host_ids host_collection = module_ak_cv_lce.host_collection[0].read() host_ids = [host.id for host in host_collection.host] # Add subscription # Call nailgun to make the API PUT to members of Host Collection entities.Host().bulk_add_subscriptions( data={ "organization_id": module_org.id, "included": {"ids": host_ids}, "subscriptions": [{"id": product_subscription.id, "quantity": 1}], } ) # GET the subscriptions from hosts and assert they are there for host_id in host_ids: req = entities.HostSubscription(host=host_id).subscriptions() assert ( prod_name in req['results'][0]['product_name'] ), 'Subscription not applied to HC members' # Remove the subscription # Call nailgun to make the API PUT to members of Host Collection entities.Host().bulk_remove_subscriptions( data={ "organization_id": module_org.id, "included": {"ids": host_ids}, "subscriptions": [{"id": product_subscription.id, "quantity": 1}], } ) # GET the subscriptions from hosts and assert they are gone for host_id in host_ids: req = entities.HostSubscription(host=host_id).subscriptions() assert not req['results'], 'Subscription not removed from HC members'
gpl-3.0
-3,255,145,031,037,407,000
31.077754
99
0.69903
false
qris/dynamo-demo
deploy/fablib.py
1
14547
import os, sys import getpass import time import shutil from fabric.api import * from fabric.contrib import files, console from fabric.contrib.files import exists from fabric import utils def _setup_path(): # TODO: something like # if not defined env.project_subdir: # env.project_subdir = env.project # env.project_root = os.path.join(env.home, env.project_subdir) # allow for the fabfile having set up some of these differently if not env.has_key('project_root'): env.project_root = os.path.join(env.home, env.project_dir) if not env.has_key('vcs_root'): env.vcs_root = os.path.join(env.project_root, 'dev') if not env.has_key('prev_root'): env.prev_root = os.path.join(env.project_root, 'previous') if not env.has_key('dump_dir'): env.dump_dir = os.path.join(env.project_root, 'dbdumps') if not env.has_key('deploy_root'): env.deploy_root = os.path.join(env.vcs_root, 'deploy') env.tasks_bin = os.path.join(env.deploy_root, 'tasks.py') if env.project_type == "django" and not env.has_key('django_dir'): env.django_dir = env.project if env.project_type == "django" and not env.has_key('django_root'): env.django_root = os.path.join(env.vcs_root, env.django_dir) if env.use_virtualenv: if not env.has_key('virtualenv_root'): env.virtualenv_root = os.path.join(env.django_root, '.ve') if not env.has_key('python_bin'): env.python_bin = os.path.join(env.virtualenv_root, 'bin', 'python2.6') if not env.has_key('settings'): env.settings = '%(project)s.settings' % env if not env.has_key('use_sudo'): env.use_sudo = True if not env.has_key('cvs_rsh'): env.cvs_rsh = 'CVS_RSH="ssh"' def _get_svn_user_and_pass(): if not env.has_key('svnuser') or len(env.svnuser) == 0: # prompt user for username prompt('Enter SVN username:', 'svnuser') if not env.has_key('svnpass') or len(env.svnpass) == 0: # prompt user for password env.svnpass = getpass.getpass('Enter SVN password:') def deploy_clean(revision=None): """ delete the entire install and do a clean install """ if env.environment == 'production': utils.abort('do not delete the production environment!!!') require('project_root', provided_by=env.valid_non_prod_envs) # TODO: dump before cleaning database? 
with settings(warn_only=True): apache_cmd('stop') clean_db() clean_files() deploy(revision) def clean_files(): sudo_or_run('rm -rf %s' % env.project_root) def _create_dir_if_not_exists(path): if not files.exists(path): sudo_or_run('mkdir -p %s' % path) def deploy(revision=None, keep=None): """ update remote host environment (virtualenv, deploy, update) It takes two arguments: * revision is the VCS revision ID to checkout (if not specified then the latest will be checked out) * keep is the number of old versions to keep around for rollback (default 5)""" require('project_root', provided_by=env.valid_envs) with settings(warn_only=True): apache_cmd('stop') _create_dir_if_not_exists(env.project_root) if files.exists(env.vcs_root): create_copy_for_rollback(keep) checkout_or_update(revision) if env.use_virtualenv: update_requirements() # if we're going to call tasks.py then this has to be done first: create_private_settings() link_local_settings() update_db() if env.project_type == "django": rm_pyc_files() if env.environment == 'production': setup_db_dumps() link_apache_conf() apache_cmd('start') def create_copy_for_rollback(keep): """Copy the current version out of the way so we can rollback to it if required.""" require('prev_root', 'vcs_root', 'tasks_bin', provided_by=env.valid_envs) # create directory for it prev_dir = os.path.join(env.prev_root, time.strftime("%Y-%m-%d_%H-%M-%S")) _create_dir_if_not_exists(prev_dir) # cp -a sudo_or_run('cp -a %s %s' % (env.vcs_root, prev_dir)) # dump database with cd(prev_dir): sudo_or_run(env.tasks_bin + ' dump_db') if keep == None or int(keep) > 0: delete_old_versions(keep) def delete_old_versions(keep=None): """Delete old rollback directories, keeping the last "keep" (default 5)".""" require('prev_root', provided_by=env.valid_envs) prev_versions = run('ls ' + env.prev_root).split('\n') if keep == None: if env.has_key('versions_to_keep'): keep = env.versions_to_keep else: keep = 5 versions_to_keep = -1 * int(keep) prev_versions_to_delete = prev_versions[:versions_to_keep] for version_to_delete in prev_versions_to_delete: with cd(env.prev_root): sudo_or_run('rm -rf ' + version_to_delete) def list_previous(): """List the previous versions available to rollback to.""" # could also determine the VCS revision number require('prev_root', provided_by=env.valid_envs) run('ls ' + env.prev_root) def rollback(version='last', migrate=False, restore_db=False): """Redeploy one of the old versions. Arguments are 'version', 'migrate' and 'restore_db': * if version is 'last' (the default) then the most recent version will be restored. Otherwise specify by timestamp - use list_previous to get a list of available versions. * if restore_db is True, then the database will be restored as well as the code. The default is False. * if migrate is True, then fabric will attempt to work out the new and old migration status and run the migrations to match the database versions. 
The default is False Note that migrate and restore_db cannot both be True.""" require('prev_root', 'vcs_root', 'tasks_bin', provided_by=env.valid_envs) if migrate and restore_db: utils.abort('rollback cannot do both migrate and restore_db') if migrate: utils.abort("rollback: haven't worked out how to do migrate yet ...") if version == 'last': # get the latest directory from prev_dir # list directories in env.prev_root, use last one version = run('ls ' + env.prev_root).split('\n')[-1] # check version specified exists rollback_dir_base = os.path.join(env.prev_root, version) rollback_dir = os.path.join(rollback_dir_base, 'dev') if not files.exists(rollback_dir): utils.abort("Cannot rollback to version %s, it does not exist, use list_previous to see versions available" % version) apache_cmd("stop") # first copy this version out of the way create_copy_for_rollback(-1) if migrate: # run the south migrations back to the old version # but how to work out what the old version is?? pass if restore_db: # feed the dump file into mysql command with cd(rollback_dir_base): sudo_or_run(env.tasks_bin + ' load_dbdump') # delete everything - don't want stray files left over sudo_or_run('rm -rf %s' % env.vcs_root) # cp -a from rollback_dir to vcs_root sudo_or_run('cp -a %s %s' % (rollback_dir, env.vcs_root)) apache_cmd("start") def local_test(): """ run the django tests on the local machine """ require('project') with cd(os.path.join("..", env.project)): local("python " + env.test_cmd, capture=False) def remote_test(): """ run the django tests remotely - staging only """ require('django_root', 'python_bin', 'test_cmd', provided_by=env.valid_non_prod_envs) with cd(env.django_root): sudo_or_run(env.python_bin + env.test_cmd) def version(): """ return the deployed VCS revision and commit comments""" require('project_root', 'repo_type', 'vcs_root', 'repository', provided_by=env.valid_envs) if env.repo_type == "git": with cd(env.vcs_root): sudo_or_run('git log | head -5') elif env.repo_type == "svn": _get_svn_user_and_pass() with cd(env.vcs_root): with hide('running'): cmd = 'svn log --non-interactive --username %s --password %s | head -4' % (env.svnuser, env.svnpass) sudo_or_run(cmd) else: utils.abort('Unsupported repo type: %s' % (env.repo_type)) def checkout_or_update(revision=None): """ checkout or update the project from version control. This command works with svn, git and cvs repositories. 
You can also specify a revision to checkout, as an argument.""" require('project_root', 'repo_type', 'vcs_root', 'repository', provided_by=env.valid_envs) if env.repo_type == "svn": _checkout_or_update_svn(revision) elif env.repo_type == "git": _checkout_or_update_git(revision) elif env.repo_type == "cvs": _checkout_or_update_cvs(revision) def _checkout_or_update_svn(revision=None): # function to ask for svnuser and svnpass _get_svn_user_and_pass() # if the .svn directory exists, do an update, otherwise do # a checkout cmd = 'svn %s --non-interactive --no-auth-cache --username %s --password %s' if files.exists(os.path.join(env.vcs_root, ".svn")): cmd = cmd % ('update', env.svnuser, env.svnpass) if revision: cmd += " --revision " + revision with cd(env.vcs_root): with hide('running'): sudo_or_run(cmd) else: cmd = cmd + " %s" cmd = cmd % ('checkout', env.svnuser, env.svnpass, env.repository) if revision: cmd += "@" + revision with cd(env.project_root): with hide('running'): sudo_or_run(cmd) def _checkout_or_update_git(revision=None): # if the .git directory exists, do an update, otherwise do # a clone if files.exists(os.path.join(env.vcs_root, ".git")): with cd(env.vcs_root): sudo_or_run('git pull') else: with cd(env.project_root): sudo_or_run('git clone %s dev' % env.repository) if revision: with cd(env.vcs_root): sudo_or_run('git checkout %s' % revision) if files.exists(os.path.join(env.vcs_root, ".gitmodules")): with cd(env.vcs_root): sudo_or_run('git submodule update --init') def _checkout_or_update_cvs(revision): if files.exists(env.vcs_root): with cd(env.vcs_root): sudo_or_run('CVS_RSH="ssh" cvs update -d -P') else: if env.has_key('cvs_user'): user_spec = env.cvs_user + "@" else: user_spec = "" with cd(env.project_root): cvs_options = '-d:%s:%s%s:%s' % (env.cvs_connection_type, user_spec, env.repository, env.repo_path) command_options = '-d dev' if revision is not None: command_options += ' -r ' + revision sudo_or_run('%s cvs %s checkout %s %s' % (env.cvs_rsh, cvs_options, command_options, env.cvs_project)) def sudo_or_run(command): if env.use_sudo: sudo(command) else: run(command) def update_requirements(): """ update external dependencies on remote host """ require('tasks_bin', provided_by=env.valid_envs) sudo_or_run(env.tasks_bin + ' update_ve') def clean_db(revision=None): """ delete the entire database """ if env.environment == 'production': utils.abort('do not delete the production database!!!') require('tasks_bin', provided_by=env.valid_non_prod_envs) sudo_or_run(env.tasks_bin + " clean_db") def update_db(use_migrations=False): """ create and/or update the database, do migrations etc """ require('tasks_bin', provided_by=env.valid_envs) sudo_or_run(env.tasks_bin + ' update_db:use_migrations=%s' % use_migrations) def setup_db_dumps(): """ set up mysql database dumps """ require('dump_dir', provided_by=env.valid_envs) sudo_or_run(env.tasks_bin + ' setup_db_dumps:' + env.dump_dir) def touch(): """ touch wsgi file to trigger reload """ require('vcs_root', provided_by=env.valid_envs) wsgi_dir = os.path.join(env.vcs_root, 'wsgi') sudo_or_run('touch ' + os.path.join(wsgi_dir, 'wsgi_handler.py')) def create_private_settings(): require('tasks_bin', provided_by=env.valid_envs) sudo_or_run(env.tasks_bin + ' create_private_settings') def link_local_settings(): """link the local_settings.py file for this environment""" require('tasks_bin', provided_by=env.valid_envs) sudo_or_run(env.tasks_bin + ' link_local_settings:' + env.environment) # check that settings imports local_settings, as it 
always should, # and if we forget to add that to our project, it could cause mysterious # failures if env.project_type == "django": run('grep -q "local_settings" %s' % os.path.join(env.django_root, 'settings.py')) # touch the wsgi file to reload apache touch() def rm_pyc_files(): """Remove all the old pyc files to prevent stale files being used""" require('django_root', provided_by=env.valid_envs) with settings(warn_only=True): with cd(env.django_root): sudo_or_run('find . -name \*.pyc | xargs rm') def link_apache_conf(): """link the apache.conf file""" require('vcs_root', provided_by=env.valid_envs) if env.use_apache == False: return conf_file = os.path.join(env.vcs_root, 'apache', env.environment+'.conf') apache_conf = os.path.join('/etc/httpd/conf.d', env.project+'_'+env.environment+'.conf') if not files.exists(conf_file): utils.abort('No apache conf file found - expected %s' % conf_file) if not files.exists(apache_conf): sudo('ln -s %s %s' % (conf_file, apache_conf)) configtest() def configtest(): """ test Apache configuration """ if env.use_apache: sudo('/usr/sbin/httpd -S') def apache_reload(): """ reload Apache on remote host """ apache_cmd('reload') def apache_restart(): """ restart Apache on remote host """ apache_cmd('restart') def apache_cmd(cmd): """ run cmd against apache init.d script """ if env.use_apache: sudo('/etc/init.d/httpd %s' % cmd)
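
# ---------------------------------------------------------------------------
# Hedged usage sketch (comment only).  This module is meant to be imported by a
# project-specific fabfile that defines the env.* settings and the environment
# tasks; the task and environment names below are assumptions.
#
#     # fabfile.py (per project)
#     from fablib import *
#     # ... set env.project, env.repo_type, env.repository, env.home, etc.
#
#     # then, from the shell:
#     #   fab staging deploy                        # latest revision, keep 5 copies
#     #   fab staging deploy:revision=1234,keep=3
#     #   fab staging list_previous
#     #   fab staging rollback:version=2014-01-31_12-00-00,restore_db=True
#     #   fab production version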
gpl-3.0
-8,189,313,332,269,552,000
35.096774
126
0.612291
false
Keats/gutenberg
components/site/benches/gen.py
1
5070
""" Generates test sites for use in benchmark. Tested with python3 and probably does not work on Windows. """ import datetime import os import random import shutil TAGS = ["a", "b", "c", "d", "e", "f", "g"] CATEGORIES = ["c1", "c2", "c3", "c4"] PAGE = """ +++ title = "Hello" date = REPLACE_DATE [taxonomies] tags = REPLACE_TAG categories = ["REPLACE_CATEGORY"] +++ # Modus cognitius profanam ne duae virtutis mundi ## Ut vita Lorem markdownum litora, care ponto nomina, et ut aspicit gelidas sui et purpureo genuit. Tamen colla venientis [delphina](http://nil-sol.com/ecquis) Tusci et temptata citaeque curam isto ubi vult vulnere reppulit. - Seque vidit flendoque de quodam - Dabit minimos deiecto caputque noctis pluma - Leti coniunx est Helicen - Illius pulvereumque Icare inpositos - Vivunt pereo pluvio tot ramos Olenios gelidis - Quater teretes natura inde ### A subsection Protinus dicunt, breve per, et vivacis genus Orphei munere. Me terram [dimittere casside](http://corpus.org/) pervenit saxo primoque frequentat genuum sorori praeferre causas Libys. Illud in serpit adsuetam utrimque nunc haberent, **terrae si** veni! Hectoreis potes sumite [Mavortis retusa](http://tua.org/) granum captantur potuisse Minervae, frugum. > Clivo sub inprovisoque nostrum minus fama est, discordia patrem petebat precatur absumitur, poena per sit. Foramina *tamen cupidine* memor supplex tollentes dictum unam orbem, Anubis caecae. Viderat formosior tegebat satis, Aethiopasque sit submisso coniuge tristis ubi! ## Praeceps Corinthus totidem quem crus vultum cape ```rs #[derive(Debug)] pub struct Site { /// The base path of the zola site pub base_path: PathBuf, /// The parsed config for the site pub config: Config, pub pages: HashMap<PathBuf, Page>, pub sections: HashMap<PathBuf, Section>, pub tera: Tera, live_reload: bool, output_path: PathBuf, static_path: PathBuf, pub tags: Option<Taxonomy>, pub categories: Option<Taxonomy>, /// A map of all .md files (section and pages) and their permalink /// We need that if there are relative links in the content that need to be resolved pub permalinks: HashMap<String, String>, } ``` ## More stuff And a shortcode: {{ youtube(id="my_youtube_id") }} ### Another subsection Gotta make the toc do a little bit of work # A big title - hello - world - ! 
```py if __name__ == "__main__": gen_site("basic-blog", [""], 250, paginate=True) ``` """ def gen_skeleton(name, is_blog): if os.path.exists(name): shutil.rmtree(name) os.makedirs(os.path.join(name, "content")) os.makedirs(os.path.join(name, "static")) with open(os.path.join(name, "config.toml"), "w") as f: if is_blog: f.write(""" title = "My site" base_url = "https://replace-this-with-your-url.com" theme = "sample" taxonomies = [ {name = "tags", rss = true}, {name = "categories"} ] [extra.author] name = "Vincent Prouillet" """) else: f.write(""" title = "My site" base_url = "https://replace-this-with-your-url.com" theme = "sample" [extra.author] name = "Vincent Prouillet" """) # Re-use the test templates shutil.copytree("../../../test_site/templates", os.path.join(name, "templates")) shutil.copytree("../../../test_site/themes", os.path.join(name, "themes")) def gen_section(path, num_pages, is_blog): with open(os.path.join(path, "_index.md"), "w") as f: if is_blog: f.write(""" +++ paginate_by = 5 sort_by = "date" template = "section_paginated.html" +++ """) else: f.write("+++\n+++\n") day = datetime.date.today() for (i, page) in enumerate(range(0, num_pages)): with open(os.path.join(path, "page-{}.md".format(i)), "w") as f: f.write( PAGE .replace("REPLACE_DATE", str(day + datetime.timedelta(days=1))) .replace("REPLACE_CATEGORY", random.choice(CATEGORIES)) .replace("REPLACE_TAG", str([random.choice(TAGS), random.choice(TAGS)])) ) def gen_site(name, sections, num_pages_per_section, is_blog=False): gen_skeleton(name, is_blog) for section in sections: path = os.path.join(name, "content", section) if section else os.path.join(name, "content") if section: os.makedirs(path) gen_section(path, num_pages_per_section, is_blog) if __name__ == "__main__": gen_site("small-blog", [""], 30, is_blog=True) gen_site("medium-blog", [""], 250, is_blog=True) gen_site("big-blog", [""], 1000, is_blog=True) gen_site("huge-blog", [""], 10000, is_blog=True) gen_site("extra-huge-blog", [""], 100000, is_blog=True) gen_site("small-kb", ["help", "help1", "help2", "help3", "help4", "help5", "help6", "help7", "help8", "help9"], 10) gen_site("medium-kb", ["help", "help1", "help2", "help3", "help4", "help5", "help6", "help7", "help8", "help9"], 100) gen_site("huge-kb", ["help", "help1", "help2", "help3", "help4", "help5", "help6", "help7", "help8", "help9"], 1000)
mit
-6,960,706,027,240,699,000
27.806818
121
0.641223
false
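For reference, a short sketch of how the generator above can be driven for extra site layouts. It assumes the script is saved as gen.py and run from its own directory (the skeleton step copies ../../../test_site/templates relative to the working directory); the site names and section lists are arbitrary.

```python
# Hypothetical extra invocations of gen_site() from the script above.
from gen import gen_site  # assumes the file above is saved as gen.py

# A small knowledge-base style site: three sections, 25 pages each.
gen_site("tiny-kb", ["guides", "reference", "faq"], 25)

# A paginated blog-style site; is_blog=True also writes the taxonomy config
# and the section front matter with paginate_by / sort_by.
gen_site("tiny-blog", [""], 25, is_blog=True)
```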
salcho/antares
ui/loggerWidget.py
1
1137
#!/usr/bin/env python

import gtk
import logging
import time

from ui.IWidget import IWidget
from core.data import logger
from core.log import addStreamHandler


#TODO: Implement file-like thread to show logging!
class loggerWidget(IWidget):
    def __init__(self):
        IWidget.__init__(self)
        self.frame = gtk.Frame('Logger')
        self.text_view = None

    def start(self):
        self.frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)
        sw = gtk.ScrolledWindow()
        sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        self.text_view = gtk.TextView()
        self.text_view.set_editable(False)
        self.text_view.set_wrap_mode(gtk.WRAP_NONE)
        self.text_view.set_justification(gtk.JUSTIFY_LEFT)
        self.text_view.set_cursor_visible(True)
        sw.add_with_viewport(self.text_view)
        self.frame.add(sw)
        # Add handler to the logger; the handler needs a reference to this
        # widget so emitted records can be appended to the text view.
        handler = handlerClass(self)
        addStreamHandler(handler)

    def updateView(self, record):
        buf = self.text_view.get_buffer()
        buf.insert(buf.get_end_iter(), record)

    def getWidget(self):
        return self.frame


class handlerClass(logging.StreamHandler):
    def __init__(self, widget):
        logging.StreamHandler.__init__(self)
        self.widget = widget

    def emit(self, record):
        # Format the record and push it into the owning widget's text view.
        self.widget.updateView(self.format(record) + "\n")
        self.flush()
mit
3,276,869,649,480,475,600
24.863636
59
0.737907
false
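A minimal host window for the widget above, sketched under the assumption that PyGTK 2.x is installed and the repository's ui and core packages are importable; the window size is arbitrary.

```python
import gtk

from ui.loggerWidget import loggerWidget

log_widget = loggerWidget()
log_widget.start()  # builds the text view and registers the logging handler

window = gtk.Window(gtk.WINDOW_TOPLEVEL)
window.set_default_size(500, 300)
window.connect("destroy", gtk.main_quit)
window.add(log_widget.getWidget())
window.show_all()

gtk.main()
```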
lightbase/WSCacicNeo
wscacicneo/test/blacklist/test_unit_blacklist.py
1
3125
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'adley'

import unittest
import os
from .. import settings

here = os.path.abspath(os.path.dirname(__file__))
data_path = os.path.join(here, "../fixtures/")
data_file = os.path.join(data_path, 'users/admin.json')


class TestAttributesBlacklist(unittest.TestCase):
    """
    Tests stories related to the blacklist
    """
    def setUp(self):
        """
        Initializes the test server
        """
        from wscacicneo import main
        app = main({}, **settings)
        from webtest import TestApp
        self.testapp = TestApp(app)
        from wscacicneo.model import blacklist
        """
        Creates the blacklist base
        """
        blacklist_base = blacklist.BlacklistBase()
        result = blacklist_base.create_base()
        self.assertEqual(result, True)

    def test_insert_admin_blacklist(self):
        """
        Inserts an item into the Blacklist base
        """
        from wscacicneo.model import blacklist
        from wscacicneo.test.security.test_profile import TestProfile
        ''' Checks for admin permission '''
        TestProfile.test_permission_administrator(self)
        ''' Generates a random string '''
        blacklist_obj = blacklist.Blacklist(
            item="Microsoft Office 2007"
        )
        results = blacklist_obj.create_item()
        self.assertEqual(type(results), int)

    def test_delete_admin_blacklist(self):
        """
        Removes an item from the Blacklist base
        """
        from wscacicneo.model import blacklist
        from wscacicneo.utils.utils import Utils
        from wscacicneo.test.security.test_profile import TestProfile
        ''' Checks for admin permission '''
        TestProfile.test_permission_administrator(self)
        ''' Generates a random string '''
        random_name = Utils.random_string(8)
        blacklist_obj = blacklist.Blacklist(
            item=random_name
        )
        id_new_data = blacklist_obj.create_item()
        results = blacklist_obj.delete_item(id_new_data)
        self.assertEqual(results, "DELETED")

    def test_list_items_blacklist(self):
        """
        Lists all items in the Blacklist base
        """
        from wscacicneo.model import blacklist
        from wscacicneo.test.security.test_profile import TestProfile
        ''' Checks for admin permission '''
        TestProfile.test_permission_administrator(self)
        blacklist_obj = blacklist.Blacklist(
            item="name"
        )
        list_items = blacklist_obj.search_list_items()
        result_count = list_items.result_count
        self.assertEqual(type(result_count), int,
                         msg="The result is not an integer.")

    def tearDown(self):
        """
        Deletes test data
        """
        from wscacicneo.model import blacklist
        """
        Removes the blacklist base
        """
        blacklist_base = blacklist.BlacklistBase()
        result = blacklist_base.remove_base()
        self.assertEqual(result, True)
gpl-2.0
-6,965,276,048,249,313,000
31.747368
86
0.619936
false
zestyr/lbry
lbrynet/dht/msgtypes.py
1
1593
#!/usr/bin/env python # # This library is free software, distributed under the terms of # the GNU Lesser General Public License Version 3, or any later version. # See the COPYING file included in this archive # # The docstrings in this module contain epytext markup; API documentation # may be created by processing this file with epydoc: http://epydoc.sf.net from lbrynet.core.utils import generate_id class Message(object): """ Base class for messages - all "unknown" messages use this class """ def __init__(self, rpcID, nodeID): self.id = rpcID self.nodeID = nodeID class RequestMessage(Message): """ Message containing an RPC request """ def __init__(self, nodeID, method, methodArgs, rpcID=None): if rpcID is None: rpcID = generate_id() Message.__init__(self, rpcID, nodeID) self.request = method self.args = methodArgs class ResponseMessage(Message): """ Message containing the result from a successful RPC request """ def __init__(self, rpcID, nodeID, response): Message.__init__(self, rpcID, nodeID) self.response = response class ErrorMessage(ResponseMessage): """ Message containing the error from an unsuccessful RPC request """ def __init__(self, rpcID, nodeID, exceptionType, errorMessage): ResponseMessage.__init__(self, rpcID, nodeID, errorMessage) if isinstance(exceptionType, type): self.exceptionType = '%s.%s' % (exceptionType.__module__, exceptionType.__name__) else: self.exceptionType = exceptionType
mit
4,668,161,594,579,503,000
32.1875
93
0.670433
false
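A small sketch of constructing the message classes above; the node IDs and method name are made-up values, and the import assumes lbrynet is installed with the layout shown in this record's path.

```python
from lbrynet.dht.msgtypes import RequestMessage, ResponseMessage, ErrorMessage

# rpcID is omitted, so RequestMessage generates one via generate_id().
req = RequestMessage(nodeID=b'\x01' * 48, method='findNode', methodArgs=[b'some-key'])
print(req.request, req.args)

# A response and an error reusing the request's rpcID.
resp = ResponseMessage(rpcID=req.id, nodeID=b'\x02' * 48, response=['contact'])
err = ErrorMessage(req.id, b'\x02' * 48, ValueError, 'lookup failed')
print(err.exceptionType)  # module-qualified class name, since a type was passed
```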
shalzuth/BraveHaxvius
IDAScripts/GetNetworkKeys.py
1
2891
from idautils import * from idaapi import * def get_string(addr): out = "" while True: if Byte(addr) != 0: out += chr(Byte(addr)) else: break addr += 1 return out def get_string_from_head(head): refs = DataRefsFrom(head) for ref in refs: refs2 = DataRefsFrom(ref) for ref2 in refs2: stringval = get_string(ref2) return stringval def dumpkvp(functionName, addr, key): if key in functionName and 'Request' in functionName: functionName = functionName[3:] functionName = functionName[:functionName.index(key)] functionName = ''.join([i for i in functionName if not i.isdigit()]) functionName = functionName[:len(functionName)-7] for (startea, endea) in Chunks(addr): for head in Heads(startea, endea): operand = GetDisasm(head) if 'R0, [PC,R0]' in operand: #if ', =(' in operand: stringval = get_string_from_head(head) if key is 'getUrl': stringval = stringval[14:22] if 'action' in stringval: stringval = 'action' if not (functionName in requests): requests[functionName] = {} requests[functionName][key[3:]] = stringval if 'aActionsymbol' in operand: stringval = get_string_from_head(head) if key is 'getUrl': stringval = stringval[14:22] if 'action' in stringval: stringval = 'action' if not (functionName in requests): requests[functionName] = {} requests[functionName][key[3:]] = stringval def dumpbody(functionName, addr, key): if key in functionName and 'Request' in functionName: functionName = functionName[3:] functionName = functionName[:functionName.index(key)] functionName = ''.join([i for i in functionName if not i.isdigit()]) functionName = functionName[:len(functionName)-7] stringval = "" basenode = "" for (startea, endea) in Chunks(addr): for head in Heads(startea, endea): operand = GetDisasm(head) if 'mov' in operand and 'ds:(off' in operand: stringval = get_string_from_head(head) if '_ZN9JsonGroup7addNodeEv' in operand: if not (functionName in requests): requests[functionName] = {} if not ("Parameters" in requests[functionName]): requests[functionName]["Parameters"] = {} basenode = stringval requests[functionName]["Parameters"][basenode] = {} if '_ZN8JsonNode8addParamEPK' in operand: requests[functionName]["Parameters"][basenode] = stringval requests = {} for funcea in Functions(0x100000, 0x14ea010): functionName = GetFunctionName(funcea) dumpkvp(functionName, funcea, 'getUrl') dumpkvp(functionName, funcea, 'getRequestID') dumpkvp(functionName, funcea, 'getEncodeKey') #dumpbody(functionName, funcea, 'createBody') print requests import json filename = os.path.expanduser("~/OneDrive/Documents/GitHub/BraveHaxvius/DataExtractor/network2.json") with open(filename, 'w') as fp: json.dump(requests, fp)
mit
-4,050,094,355,223,870,000
31.863636
101
0.684192
false
myarjunar/QGIS
tests/src/python/test_qgsatlascomposition.py
1
12477
# -*- coding: utf-8 -*- ''' test_qgsatlascomposition.py -------------------------------------- Date : Oct 2012 Copyright : (C) 2012 by Dr. Hugo Mercier email : hugo dot mercier at oslandia dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ ''' import qgis # NOQA import os import glob import shutil import tempfile from qgis.testing import start_app, unittest from utilities import unitTestDataPath from qgis.PyQt.QtCore import QFileInfo, QRectF, qWarning from qgis.core import QgsVectorLayer, QgsProject, QgsCoordinateReferenceSystem, \ QgsComposition, QgsFillSymbol, QgsSingleSymbolRenderer, QgsComposerLabel, QgsComposerMap, QgsFontUtils, \ QgsRectangle, QgsComposerLegend, QgsFeature, QgsGeometry, QgsPoint, QgsRendererCategory, QgsCategorizedSymbolRenderer, QgsMarkerSymbol from qgscompositionchecker import QgsCompositionChecker start_app() class TestQgsAtlasComposition(unittest.TestCase): def testCase(self): self.TEST_DATA_DIR = unitTestDataPath() tmppath = tempfile.mkdtemp() for file in glob.glob(os.path.join(self.TEST_DATA_DIR, 'france_parts.*')): shutil.copy(os.path.join(self.TEST_DATA_DIR, file), tmppath) vectorFileInfo = QFileInfo(tmppath + "/france_parts.shp") mVectorLayer = QgsVectorLayer(vectorFileInfo.filePath(), vectorFileInfo.completeBaseName(), "ogr") QgsProject.instance().addMapLayers([mVectorLayer]) self.layers = [mVectorLayer] # create composition with composer map # select epsg:2154 crs = QgsCoordinateReferenceSystem() crs.createFromSrid(2154) QgsProject.instance().setCrs(crs) self.mComposition = QgsComposition(QgsProject.instance()) self.mComposition.setPaperSize(297, 210) # fix the renderer, fill with green props = {"color": "0,127,0"} fillSymbol = QgsFillSymbol.createSimple(props) renderer = QgsSingleSymbolRenderer(fillSymbol) mVectorLayer.setRenderer(renderer) # the atlas map self.mAtlasMap = QgsComposerMap(self.mComposition, 20, 20, 130, 130) self.mAtlasMap.setFrameEnabled(True) self.mAtlasMap.setLayers([mVectorLayer]) self.mComposition.addComposerMap(self.mAtlasMap) # the atlas self.mAtlas = self.mComposition.atlasComposition() self.mAtlas.setCoverageLayer(mVectorLayer) self.mAtlas.setEnabled(True) self.mComposition.setAtlasMode(QgsComposition.ExportAtlas) # an overview self.mOverview = QgsComposerMap(self.mComposition, 180, 20, 50, 50) self.mOverview.setFrameEnabled(True) self.mOverview.overview().setFrameMap(self.mAtlasMap.id()) self.mOverview.setLayers([mVectorLayer]) self.mComposition.addComposerMap(self.mOverview) nextent = QgsRectangle(49670.718, 6415139.086, 699672.519, 7065140.887) self.mOverview.setNewExtent(nextent) # set the fill symbol of the overview map props2 = {"color": "127,0,0,127"} fillSymbol2 = QgsFillSymbol.createSimple(props2) self.mOverview.overview().setFrameSymbol(fillSymbol2) # header label self.mLabel1 = QgsComposerLabel(self.mComposition) self.mComposition.addComposerLabel(self.mLabel1) self.mLabel1.setText("[% \"NAME_1\" %] area") self.mLabel1.setFont(QgsFontUtils.getStandardTestFont()) self.mLabel1.adjustSizeToText() self.mLabel1.setSceneRect(QRectF(150, 5, 60, 15)) qWarning( "header label font: %s exactMatch:%s" % (self.mLabel1.font().toString(), 
self.mLabel1.font().exactMatch())) # feature number label self.mLabel2 = QgsComposerLabel(self.mComposition) self.mComposition.addComposerLabel(self.mLabel2) self.mLabel2.setText("# [%@atlas_featurenumber || ' / ' || @atlas_totalfeatures%]") self.mLabel2.setFont(QgsFontUtils.getStandardTestFont()) self.mLabel2.adjustSizeToText() self.mLabel2.setSceneRect(QRectF(150, 200, 60, 15)) qWarning("feature number label font: %s exactMatch:%s" % ( self.mLabel2.font().toString(), self.mLabel2.font().exactMatch())) self.filename_test() self.autoscale_render_test() self.fixedscale_render_test() self.predefinedscales_render_test() self.hidden_render_test() self.legend_test() shutil.rmtree(tmppath, True) def filename_test(self): self.mAtlas.setFilenamePattern("'output_' || @atlas_featurenumber") self.mAtlas.beginRender() for i in range(0, self.mAtlas.numFeatures()): self.mAtlas.prepareForFeature(i) expected = "output_%d" % (i + 1) assert self.mAtlas.currentFilename() == expected self.mAtlas.endRender() def autoscale_render_test(self): self.mAtlasMap.setAtlasDriven(True) self.mAtlasMap.setAtlasScalingMode(QgsComposerMap.Auto) self.mAtlasMap.setAtlasMargin(0.10) self.mAtlas.beginRender() for i in range(0, 2): self.mAtlas.prepareForFeature(i) self.mLabel1.adjustSizeToText() checker = QgsCompositionChecker('atlas_autoscale%d' % (i + 1), self.mComposition) checker.setControlPathPrefix("atlas") myTestResult, myMessage = checker.testComposition(0, 200) assert myTestResult self.mAtlas.endRender() self.mAtlasMap.setAtlasDriven(False) self.mAtlasMap.setAtlasScalingMode(QgsComposerMap.Fixed) self.mAtlasMap.setAtlasMargin(0) def fixedscale_render_test(self): self.mAtlasMap.setNewExtent(QgsRectangle(209838.166, 6528781.020, 610491.166, 6920530.620)) self.mAtlasMap.setAtlasDriven(True) self.mAtlasMap.setAtlasScalingMode(QgsComposerMap.Fixed) self.mAtlas.beginRender() for i in range(0, 2): self.mAtlas.prepareForFeature(i) self.mLabel1.adjustSizeToText() checker = QgsCompositionChecker('atlas_fixedscale%d' % (i + 1), self.mComposition) checker.setControlPathPrefix("atlas") myTestResult, myMessage = checker.testComposition(0, 200) assert myTestResult self.mAtlas.endRender() def predefinedscales_render_test(self): self.mAtlasMap.setNewExtent(QgsRectangle(209838.166, 6528781.020, 610491.166, 6920530.620)) self.mAtlasMap.setAtlasDriven(True) self.mAtlasMap.setAtlasScalingMode(QgsComposerMap.Predefined) scales = [1800000, 5000000] self.mAtlas.setPredefinedScales(scales) for i, s in enumerate(self.mAtlas.predefinedScales()): assert s == scales[i] self.mAtlas.beginRender() for i in range(0, 2): self.mAtlas.prepareForFeature(i) self.mLabel1.adjustSizeToText() checker = QgsCompositionChecker('atlas_predefinedscales%d' % (i + 1), self.mComposition) checker.setControlPathPrefix("atlas") myTestResult, myMessage = checker.testComposition(0, 200) assert myTestResult self.mAtlas.endRender() def hidden_render_test(self): self.mAtlasMap.setNewExtent(QgsRectangle(209838.166, 6528781.020, 610491.166, 6920530.620)) self.mAtlasMap.setAtlasScalingMode(QgsComposerMap.Fixed) self.mAtlas.setHideCoverage(True) self.mAtlas.beginRender() for i in range(0, 2): self.mAtlas.prepareForFeature(i) self.mLabel1.adjustSizeToText() checker = QgsCompositionChecker('atlas_hiding%d' % (i + 1), self.mComposition) checker.setControlPathPrefix("atlas") myTestResult, myMessage = checker.testComposition(0, 200) assert myTestResult self.mAtlas.endRender() self.mAtlas.setHideCoverage(False) def sorting_render_test(self): 
self.mAtlasMap.setNewExtent(QgsRectangle(209838.166, 6528781.020, 610491.166, 6920530.620)) self.mAtlasMap.setAtlasScalingMode(QgsComposerMap.Fixed) self.mAtlas.setHideCoverage(False) self.mAtlas.setSortFeatures(True) self.mAtlas.setSortKeyAttributeIndex(4) # departement name self.mAtlas.setSortAscending(False) self.mAtlas.beginRender() for i in range(0, 2): self.mAtlas.prepareForFeature(i) self.mLabel1.adjustSizeToText() checker = QgsCompositionChecker('atlas_sorting%d' % (i + 1), self.mComposition) checker.setControlPathPrefix("atlas") myTestResult, myMessage = checker.testComposition(0, 200) assert myTestResult self.mAtlas.endRender() def filtering_render_test(self): self.mAtlasMap.setNewExtent(QgsRectangle(209838.166, 6528781.020, 610491.166, 6920530.620)) self.mAtlasMap.setAtlasScalingMode(QgsComposerMap.Fixed) self.mAtlas.setHideCoverage(False) self.mAtlas.setSortFeatures(False) self.mAtlas.setFilterFeatures(True) self.mAtlas.setFeatureFilter("substr(NAME_1,1,1)='P'") # select only 'Pays de la loire' self.mAtlas.beginRender() for i in range(0, 1): self.mAtlas.prepareForFeature(i) self.mLabel1.adjustSizeToText() checker = QgsCompositionChecker('atlas_filtering%d' % (i + 1), self.mComposition) checker.setControlPathPrefix("atlas") myTestResult, myMessage = checker.testComposition(0, 200) assert myTestResult self.mAtlas.endRender() def legend_test(self): self.mAtlasMap.setAtlasDriven(True) self.mAtlasMap.setAtlasScalingMode(QgsComposerMap.Auto) self.mAtlasMap.setAtlasMargin(0.10) # add a point layer ptLayer = QgsVectorLayer("Point?crs=epsg:4326&field=attr:int(1)&field=label:string(20)", "points", "memory") pr = ptLayer.dataProvider() f1 = QgsFeature(1) f1.initAttributes(2) f1.setAttribute(0, 1) f1.setAttribute(1, "Test label 1") f1.setGeometry(QgsGeometry.fromPoint(QgsPoint(-0.638, 48.954))) f2 = QgsFeature(2) f2.initAttributes(2) f2.setAttribute(0, 2) f2.setAttribute(1, "Test label 2") f2.setGeometry(QgsGeometry.fromPoint(QgsPoint(-1.682, 48.550))) pr.addFeatures([f1, f2]) # categorized symbology r = QgsCategorizedSymbolRenderer("attr", [QgsRendererCategory(1, QgsMarkerSymbol.createSimple({"color": "255,0,0"}), "red"), QgsRendererCategory(2, QgsMarkerSymbol.createSimple({"color": "0,0,255"}), "blue")]) ptLayer.setRenderer(r) QgsProject.instance().addMapLayer(ptLayer) # add the point layer to the map settings layers = self.layers layers = [ptLayer] + layers self.mAtlasMap.setLayers(layers) self.mOverview.setLayers(layers) # add a legend legend = QgsComposerLegend(self.mComposition) legend.moveBy(200, 100) # sets the legend filter parameter legend.setComposerMap(self.mAtlasMap) legend.setLegendFilterOutAtlas(True) self.mComposition.addComposerLegend(legend) self.mAtlas.beginRender() self.mAtlas.prepareForFeature(0) self.mLabel1.adjustSizeToText() checker = QgsCompositionChecker('atlas_legend', self.mComposition) myTestResult, myMessage = checker.testComposition() assert myTestResult self.mAtlas.endRender() # restore state self.mAtlasMap.setLayers([layers[1]]) self.mComposition.removeComposerItem(legend) QgsProject.instance().removeMapLayer(ptLayer.id()) if __name__ == '__main__': unittest.main()
gpl-2.0
1,783,056,050,084,386,300
38.484177
138
0.639256
false
XKNX/xknx
test/remote_value_tests/remote_value_dpt_value_1_ucount_test.py
1
3237
"""Unit test for RemoteValueDptValue1Ucount objects.""" import pytest from xknx import XKNX from xknx.dpt import DPTArray, DPTBinary from xknx.exceptions import ConversionError, CouldNotParseTelegram from xknx.remote_value import RemoteValueDptValue1Ucount from xknx.telegram import GroupAddress, Telegram from xknx.telegram.apci import GroupValueWrite @pytest.mark.asyncio class TestRemoteValueDptValue1Ucount: """Test class for RemoteValueDptValue1Ucount objects.""" def test_to_knx(self): """Test to_knx function with normal operation.""" xknx = XKNX() remote_value = RemoteValueDptValue1Ucount(xknx) assert remote_value.to_knx(10) == DPTArray((0x0A,)) def test_from_knx(self): """Test from_knx function with normal operation.""" xknx = XKNX() remote_value = RemoteValueDptValue1Ucount(xknx) assert remote_value.from_knx(DPTArray((0x0A,))) == 10 def test_to_knx_error(self): """Test to_knx function with wrong parametern.""" xknx = XKNX() remote_value = RemoteValueDptValue1Ucount(xknx) with pytest.raises(ConversionError): remote_value.to_knx(256) with pytest.raises(ConversionError): remote_value.to_knx("256") async def test_set(self): """Test setting value.""" xknx = XKNX() remote_value = RemoteValueDptValue1Ucount( xknx, group_address=GroupAddress("1/2/3") ) await remote_value.set(10) assert xknx.telegrams.qsize() == 1 telegram = xknx.telegrams.get_nowait() assert telegram == Telegram( destination_address=GroupAddress("1/2/3"), payload=GroupValueWrite(DPTArray((0x0A,))), ) await remote_value.set(11) assert xknx.telegrams.qsize() == 1 telegram = xknx.telegrams.get_nowait() assert telegram == Telegram( destination_address=GroupAddress("1/2/3"), payload=GroupValueWrite(DPTArray((0x0B,))), ) async def test_process(self): """Test process telegram.""" xknx = XKNX() remote_value = RemoteValueDptValue1Ucount( xknx, group_address=GroupAddress("1/2/3") ) telegram = Telegram( destination_address=GroupAddress("1/2/3"), payload=GroupValueWrite(DPTArray((0x0A,))), ) await remote_value.process(telegram) assert remote_value.value == 10 async def test_to_process_error(self): """Test process errornous telegram.""" xknx = XKNX() remote_value = RemoteValueDptValue1Ucount( xknx, group_address=GroupAddress("1/2/3") ) with pytest.raises(CouldNotParseTelegram): telegram = Telegram( destination_address=GroupAddress("1/2/3"), payload=GroupValueWrite(DPTBinary(1)), ) await remote_value.process(telegram) with pytest.raises(CouldNotParseTelegram): telegram = Telegram( destination_address=GroupAddress("1/2/3"), payload=GroupValueWrite(DPTArray((0x64, 0x65))), ) await remote_value.process(telegram)
mit
-553,713,442,261,681,660
36.206897
66
0.624035
false
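The same remote value can be exercised outside the pytest suite; below is a sketch with a plain asyncio entry point, using the same group address and payloads as the tests above (no KNX connection is opened, telegrams only sit in the queue).

```python
import asyncio

from xknx import XKNX
from xknx.dpt import DPTArray
from xknx.remote_value import RemoteValueDptValue1Ucount
from xknx.telegram import GroupAddress


async def main():
    xknx = XKNX()
    remote_value = RemoteValueDptValue1Ucount(xknx, group_address=GroupAddress("1/2/3"))

    # Encoding and decoding, mirroring test_to_knx / test_from_knx.
    assert remote_value.to_knx(10) == DPTArray((0x0A,))
    assert remote_value.from_knx(DPTArray((0x0A,))) == 10

    # set() only queues an outgoing telegram.
    await remote_value.set(10)
    print(xknx.telegrams.qsize())  # -> 1


asyncio.run(main())
```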
eRestin/MezzGIS
mezzanine/conf/forms.py
1
3040
from __future__ import unicode_literals from future.builtins import int from collections import defaultdict from django import forms from django.utils.safestring import mark_safe from django.utils.translation import ugettext_lazy as _ from django.template.defaultfilters import urlize from mezzanine.conf import settings, registry from mezzanine.conf.models import Setting FIELD_TYPES = { bool: forms.BooleanField, int: forms.IntegerField, float: forms.FloatField, } class SettingsForm(forms.Form): """ Form for settings - creates a field for each setting in ``mezzanine.conf`` that is marked as editable. """ def __init__(self, *args, **kwargs): super(SettingsForm, self).__init__(*args, **kwargs) settings.use_editable() # Create a form field for each editable setting's from its type. for name in sorted(registry.keys()): setting = registry[name] if setting["editable"]: field_class = FIELD_TYPES.get(setting["type"], forms.CharField) kwargs = { "label": setting["label"] + ":", "required": setting["type"] in (int, float), "initial": getattr(settings, name), "help_text": self.format_help(setting["description"]), } if setting["choices"]: field_class = forms.ChoiceField kwargs["choices"] = setting["choices"] self.fields[name] = field_class(**kwargs) css_class = field_class.__name__.lower() self.fields[name].widget.attrs["class"] = css_class def __iter__(self): """ Calculate and apply a group heading to each field and order by the heading. """ fields = list(super(SettingsForm, self).__iter__()) group = lambda field: field.name.split("_", 1)[0].title() misc = _("Miscellaneous") groups = defaultdict(int) for field in fields: groups[group(field)] += 1 for (i, field) in enumerate(fields): setattr(fields[i], "group", group(field)) if groups[fields[i].group] == 1: fields[i].group = misc return iter(sorted(fields, key=lambda x: (x.group == misc, x.group))) def save(self): """ Save each of the settings to the DB. """ for (name, value) in self.cleaned_data.items(): setting_obj, created = Setting.objects.get_or_create(name=name) setting_obj.value = value setting_obj.save() def format_help(self, description): """ Format the setting's description into HTML. """ for bold in ("``", "*"): parts = [] for i, s in enumerate(description.split(bold)): parts.append(s if i % 2 == 0 else "<b>%s</b>" % s) description = "".join(parts) return mark_safe(urlize(description).replace("\n", "<br>"))
bsd-2-clause
8,096,700,865,729,011,000
35.190476
79
0.565789
false
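The grouping rule in SettingsForm.__iter__ (the text before the first underscore becomes the heading, and single-field groups fall back to Miscellaneous) can be illustrated without Django; the setting names below are examples only.

```python
from collections import defaultdict

names = ["COMMENTS_DEFAULT_APPROVED", "COMMENTS_DISQUS_SHORTNAME", "SITE_TITLE"]

group = lambda name: name.split("_", 1)[0].title()
counts = defaultdict(int)
for name in names:
    counts[group(name)] += 1

for name in names:
    heading = group(name) if counts[group(name)] > 1 else "Miscellaneous"
    print(name, "->", heading)
# The two COMMENTS_* settings share a "Comments" heading; SITE_TITLE is the
# only "Site" entry, so it is folded into "Miscellaneous".
```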
volab/pyvorcv
setup.py
1
3971
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # VoR-CV # The MIT License # # Copyright (c) 2010,2015 Jeremie DECOCK (http://www.jdhp.org) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. try: from setuptools import setup, find_packages except ImportError: from distutils.core import setup import sys from vorcv import __version__ as VERSION # See : http://pypi.python.org/pypi?%3Aaction=list_classifiers CLASSIFIERS = ['Development Status :: 4 - Beta', 'Intended Audience :: Education', 'Intended Audience :: End Users/Desktop', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', 'Operating System :: OS Independent', 'Programming Language :: Python :: 3.4', 'Topic :: Multimedia :: Video', 'Topic :: Multimedia :: Video :: Capture', 'Topic :: Scientific/Engineering :: Image Recognition', 'Topic :: Scientific/Engineering :: Artificial Intelligence'] KEYWORDS = 'opencv image recognition robotics' # You can either specify manually the list of packages to include in the # distribution or use "setuptools.find_packages()" to include them # automatically with a recursive search (from the root directory of the # project). #PACKAGES = find_packages() PACKAGES = ['vorcv'] # The following list contains all dependencies that Python will try to # install with this project INSTALL_REQUIRES = ['numpy'] #INSTALL_REQUIRES = [] SCRIPTS = ["scripts/vorcv-demo", "scripts/vorcv-circle-detection-calibration"] # Entry point can be used to create plugins or to automatically generate # system commands to call specific functions. # Syntax: "name_of_the_command_to_make = package.module:function". 
ENTRY_POINTS = {} #ENTRY_POINTS = { # 'console_scripts': [ # 'vorcv-demo = vorcv.demo:main', # ], #} README_FILE = 'README.rst' def get_long_description(): with open(README_FILE, 'r') as fd: desc = fd.read() return desc setup(author='Jeremie DECOCK', author_email='[email protected]', maintainer='Jeremie DECOCK', maintainer_email='[email protected]', name='pyvorcv', description="The PyVoR-CV project, a computer vision library made for some VoRobotics projects (VoR11, VoR12, ...).", long_description=get_long_description(), url='http://www.jdhp.org/', download_url='http://www.jdhp.org/',# Where the package can be downloaded classifiers=CLASSIFIERS, #license='MIT', # Useless if license is already in CLASSIFIERS keywords=KEYWORDS, packages=PACKAGES, include_package_data=True, # Use the MANIFEST.in file install_requires=INSTALL_REQUIRES, #platforms=['Linux'], #requires=['pyserial'], scripts=SCRIPTS, entry_points=ENTRY_POINTS, version=VERSION)
mit
7,094,162,803,281,906,000
32.940171
123
0.684211
false
robertnishihara/ray
python/ray/dashboard/node_stats.py
1
13362
from collections import defaultdict from ray.dashboard.util import to_unix_time, format_reply_id from base64 import b64decode import ray import threading import json import traceback import copy import logging from datetime import datetime import time from typing import Dict import re from operator import itemgetter logger = logging.getLogger(__name__) PYCLASSNAME_RE = re.compile(r"(.+?)\(") def _group_actors_by_python_class(actors): groups = defaultdict(list) for actor in actors.values(): actor_title = actor.get("actorTitle") if not actor_title: groups["Unknown Class"].append(actor) else: match = PYCLASSNAME_RE.search(actor_title) if match: # Catches case of actorTitle like # Foo(bar, baz, [1,2,3]) -> Foo class_name = match.groups()[0] groups[class_name].append(actor) else: # Catches case of e.g. just Foo # in case of actor task groups[actor_title].append(actor) return groups def _get_actor_group_stats(group): state_to_count = defaultdict(lambda: 0) executed_tasks = 0 min_timestamp = None num_timestamps = 0 sum_timestamps = 0 now = time.time() * 1000 # convert S -> MS for actor in group: state_to_count[actor["state"]] += 1 if "timestamp" in actor: if not min_timestamp or actor["timestamp"] < min_timestamp: min_timestamp = actor["timestamp"] num_timestamps += 1 sum_timestamps += now - actor["timestamp"] if "numExecutedTasks" in actor: executed_tasks += actor["numExecutedTasks"] if num_timestamps > 0: avg_lifetime = int((sum_timestamps / num_timestamps) / 1000) max_lifetime = int((now - min_timestamp) / 1000) else: avg_lifetime = 0 max_lifetime = 0 return { "stateToCount": state_to_count, "avgLifetime": avg_lifetime, "maxLifetime": max_lifetime, "numExecutedTasks": executed_tasks, } class NodeStats(threading.Thread): def __init__(self, redis_address, redis_password=None): self.redis_key = "{}.*".format(ray.gcs_utils.REPORTER_CHANNEL) self.redis_client = ray.services.create_redis_client( redis_address, password=redis_password) self._node_stats = {} self._ip_to_hostname = {} self._addr_to_owner_addr = {} self._addr_to_actor_id = {} self._addr_to_extra_info_dict = {} self._node_stats_lock = threading.Lock() self._default_info = { "actorId": "", "children": {}, "currentTaskFuncDesc": [], "ipAddress": "", "jobId": "", "numExecutedTasks": 0, "numLocalObjects": 0, "numObjectRefsInScope": 0, "port": 0, "state": 0, "taskQueueLength": 0, "usedObjectStoreMemory": 0, "usedResources": {}, } # Mapping from IP address to PID to list of log lines self._logs = defaultdict(lambda: defaultdict(list)) # Mapping from IP address to PID to list of error messages self._errors = defaultdict(lambda: defaultdict(list)) ray.state.state._initialize_global_state( redis_address=redis_address, redis_password=redis_password) super().__init__() def _insert_log_counts(self): for ip, logs_by_pid in self._logs.items(): hostname = self._ip_to_hostname.get(ip) if not hostname or hostname not in self._node_stats: continue logs_by_pid = {pid: len(logs) for pid, logs in logs_by_pid.items()} self._node_stats[hostname]["log_count"] = logs_by_pid def _insert_error_counts(self): for ip, errs_by_pid in self._errors.items(): hostname = self._ip_to_hostname.get(ip) if not hostname or hostname not in self._node_stats: continue errs_by_pid = {pid: len(errs) for pid, errs in errs_by_pid.items()} self._node_stats[hostname]["error_count"] = errs_by_pid def _purge_outdated_stats(self): def current(then, now): if (now - then) > 5: return False return True now = to_unix_time(datetime.utcnow()) self._node_stats = { k: v for k, v in 
self._node_stats.items() if current(v["now"], now) } def get_node_stats(self): with self._node_stats_lock: self._purge_outdated_stats() self._insert_error_counts() self._insert_log_counts() node_stats = sorted( (v for v in self._node_stats.values()), key=itemgetter("boot_time")) return {"clients": node_stats} # Gets actors in a flat way to allow for grouping by actor type. def get_actors(self, workers_info_by_node, infeasible_tasks, ready_tasks): now = time.time() actors: Dict[str, Dict[str, any]] = {} # construct flattened actor tree with self._node_stats_lock: for addr, actor_id in self._addr_to_actor_id.items(): actors[actor_id] = copy.deepcopy(self._default_info) actors[actor_id].update(self._addr_to_extra_info_dict[addr]) for node_id, workers_info in workers_info_by_node.items(): for worker_info in workers_info: if "coreWorkerStats" in worker_info: core_worker_stats = worker_info["coreWorkerStats"] addr = (core_worker_stats["ipAddress"], str(core_worker_stats["port"])) if addr in self._addr_to_actor_id: actor_info = actors[self._addr_to_actor_id[addr]] format_reply_id(core_worker_stats) actor_info.update(core_worker_stats) actor_info["averageTaskExecutionSpeed"] = round( actor_info["numExecutedTasks"] / (now - actor_info["timestamp"] / 1000), 2) actor_info["nodeId"] = node_id actor_info["pid"] = worker_info["pid"] def _update_from_actor_tasks(task, task_spec_type, invalid_state_type): actor_id = ray.utils.binary_to_hex( b64decode(task[task_spec_type]["actorId"])) if invalid_state_type == "pendingActor": task["state"] = -1 elif invalid_state_type == "infeasibleActor": task["state"] = -2 else: raise ValueError(f"Invalid argument" "invalid_state_type={invalid_state_type}") task["actorTitle"] = task["functionDescriptor"][ "pythonFunctionDescriptor"]["className"] format_reply_id(task) actors[actor_id] = task for infeasible_task in infeasible_tasks: _update_from_actor_tasks(infeasible_task, "actorCreationTaskSpec", "infeasibleActor") for ready_task in ready_tasks: _update_from_actor_tasks(ready_task, "actorCreationTaskSpec", "pendingActor") actor_groups = _group_actors_by_python_class(actors) stats_by_group = { name: _get_actor_group_stats(group) for name, group in actor_groups.items() } response_data = {} for name, group in actor_groups.items(): response_data[name] = { "entries": group, "summary": stats_by_group[name] } return response_data def get_logs(self, hostname, pid): ip = self._node_stats.get(hostname, {"ip": None})["ip"] logs = self._logs.get(ip, {}) if pid: logs = {pid: logs.get(pid, [])} return logs def get_errors(self, hostname, pid): ip = self._node_stats.get(hostname, {"ip": None})["ip"] errors = self._errors.get(ip, {}) if pid: errors = {pid: errors.get(pid, [])} return errors def run(self): p = self.redis_client.pubsub(ignore_subscribe_messages=True) p.psubscribe(self.redis_key) logger.info("NodeStats: subscribed to {}".format(self.redis_key)) log_channel = ray.gcs_utils.LOG_FILE_CHANNEL p.subscribe(log_channel) logger.info("NodeStats: subscribed to {}".format(log_channel)) error_channel = ray.gcs_utils.RAY_ERROR_PUBSUB_PATTERN p.psubscribe(error_channel) logger.info("NodeStats: subscribed to {}".format(error_channel)) actor_channel = ray.gcs_utils.RAY_ACTOR_PUBSUB_PATTERN p.psubscribe(actor_channel) logger.info("NodeStats: subscribed to {}".format(actor_channel)) current_actor_table = ray.actors() with self._node_stats_lock: for actor_data in current_actor_table.values(): addr = (actor_data["Address"]["IPAddress"], str(actor_data["Address"]["Port"])) owner_addr = 
(actor_data["OwnerAddress"]["IPAddress"], str(actor_data["OwnerAddress"]["Port"])) self._addr_to_owner_addr[addr] = owner_addr self._addr_to_actor_id[addr] = actor_data["ActorID"] self._addr_to_extra_info_dict[addr] = { "jobId": actor_data["JobID"], "state": actor_data["State"], "timestamp": actor_data["Timestamp"] } for x in p.listen(): try: with self._node_stats_lock: channel = ray.utils.decode(x["channel"])\ if "pattern" not in x or x["pattern"] is None\ else x["pattern"] data = x["data"] if channel == log_channel: data = json.loads(ray.utils.decode(data)) ip = data["ip"] pid = str(data["pid"]) self._logs[ip][pid].extend(data["lines"]) elif channel == str(error_channel): pubsub_msg = ray.gcs_utils.PubSubMessage.FromString( data) error_data = ray.gcs_utils.ErrorTableData.FromString( pubsub_msg.data) message = error_data.error_message message = re.sub(r"\x1b\[\d+m", "", message) match = re.search(r"\(pid=(\d+), ip=(.*?)\)", message) if match: pid = match.group(1) ip = match.group(2) self._errors[ip][pid].append({ "message": message, "timestamp": error_data.timestamp, "type": error_data.type }) elif channel == actor_channel: pubsub_msg = ray.gcs_utils.PubSubMessage.FromString( data) actor_data = ray.gcs_utils.ActorTableData.FromString( pubsub_msg.data) addr = (actor_data.address.ip_address, str(actor_data.address.port)) owner_addr = (actor_data.owner_address.ip_address, str(actor_data.owner_address.port)) self._addr_to_owner_addr[addr] = owner_addr self._addr_to_actor_id[addr] = ray.utils.binary_to_hex( actor_data.actor_id) self._addr_to_extra_info_dict[addr] = { "jobId": ray.utils.binary_to_hex( actor_data.job_id), "state": actor_data.state, "timestamp": actor_data.timestamp } elif channel == ray.gcs_utils.RAY_REPORTER_PUBSUB_PATTERN: data = json.loads(ray.utils.decode(data)) self._ip_to_hostname[data["ip"]] = data["hostname"] self._node_stats[data["hostname"]] = data else: try: data = json.loads(ray.utils.decode(data)) except Exception as e: data = f"Failed to load data because of {e}" logger.warning("Unexpected channel data received, " f"channel: {channel}, data: {data}") except Exception: logger.exception(traceback.format_exc()) continue
apache-2.0
5,768,132,720,916,335,000
40.496894
79
0.503068
false
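The two module-level helpers above are pure functions and can be probed with a hand-built actor table; the sketch below assumes the module is importable from an installed ray matching this record's path, and all actor data is fabricated.

```python
import time

from ray.dashboard.node_stats import (_get_actor_group_stats,
                                       _group_actors_by_python_class)

now_ms = time.time() * 1000
actors = {
    "a1": {"actorTitle": "Counter(start=0)", "state": 1,
           "timestamp": now_ms - 5000, "numExecutedTasks": 12},
    "a2": {"actorTitle": "Counter(start=10)", "state": 1,
           "timestamp": now_ms - 1000, "numExecutedTasks": 3},
    "a3": {"actorTitle": None, "state": 0},
}

groups = _group_actors_by_python_class(actors)
print(sorted(groups))  # ['Counter', 'Unknown Class']

for name, group in groups.items():
    print(name, _get_actor_group_stats(group))
```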
rdio/sentry
src/sentry/web/frontend/projects/plugins.py
1
5548
""" sentry.web.frontend.projects.plugins ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :copyright: (c) 2012 by the Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ from django.contrib import messages from django.core.context_processors import csrf from django.core.urlresolvers import reverse from django.http import HttpResponseRedirect from django.views.decorators.csrf import csrf_protect from django.utils.translation import ugettext_lazy as _ from sentry.constants import MEMBER_OWNER from sentry.plugins import plugins from sentry.web.decorators import has_access from sentry.web.helpers import render_to_response, plugin_config @has_access(MEMBER_OWNER) @csrf_protect def manage_plugins(request, team, project): result = plugins.first('has_perm', request.user, 'configure_project_plugin', project) if result is False and not request.user.is_superuser: return HttpResponseRedirect(reverse('sentry')) if request.POST: enabled = set(request.POST.getlist('plugin')) for plugin in plugins.all(): if plugin.can_enable_for_projects(): plugin.set_option('enabled', plugin.slug in enabled, project) messages.add_message( request, messages.SUCCESS, _('Your settings were saved successfully.')) return HttpResponseRedirect(request.path) context = csrf(request) context.update({ 'team': team, 'page': 'plugins', 'project': project, }) return render_to_response('sentry/projects/plugins/list.html', context, request) @has_access(MEMBER_OWNER) @csrf_protect def configure_project_plugin(request, team, project, slug): try: plugin = plugins.get(slug) except KeyError: return HttpResponseRedirect(reverse('sentry-manage-project', args=[project.team.slug, project.slug])) if not plugin.can_enable_for_projects(): return HttpResponseRedirect(reverse('sentry-manage-project', args=[project.team.slug, project.slug])) result = plugins.first('has_perm', request.user, 'configure_project_plugin', project, plugin) if result is False and not request.user.is_superuser: return HttpResponseRedirect(reverse('sentry')) form = plugin.project_conf_form if form is None: return HttpResponseRedirect(reverse('sentry-manage-project', args=[project.team.slug, project.slug])) action, view = plugin_config(plugin, project, request) if action == 'redirect': messages.add_message( request, messages.SUCCESS, _('Your settings were saved successfully.')) return HttpResponseRedirect(request.path) context = csrf(request) context.update({ 'team': team, 'page': 'plugin', 'title': plugin.get_title(), 'view': view, 'project': project, 'plugin': plugin, 'plugin_is_enabled': plugin.is_enabled(project), }) return render_to_response('sentry/projects/plugins/configure.html', context, request) @has_access(MEMBER_OWNER) @csrf_protect def reset_project_plugin(request, team, project, slug): try: plugin = plugins.get(slug) except KeyError: return HttpResponseRedirect(reverse('sentry-configure-project-plugin', args=[project.team.slug, project.slug, slug])) if not plugin.is_enabled(project): return HttpResponseRedirect(reverse('sentry-configure-project-plugin', args=[project.team.slug, project.slug, slug])) result = plugins.first('has_perm', request.user, 'configure_project_plugin', project, plugin) if result is False and not request.user.is_superuser: return HttpResponseRedirect(reverse('sentry')) plugin.reset_options(project=project) return HttpResponseRedirect(reverse('sentry-configure-project-plugin', args=[project.team.slug, project.slug, slug])) @has_access(MEMBER_OWNER) @csrf_protect def enable_project_plugin(request, team, project, 
slug): try: plugin = plugins.get(slug) except KeyError: return HttpResponseRedirect(reverse('sentry-manage-project-plugins', args=[project.team.slug, project.slug])) redirect_to = reverse('sentry-configure-project-plugin', args=[project.team.slug, project.slug, slug]) if plugin.is_enabled(project) or not plugin.can_enable_for_projects(): return HttpResponseRedirect(redirect_to) result = plugins.first('has_perm', request.user, 'configure_project_plugin', project, plugin) if result is False and not request.user.is_superuser: return HttpResponseRedirect(reverse('sentry')) plugin.set_option('enabled', True, project) return HttpResponseRedirect(redirect_to) @has_access(MEMBER_OWNER) @csrf_protect def disable_project_plugin(request, team, project, slug): try: plugin = plugins.get(slug) except KeyError: return HttpResponseRedirect(reverse('sentry-manage-project-plugins', args=[project.team.slug, project.slug])) redirect_to = reverse('sentry-configure-project-plugin', args=[project.team.slug, project.slug, slug]) if not (plugin.can_disable and plugin.is_enabled(project) and plugin.can_enable_for_projects()): return HttpResponseRedirect(redirect_to) result = plugins.first('has_perm', request.user, 'configure_project_plugin', project, plugin) if result is False and not request.user.is_superuser: return HttpResponseRedirect(reverse('sentry')) plugin.set_option('enabled', False, project) return HttpResponseRedirect(redirect_to)
bsd-3-clause
-1,399,992,528,082,685,000
35.5
125
0.701154
false
0/realtimepork
realtimepork/gpu.py
1
1605
""" GPU utilities. """ from functools import wraps from math import ceil # Load everything we need in this module from PyCUDA (but don't autoinit until # requested). try: from pycuda.tools import DeviceData except ImportError: _pycuda_available = False else: _pycuda_available = True # Is this thing on? _enabled = False class PyCUDAMissingError(Exception): pass def _require_pycuda(f): @wraps(f) def wrapper(*args, **kwargs): if not _pycuda_available: raise PyCUDAMissingError('Unable to load PyCUDA.') return f(*args, **kwargs) return wrapper @_require_pycuda def enable(): """ Initialize the GPU machinery. """ global _enabled if _enabled: return import pycuda.autoinit _enabled = True def is_enabled(): """ Check whether the GPU is available and initialized. """ return _enabled @_require_pycuda def carve_array(xn, yn): """ Determine the best grid and block sizes given the input size. Parameters: xn: Size in the x direction (shorter stride). yn: Size in the y direction (longer stride). Returns: Grid size tuple, block size tuple. """ dev = DeviceData() # Align with the warp size in the x direction and use what remains for the # y direction. x_threads = dev.warp_size y_threads = dev.max_threads // x_threads assert x_threads * y_threads <= dev.max_threads x_blocks = int(ceil(xn / x_threads)) y_blocks = int(ceil(yn / y_threads)) return (x_blocks, y_blocks), (x_threads, y_threads, 1)
mit
6,945,346,836,958,899,000
17.662791
78
0.640498
false
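A hypothetical caller of the helpers above; it needs PyCUDA and a CUDA device, and the 1920x1080 problem size is arbitrary.

```python
from realtimepork import gpu

try:
    gpu.enable()
except gpu.PyCUDAMissingError:
    print("PyCUDA not available; staying on the CPU code path.")
else:
    grid, block = gpu.carve_array(1920, 1080)
    print("grid:", grid, "block:", block)
```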
ronniel1/Voodoo-Mock
voodoo/unittests/test_c_parsing.py
1
16651
import unittest import savingiterator import pprint import tools class TestCParsing( unittest.TestCase ): def setUp( self ): self.maxDiff = None def _simpleTest( self, contents, expected ): tested = savingiterator.SavingIterator() with tools.temporaryFile( contents ) as contentsFile: tested.process( contentsFile ) if tested.saved != expected: pprint.pprint( tested.saved ) pprint.pprint( expected ) self.assertEquals( tested.saved, expected ) def test_structDeclaration( self ): self._simpleTest( "struct name_of_struct;", [ dict( callbackName = "structForwardDeclaration", name = "name_of_struct" ) ] ) def test_emptyStructDefinition( self ): self._simpleTest( "struct name_of_struct {};", [ dict( callbackName = "enterStruct", name = "name_of_struct", fullTextNaked = "structname_of_struct{}", inheritance = [], templatePrefix = "", templateParametersList = None ), dict( callbackName = "leaveStruct" ), ] ) def test_globalInteger( self ): self._simpleTest( "int global;", [ dict( callbackName = "variableDeclaration", name = "global", text = "int global" ), ] ) def test_globalPointer( self ): self._simpleTest( "char * global;", [ dict( callbackName = "variableDeclaration", name = "global", text = "char * global" ), ] ) def test_globalConstPointer( self ): self._simpleTest( "const char * global;", [ dict( callbackName = "variableDeclaration", name = "global", text = "const char * global" ), ] ) def test_globalTypedef( self ): self._simpleTest( "typedef const char * stringTypedef;", [ dict( callbackName = "typedef", name = "stringTypedef", text = "typedef const char * stringTypedef" ), ] ) def test_globalVoidFunctionForwardDeclaration( self ): self._simpleTest( "void aFunction();", [ dict( callbackName = "functionForwardDeclaration", templatePrefix = "", name = "aFunction", parameters = [], text = "void aFunction", returnRValue = False, returnType = "void", static = False, const = False, virtual = False ), ] ) def test_globalIntFunctionForwardDeclaration( self ): self._simpleTest( "int aFunction();", [ dict( callbackName = "functionForwardDeclaration", templatePrefix = "", name = "aFunction", parameters = [], text = "int aFunction", returnRValue = False, returnType = "int", static = False, const = False, virtual = False ), ] ) def test_globalConstIntFunctionForwardDeclaration( self ): self._simpleTest( "const int aFunction();", [ dict( callbackName = "functionForwardDeclaration", templatePrefix = "", name = "aFunction", parameters = [], text = "const int aFunction", returnRValue = False, returnType = "const int", static = False, const = False, virtual = False ), ] ) def test_globalConstCharPFunctionForwardDeclaration( self ): self._simpleTest( "const char * aFunction();", [ dict( callbackName = "functionForwardDeclaration", templatePrefix = "", name = "aFunction", parameters = [], text = "const char * aFunction", returnRValue = False, returnType = "const char *", static = False, const = False, virtual = False ), ] ) def test_globalConstCharPConstFunctionForwardDeclaration( self ): self._simpleTest( "const char * const aFunction();", [ dict( callbackName = "functionForwardDeclaration", templatePrefix = "", name = "aFunction", parameters = [], text = "const char * const aFunction", returnRValue = False, returnType = "const char * const", static = False, const = False, virtual = False ), ] ) def test_globalUnsignedLongLongFunctionForwardDeclaration( self ): self._simpleTest( "unsigned long long aFunction();", [ dict( callbackName = "functionForwardDeclaration", templatePrefix = "", name = "aFunction", 
parameters = [], text = "unsigned long long aFunction", returnRValue = False, returnType = "unsigned long long", static = False, const = False, virtual = False ), ] ) def test_globalVoidFunctionForwardDeclarationIntParameter( self ): self._simpleTest( "void aFunction( int a );", [ dict( callbackName = "functionForwardDeclaration", templatePrefix = "", name = "aFunction", text = "void aFunction", returnRValue = False, returnType = "void", static = False, const = False, virtual = False, parameters = [ dict( name = "a", text = "int a", isParameterPack = False ) ] ), ] ) def test_globalVoidFunctionForwardDeclarationIntConstCharPParameter( self ): self._simpleTest( "void aFunction( int a, const char * p );", [ dict( callbackName = "functionForwardDeclaration", templatePrefix = "", name = "aFunction", text = "void aFunction", returnRValue = False, returnType = "void", static = False, const = False, virtual = False, parameters = [ dict( name = "a", text = "int a", isParameterPack = False ), dict( name = "p", text = "const char * p", isParameterPack = False ), ] ), ] ) def test_VoidFunctionPointerDefinition( self ): self._simpleTest( "void (*aFunction)();", [ dict( callbackName = "variableDeclaration", name = "aFunction", text = "void ( * aFunction ) ( )" ), ] ) def test_TypedefVoidFunctionPointerDefinition( self ): self._simpleTest( "typedef void (*aFunction)();", [ dict( callbackName = "typedef", name = "aFunction", text = "typedef void ( * aFunction ) ( )" ), ] ) def test_Enum( self ): self._simpleTest( "enum EnumName { A = 1, B, C = 2 };", [ dict( callbackName = "enum", name = "EnumName", text = "enum EnumName { A = 1 , B , C = 2 }" ), ] ) def test_globalVoidFunctionDefinition( self ): self._simpleTest( "void aFunction() {}", [ dict( callbackName = "functionDefinition", templatePrefix = "", name = "aFunction", parameters = [], text = "void aFunction", returnRValue = False, returnType = "void", static = False, const = False, virtual = False ), ] ) def test_globalVoidFunctionDefinitionWithInts( self ): self._simpleTest( "int aFunction( int a ) { return a; }", [ dict( callbackName = "functionDefinition", templatePrefix = "", name = "aFunction", text = "int aFunction", returnRValue = False, returnType = "int", static = False, const = False, virtual = False, parameters = [ dict( name = "a", text = "int a", isParameterPack = False ) ] ), ] ) def test_globalVoidFunctionDefinitionStatic( self ): self._simpleTest( "static void aFunction() {}", [ dict( callbackName = "functionDefinition", templatePrefix = "", name = "aFunction", parameters = [], text = "void aFunction", returnRValue = False, returnType = "void", static = False, const = False, virtual = False ), ] ) def test_globalVoidFunctionDefinitionInline( self ): self._simpleTest( "inline void aFunction() {}", [ dict( callbackName = "functionDefinition", templatePrefix = "", name = "aFunction", parameters = [], text = "void aFunction", returnRValue = False, returnType = "void", static = False, const = False, virtual = False ), ] ) def test_globalVoidFunctionDefinitionStaticInline( self ): self._simpleTest( "static inline void aFunction() {}", [ dict( callbackName = "functionDefinition", templatePrefix = "", name = "aFunction", parameters = [], text = "void aFunction", returnRValue = False, returnType = "void", static = False, const = False, virtual = False ), ] ) def test_nonEmptyStructDefinition( self ): self._simpleTest( "struct name_of_struct { int a; const char * b; };", [ dict( callbackName = "enterStruct", name = "name_of_struct", inheritance 
                = [], fullTextNaked = "structname_of_struct{inta;constchar*b;}", templatePrefix = "", templateParametersList = None ),
            dict( callbackName = "fieldDeclaration", name = "a", text = "int a" ),
            dict( callbackName = "fieldDeclaration", name = "b", text = "const char * b" ),
            dict( callbackName = "leaveStruct" ),
        ] )

    def test_globalVoidFunctionDefinitionWithStructPointers( self ):
        self._simpleTest( "const struct S * aFunction( const struct S * s ) { return 0; }", [
            dict( callbackName = 'structForwardDeclaration', name = 'S' ),
            dict( callbackName = "functionDefinition", templatePrefix = "", name = "aFunction",
                text = "const struct S * aFunction", returnRValue = False, returnType = "const struct S *",
                static = False, const = False, virtual = False,
                parameters = [ dict( name = "s", text = "const struct S * s", isParameterPack = False ) ] ),
        ] )

    def test_nonEmptyStructTypdefDefinition( self ):
        self._simpleTest( "typedef struct name_of_struct { int a; const char * b; } struct_t;", [
            dict( callbackName = "enterStruct", name = "name_of_struct", inheritance = [],
                fullTextNaked = "structname_of_struct{inta;constchar*b;}",
                templatePrefix = "", templateParametersList = None ),
            dict( callbackName = "fieldDeclaration", name = "a", text = "int a" ),
            dict( callbackName = "fieldDeclaration", name = "b", text = "const char * b" ),
            dict( callbackName = "leaveStruct" ),
            dict( callbackName = "typedef", name = "struct_t", text = "typedef struct name_of_struct struct_t" )
        ] )

    def test_useTypedef( self ):
        self._simpleTest( "typedef int Int;\nInt i;", [
            dict( callbackName = "typedef", name = "Int", text = "typedef int Int" ),
            dict( callbackName = "variableDeclaration", name = "i", text = "Int i" ),
        ] )

    def _parseError( self, contents ):
        tested = savingiterator.SavingIterator()
        tested.printErrors = False
        with tools.temporaryFile( contents ) as contentsFile:
            try:
                tested.process( contentsFile )
            except:
                return
            else:
                raise Exception( "Expected parsing to fail" )

    def test_unknownType( self ):
        self._parseError( "Int i;" )

    def _testInclude( self, contents1, contents2, expected ):
        tested = savingiterator.SavingIterator()
        with tools.temporaryFile( contents1 ) as contentsFile1:
            contents2 = '#include "%s"\n%s' % ( contentsFile1, contents2 )
            with tools.temporaryFile( contents2 ) as contentsFile2:
                tested.process( contentsFile2 )
        if tested.saved != expected:
            pprint.pprint( tested.saved )
            pprint.pprint( expected )
        self.assertEquals( tested.saved, expected )

    def test_includeScenario( self ):
        self._testInclude( "typedef int Int;", "Int i;", [
            dict( callbackName = "variableDeclaration", name = "i", text = "Int i" ),
        ] )

    def test_realCase( self ):
        self._simpleTest( """
struct net_device {};
struct net {};
extern struct net init_net;
extern struct net_device * dev_get_by_name(struct net *net, const char *name);
extern void dev_put(struct net_device *dev);
""", [
            dict( callbackName = "enterStruct", name = 'net_device', inheritance = [],
                fullTextNaked = "structnet_device{}", templatePrefix = "", templateParametersList = None ),
            dict( callbackName = "leaveStruct" ),
            dict( callbackName = "enterStruct", name = 'net', inheritance = [],
                fullTextNaked = "structnet{}", templatePrefix = "", templateParametersList = None ),
            dict( callbackName = "leaveStruct" ),
            dict( callbackName = "variableDeclaration", name = "init_net", text = "extern struct net init_net" ),
            dict( callbackName = "functionForwardDeclaration", templatePrefix = "", name = "dev_get_by_name",
                text = "struct net_device * dev_get_by_name", returnRValue = False,
                returnType = "struct net_device *", static = False, const = False, virtual = False,
                parameters = [ dict( name = "net", text = "struct net * net", isParameterPack = False ),
                               dict( name = "name", text = "const char * name", isParameterPack = False ) ] ),
            dict( callbackName = 'functionForwardDeclaration', name = 'dev_put',
                parameters = [ dict( name = 'dev', text = 'struct net_device * dev', isParameterPack = False ) ],
                returnRValue = False, returnType = 'void', static = False, templatePrefix = '',
                text = 'void dev_put', const = False, virtual = False )
        ] )

    def test_defines( self ):
        contents = "DEFINESTRUCT name_of_struct;"
        tested = savingiterator.SavingIterator()
        with tools.temporaryFile( contents ) as contentsFile:
            tested.process( contentsFile, defines = [ "DEFINESTRUCT=struct" ] )
        expected = [ dict( callbackName = "structForwardDeclaration", name = "name_of_struct" ) ]
        if tested.saved != expected:
            pprint.pprint( tested.saved )
            pprint.pprint( expected )
        self.assertEquals( tested.saved, expected )

    def test_BugfixTypedef1( self ):
        self._simpleTest( "typedef unsigned long a_size_t; a_size_t defunc();", [
            dict( callbackName = "typedef", name = "a_size_t", text = "typedef unsigned long a_size_t" ),
            dict( callbackName = "functionForwardDeclaration", templatePrefix = "", name = "defunc",
                text = "a_size_t defunc", returnRValue = False, returnType = "a_size_t", static = False,
                parameters = [], const = False, virtual = False ),
        ] )

    def test_StaticVariableShouldNotRemainStaticToAvoidCompilationError( self ):
        self._simpleTest( "static int i;", [
            dict( callbackName = "variableDeclaration", name = "i", text = "int i" )
        ] )

    def test_BugfixRecursiveBraces( self ):
        self._simpleTest( "int func() { if ( 1 ) { return 1; } else { return 0; } }", [
            dict( callbackName = "functionDefinition", templatePrefix = "", name = "func", text = "int func",
                returnRValue = False, returnType = "int", static = False, parameters = [],
                const = False, virtual = False ),
        ] )

    def test_BugfixHashPoundParsedIntoFunctionDeclarationSyntaticUnit( self ):
        self._simpleTest( "#if 1\nint func() { return 0; }\n#endif", [
            dict( callbackName = "functionDefinition", templatePrefix = "", name = "func", text = "int func",
                returnRValue = False, returnType = "int", static = False, parameters = [],
                const = False, virtual = False ),
        ] )

    def test_unionDeclaration( self ):
        self._simpleTest( "union a { int b; int c; };", [
            dict( callbackName = "union", name = "a", text = "union a { int b ; int c ; }" ),
        ] )

    def test_anonymousUnionMember( self ):
        self._simpleTest( "struct a { union { int b; int c; } d; };", [
            dict( callbackName = "enterStruct", name = "a", fullTextNaked = "structa{union{intb;intc;}d;}",
                inheritance = [], templatePrefix = "", templateParametersList = None ),
            dict( callbackName = "fieldDeclaration", name = "d", text = "union { int b ; int c ; } d" ),
            dict( callbackName = "leaveStruct" ),
        ] )

    def test_justADefine( self ):
        self._simpleTest( "#define nothing nada\n", [] )
        self._simpleTest( "#define nothing\n\nnothing", [] )
        self._simpleTest( "#define nothing\n\nnothing int a;", [
            dict( callbackName = "variableDeclaration", name = "a", text = "int a" ),
        ] )
        self._simpleTest( "#define a b\n\nint a;", [
            dict( callbackName = "variableDeclaration", name = "b", text = "int a" ),
        ] )

    def notest_KnownIssue_DefiningIntBoolOrBuiltinTypes( self ):
        self._simpleTest( "#define int int\n\rbool b;\nint a;", [
            dict( callbackName = "variableDeclaration", name = "b", text = "bool b" ),
            dict( callbackName = "variableDeclaration", name = "a", text = "int a" ),
        ] )

if __name__ == '__main__':
    unittest.main()
gpl-2.0
-3,396,596,157,572,860,400
54.688963
170
0.597141
false
hfercc/mese2014
lib/rest_framework/mixins.py
1
7228
""" Basic building blocks for generic class based views. We don't bind behaviour to http method handlers yet, which allows mixin classes to be composed in interesting ways. """ from __future__ import unicode_literals from django.core.exceptions import ValidationError from django.http import Http404 from rest_framework import status from rest_framework.response import Response from rest_framework.request import clone_request from rest_framework.settings import api_settings import warnings def _get_validation_exclusions(obj, pk=None, slug_field=None, lookup_field=None): """ Given a model instance, and an optional pk and slug field, return the full list of all other field names on that model. For use when performing full_clean on a model instance, so we only clean the required fields. """ include = [] if pk: # Pending deprecation pk_field = obj._meta.pk while pk_field.rel: pk_field = pk_field.rel.to._meta.pk include.append(pk_field.name) if slug_field: # Pending deprecation include.append(slug_field) if lookup_field and lookup_field != 'pk': include.append(lookup_field) return [field.name for field in obj._meta.fields if field.name not in include] class CreateModelMixin(object): """ Create a model instance. """ def create(self, request, *args, **kwargs): data = dict(request.DATA) data.update(**kwargs) serializer = self.get_serializer(data=data, files=request.FILES) if serializer.is_valid(): self.pre_save(serializer.object) self.object = serializer.save(force_insert=True) self.post_save(self.object, created=True) headers = self.get_success_headers(serializer.data) return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) def get_success_headers(self, data): try: return {'Location': data[api_settings.URL_FIELD_NAME]} except (TypeError, KeyError): return {} class ListModelMixin(object): """ List a queryset. """ empty_error = "Empty list and '%(class_name)s.allow_empty' is False." def list(self, request, *args, **kwargs): self.object_list = self.filter_queryset(self.get_queryset()) # Default is to allow empty querysets. This can be altered by setting # `.allow_empty = False`, to raise 404 errors on empty querysets. if not self.allow_empty and not self.object_list: warnings.warn( 'The `allow_empty` parameter is due to be deprecated. ' 'To use `allow_empty=False` style behavior, You should override ' '`get_queryset()` and explicitly raise a 404 on empty querysets.', PendingDeprecationWarning ) class_name = self.__class__.__name__ error_msg = self.empty_error % {'class_name': class_name} raise Http404(error_msg) # Switch between paginated or standard style responses page = self.paginate_queryset(self.object_list) if page is not None: serializer = self.get_pagination_serializer(page) else: serializer = self.get_serializer(self.object_list, many=True) return Response(serializer.data) class RetrieveModelMixin(object): """ Retrieve a model instance. """ def retrieve(self, request, *args, **kwargs): self.object = self.get_object() serializer = self.get_serializer(self.object) return Response(serializer.data) class UpdateModelMixin(object): """ Update a model instance. 
""" def update(self, request, *args, **kwargs): partial = kwargs.pop('partial', False) self.object = self.get_object_or_none() serializer = self.get_serializer(self.object, data=request.DATA, files=request.FILES, partial=partial) if not serializer.is_valid(): return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) try: self.pre_save(serializer.object) except ValidationError as err: # full_clean on model instance may be called in pre_save, # so we have to handle eventual errors. return Response(err.message_dict, status=status.HTTP_400_BAD_REQUEST) if self.object is None: self.object = serializer.save(force_insert=True) self.post_save(self.object, created=True) return Response(serializer.data, status=status.HTTP_201_CREATED) self.object = serializer.save(force_update=True) self.post_save(self.object, created=False) return Response(serializer.data, status=status.HTTP_200_OK) def partial_update(self, request, *args, **kwargs): kwargs['partial'] = True return self.update(request, *args, **kwargs) def get_object_or_none(self): try: return self.get_object() except Http404: if self.request.method == 'PUT': # For PUT-as-create operation, we need to ensure that we have # relevant permissions, as if this was a POST request. This # will either raise a PermissionDenied exception, or simply # return None. self.check_permissions(clone_request(self.request, 'POST')) else: # PATCH requests where the object does not exist should still # return a 404 response. raise def pre_save(self, obj): """ Set any attributes on the object that are implicit in the request. """ # pk and/or slug attributes are implicit in the URL. lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field lookup = self.kwargs.get(lookup_url_kwarg, None) pk = self.kwargs.get(self.pk_url_kwarg, None) slug = self.kwargs.get(self.slug_url_kwarg, None) slug_field = slug and self.slug_field or None if lookup: setattr(obj, self.lookup_field, lookup) if pk: setattr(obj, 'pk', pk) if slug: setattr(obj, slug_field, slug) # Ensure we clean the attributes so that we don't eg return integer # pk using a string representation, as provided by the url conf kwarg. if hasattr(obj, 'full_clean'): exclude = _get_validation_exclusions(obj, pk, slug_field, self.lookup_field) obj.full_clean(exclude) class DestroyModelMixin(object): """ Destroy a model instance. """ def destroy(self, request, *args, **kwargs): obj = self.get_object() self.pre_delete(obj) obj.delete() self.post_delete(obj) return Response(status=status.HTTP_204_NO_CONTENT)
apache-2.0
8,436,068,386,180,543,000
34.505051
88
0.606945
false
aerostitch/nagios_checks
hdfs_datanode_balancing_status.py
1
4396
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Joseph Herlant <[email protected]>
# File name: hdfs_datanode_balancing_status.py
# Creation date: 2014-10-08
#
# Distributed under terms of the GNU GPLv3 license.

"""
This nagios active check parses the Hadoop HDFS web interface url:
http://<namenode>:<port>/dfsnodelist.jsp?whatNodes=LIVE
to check that no datanode is beyond the balancing threshold (in both ways).
The goal of this check is to check if the balancer needs to be run manually
and do its job correctly (while running for example in cron jobs).

The output includes performance datas and is truncated if longer than 1024 chars.
The values of the output are the variation between the average disk usage of the
nodes over the cluster and the disk usage of the current node on the cluster.
A negative value of X means that the node is X percent under the average disk
usage of the datanodes over the cluster. A positive value means that it's over
the average.

Tested on: Hadoop CDH3U5
"""

__author__ = 'Joseph Herlant'
__copyright__ = 'Copyright 2014, Joseph Herlant'
__credits__ = ['Joseph Herlant']
__license__ = 'GNU GPLv3'
__version__ = '1.0.0'
__maintainer__ = 'Joseph Herlant'
__email__ = '[email protected]'
__status__ = 'Production'
__website__ = 'https://github.com/aerostitch/'

from mechanize import Browser
from BeautifulSoup import BeautifulSoup
import argparse, sys

if __name__ == '__main__':
    # use -h argument to get help
    parser = argparse.ArgumentParser(
        description='A Nagios check to verify that all datanodes of an HDFS \
                cluster is in under the balancing threshold \
                using the namenode web interface.')
    parser.add_argument('-n', '--namenode', required=True,
                        help='hostname of the namenode of the cluster')
    parser.add_argument('-p', '--port', type=int, default=50070,
                        help='port of the namenode http interface. \
                        Defaults to 50070.')
    parser.add_argument(
        '-w', '--warning', type=int, default=10,
        help='warning threshold. If the datanode usage differs from average \
            usage to more than this threshold, raise a warning. Defaults to 10.'
    )
    parser.add_argument(
        '-c', '--critical', type=int, default=15,
        help='critical threshold. If the datanode usage differs from average \
            usage to more than this threshold, raise a critical. Defaults to 15.'
    )
    args = parser.parse_args()

    # Get the web page from the namenode
    url = "http://%s:%d/dfsnodelist.jsp?whatNodes=LIVE" % (args.namenode, args.port)
    try:
        page = Browser().open(url)
    except IOError:
        print 'CRITICAL: Cannot access namenode interface on %s:%d!' % (args.namenode, args.port)
        sys.exit(2)

    # parse the page and storing the {datanode: pct_usage} hash
    html = page.read()
    soup = BeautifulSoup(html)
    datanodes = soup.findAll('td', {'class' : 'name'})
    pcused = soup.findAll('td', {'class' : 'pcused', 'align' : 'right'})
    nodes_pct = {}
    for (idx, node) in enumerate(datanodes):
        pct = float(pcused[idx].contents[0].strip())
        node = datanodes[idx].findChildren('a')[0].contents[0].strip()
        nodes_pct[node] = pct

    # Each node variation against the average pct must be under the threshold
    w_msg = ''
    c_msg = ''
    perfdata = ''
    avg = 0
    if len(nodes_pct) > 0:
        avg = float(sum(nodes_pct.values()))/len(nodes_pct)
    else:
        print 'CRITICAL: Unable to find any node.'
        sys.exit(2)

    for (node, pct) in nodes_pct.items():
        if abs(pct-avg) >= args.critical:
            c_msg += ' %s=%.1f,' % (node, pct-avg)
            perfdata += ' %s=%.1f,' % (node, pct-avg)
        elif abs(avg-pct) >= args.warning:
            w_msg += ' %s=%.1f,' % (node, pct-avg)
            perfdata += ' %s=%.1f,' % (node, pct-avg)
        else:
            perfdata += ' %s=%.1f,' % (node, pct-avg)

    # Prints the values and exits with the nagios exit code
    if len(c_msg) > 0:
        print ('CRITICAL:%s%s |%s' % (c_msg, w_msg, perfdata)).strip(',')[:1024]
        sys.exit(2)
    elif len(w_msg) > 0:
        print ('WARNING:%s |%s' % (w_msg, perfdata)).strip(',')[:1024]
        sys.exit(1)
    else:
        print ('OK |%s' % (perfdata)).strip(',')[:1024]
        sys.exit(0)
gpl-2.0
1,909,556,536,262,776,600
37.226087
97
0.619882
false